diff --git a/.circleci/config.yml b/.circleci/config.yml
index 7853fbb8e..4b59cf285 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -14,7 +14,7 @@ jobs:
docker_layer_caching: true
environment:
- GO_VERSION: '1.11.4'
+ GO_VERSION: '1.13'
K8S_VERSION: 'v1.12.0'
VAULT_VERSION: '1.0.0'
KUBECONFIG: '/home/circleci/.kube/config'
@@ -29,7 +29,6 @@ jobs:
steps:
- checkout
-
- run:
name: Setup golang
command: |
@@ -39,100 +38,103 @@ jobs:
"https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" \
&& sudo tar -C /usr/local -xzf go.linux-amd64.tar.gz
echo 'export PATH="$GOPATH/bin:$PATH"' >> "${BASH_ENV}"
-
- run:
name: Run fmt
command: |
make check-fmt
-
- run:
name: Run golint
command: |
make lint
-
- run:
name: Run misspell
command: |
- make check-misspell
-
+ make check-misspell
- run:
name: Run ineffassign
command: |
- make ineffassign
-
- - run:
- name: Setup kubectl
- command: |
- curl \
- -Lo kubectl \
- "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl" \
- && chmod +x kubectl \
- && sudo mv kubectl /usr/local/bin/
- mkdir -p "${HOME}/.kube"
- touch "${HOME}/.kube/config"
-
- - run:
- name: Setup minikube
- command: |
- curl \
- -Lo minikube \
- "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64" \
- && chmod +x minikube \
- && sudo mv minikube /usr/local/bin/
-
- - run:
- name: Start minikube
- command: |
- sudo -E minikube start --vm-driver=none --cpus 2 --memory 4096 --kubernetes-version="${K8S_VERSION}"
-
+ make ineffassign
- run:
- name: Install Helm
+ name: Check generators for unwanted diffs
command: |
- curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh
- chmod 700 get_helm.sh
- ./get_helm.sh
-
- helm init
- helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
- helm repo update
-
- - run:
- name: Start Minio
- command: |
- kubectl create -f hack/minio.yaml
- kubectl wait --for=condition=available deployment/minio-deployment --timeout=120s
- minio="$(kubectl get pod -l app=minio -o 'jsonpath={.items[0].metadata.name}')"
- kubectl wait --for=condition=Ready pod "${minio}" --timeout=120s
-
- - run:
- name: Setup minio cli
- command: |
- kubectl create -f hack/minio-mc.yaml
- kubectl wait --for=condition=available deployment/minio-mc-deployment --timeout=120s
- mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')"
- kubectl wait --for=condition=Ready pod "${mc_pod}" --timeout=120s
- kubectl exec "${mc_pod}" -- \
- mc config host add minio \
- 'http://minio-service.default.svc.cluster.local:9000' \
- 'minio_access_key' \
- 'minio_secret_key'
-
- - run:
- name: Create test bucket
- command: |
- mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')"
- kubectl exec "${mc_pod}" -- \
- mc mb --region 'test_region' minio/logs
-
- - run:
- name: Build docker image
- command: |
- make docker DOCKER_TAG=local
-
+ make check-diff
- run:
name: Test
command: |
- hack/test.sh
+ make test
+ #- run:
+ # name: Setup kubectl
+ # command: |
+ # curl \
+ # -Lo kubectl \
+ # "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl" \
+ # && chmod +x kubectl \
+ # && sudo mv kubectl /usr/local/bin/
+ # mkdir -p "${HOME}/.kube"
+ # touch "${HOME}/.kube/config"
+#
+ #- run:
+ # name: Setup minikube
+ # command: |
+ # curl \
+ # -Lo minikube \
+ # "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64" \
+ # && chmod +x minikube \
+ # && sudo mv minikube /usr/local/bin/
+#
+ #- run:
+ # name: Start minikube
+ # command: |
+ # sudo -E minikube start --vm-driver=none --cpus 2 --memory 4096 --kubernetes-version="${K8S_VERSION}"
+#
+ #- run:
+ # name: Install Helm
+ # command: |
+ # curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh
+ # chmod 700 get_helm.sh
+ # ./get_helm.sh
+#
+ # helm init
+ # helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
+ # helm repo update
+#
+ #- run:
+ # name: Start Minio
+ # command: |
+ # kubectl create -f hack/minio.yaml
+ # kubectl wait --for=condition=available deployment/minio-deployment --timeout=120s
+ # minio="$(kubectl get pod -l app=minio -o 'jsonpath={.items[0].metadata.name}')"
+ # kubectl wait --for=condition=Ready pod "${minio}" --timeout=120s
+#
+ #- run:
+ # name: Setup minio cli
+ # command: |
+ # kubectl create -f hack/minio-mc.yaml
+ # kubectl wait --for=condition=available deployment/minio-mc-deployment --timeout=120s
+ # mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')"
+ # kubectl wait --for=condition=Ready pod "${mc_pod}" --timeout=120s
+ # kubectl exec "${mc_pod}" -- \
+ # mc config host add minio \
+ # 'http://minio-service.default.svc.cluster.local:9000' \
+ # 'minio_access_key' \
+ # 'minio_secret_key'
+#
+ #- run:
+ # name: Create test bucket
+ # command: |
+ # mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')"
+ # kubectl exec "${mc_pod}" -- \
+ # mc mb --region 'test_region' minio/logs
+#
+ #- run:
+ # name: Build docker image
+ # command: |
+ # make docker DOCKER_TAG=local
+#
+ #- run:
+ # name: Test
+ # command: |
+ # hack/test.sh
workflows:
version: 2
@@ -143,8 +145,8 @@ workflows:
jobs:
- helm/lint-chart:
filters:
- tags:
- ignore: /.*/
+ tags:
+ ignore: /.*/
- helm/publish-chart:
context: helm
@@ -153,4 +155,3 @@ workflows:
ignore: /.*/
tags:
only: /chart\/.*\/\d+.\d+.\d+.*/
-
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 8e266a19c..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-name: Bug report
-about: Report a bug or features that are not working as intended
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index bbcbbe7d6..000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 1da98c4e1..000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,34 +0,0 @@
-| Q | A
-| --------------- | ---
-| Bug fix? | no|yes
-| New feature? | no|yes
-| API breaks? | no|yes
-| Deprecations? | no|yes
-| Related tickets | fixes #X, partially #Y, mentioned in #Z
-| License | Apache 2.0
-
-
-### What's in this PR?
-
-
-
-### Why?
-
-
-
-### Additional context
-
-
-
-### Checklist
-
-
-- [ ] Implementation tested (with at least one cloud provider)
-- [ ] Error handling code meets the [guideline](https://github.com/banzaicloud/pipeline/blob/master/docs/error-handling-guide.md)
-- [ ] Logging code meets the guideline (TODO)
-- [ ] User guide and development docs updated (if needed)
-- [ ] Related Helm chart(s) updated (if needed)
-
-### To Do
-
-- [ ] If the PR is not complete but you want to discuss the approach, list what remains to be done here
diff --git a/Dockerfile b/Dockerfile
index 7178a61af..71ce731a8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,24 +1,26 @@
-FROM golang:1.11-alpine as golang
-
-RUN apk add --update --no-cache ca-certificates curl git make
-RUN go get -u github.com/golang/dep/cmd/dep
-
-ADD Gopkg.toml /go/src/github.com/banzaicloud/logging-operator/Gopkg.toml
-ADD Gopkg.lock /go/src/github.com/banzaicloud/logging-operator/Gopkg.lock
-
-WORKDIR /go/src/github.com/banzaicloud/logging-operator
-RUN dep ensure -v -vendor-only
-ADD . /go/src/github.com/banzaicloud/logging-operator
-RUN go install ./cmd/manager
-
-
-FROM alpine:3.8
-
-RUN apk add --no-cache ca-certificates
-
-COPY --from=golang /go/bin/manager /usr/local/bin/logging-operator
-
-RUN adduser -D logging-operator
-USER logging-operator
-
-ENTRYPOINT ["/usr/local/bin/logging-operator"]
\ No newline at end of file
+# Build the manager binary
+FROM golang:1.13 as builder
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY main.go main.go
+COPY api/ api/
+COPY controllers/ controllers/
+COPY pkg/ pkg/
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
+
+# Use distroless as minimal base image to package the manager binary
+# Refer to https://github.com/GoogleContainerTools/distroless for more details
+FROM gcr.io/distroless/static:latest
+WORKDIR /
+COPY --from=builder /workspace/manager .
+ENTRYPOINT ["/manager"]
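+
+# To build this image locally (the tag below is illustrative):
+#   docker build -t banzaicloud/logging-operator:local .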
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index a8c55c94b..000000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,1012 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:2173c429b0c4654deb4f3e8d1f503c374f93a6b5549d74f9cba797c1e787f8e4"
- name = "cloud.google.com/go"
- packages = ["compute/metadata"]
- pruneopts = "NT"
- revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
- version = "v0.36.0"
-
-[[projects]]
- digest = "1:25870183293a3fb61cc9afd060a61d63a486f091db72af01a8ea3449f5ca530d"
- name = "github.com/Masterminds/goutils"
- packages = ["."]
- pruneopts = "NT"
- revision = "41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:a26f8da48b22e6176c1c6a2459904bb30bd0c49ada04b2963c2c3a203e81a620"
- name = "github.com/Masterminds/semver"
- packages = ["."]
- pruneopts = "NT"
- revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
- version = "v1.4.2"
-
-[[projects]]
- digest = "1:b3bf7ebdab400adfa4d81687848571417ded5618231ef58124adf5544cab5e59"
- name = "github.com/Masterminds/sprig"
- packages = ["."]
- pruneopts = "NT"
- revision = "b1fe2752acccf8c3d7f8a1e7c75c7ae7d83a1975"
- version = "v2.18.0"
-
-[[projects]]
- digest = "1:0a111edd8693fd977f42a0c4f199a0efb13c20aec9da99ad8830c7bb6a87e8d6"
- name = "github.com/PuerkitoBio/purell"
- packages = ["."]
- pruneopts = "NT"
- revision = "44968752391892e1b0d0b821ee79e9a85fa13049"
- version = "v1.1.1"
-
-[[projects]]
- branch = "master"
- digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727"
- name = "github.com/PuerkitoBio/urlesc"
- packages = ["."]
- pruneopts = "NT"
- revision = "de5bf2ad457846296e2031421a34e2568e304e35"
-
-[[projects]]
- digest = "1:680b63a131506e668818d630d3ca36123ff290afa0afc9f4be21940adca3f27d"
- name = "github.com/appscode/jsonpatch"
- packages = ["."]
- pruneopts = "NT"
- revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2"
- version = "1.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = "NT"
- revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
- digest = "1:c61f4f97321a37adcb5b4fd4fd61209cd553e46c99ee606c465553541b12a229"
- name = "github.com/coreos/prometheus-operator"
- packages = [
- "pkg/apis/monitoring",
- "pkg/apis/monitoring/v1",
- "pkg/client/versioned/scheme",
- "pkg/client/versioned/typed/monitoring/v1",
- ]
- pruneopts = "NT"
- revision = "72ec4b9b16ef11700724dc71fec77112536eed40"
- version = "v0.26.0"
-
-[[projects]]
- digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = "NT"
- revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:2453249730493850718f891fb40b8f1bc932a0265384fc85b269dc04a01d4673"
- name = "github.com/emicklei/go-restful"
- packages = [
- ".",
- "log",
- ]
- pruneopts = "NT"
- revision = "85d198d05a92d31823b852b4a5928114912e8949"
- version = "v2.9.0"
-
-[[projects]]
- digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
- name = "github.com/ghodss/yaml"
- packages = ["."]
- pruneopts = "NT"
- revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
- version = "v1.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9"
- name = "github.com/go-logr/logr"
- packages = ["."]
- pruneopts = "NT"
- revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e"
-
-[[projects]]
- digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687"
- name = "github.com/go-logr/zapr"
- packages = ["."]
- pruneopts = "NT"
- revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab"
- version = "v0.1.0"
-
-[[projects]]
- digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441"
- name = "github.com/go-openapi/jsonpointer"
- packages = ["."]
- pruneopts = "NT"
- revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
- version = "v0.18.0"
-
-[[projects]]
- digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
- name = "github.com/go-openapi/jsonreference"
- packages = ["."]
- pruneopts = "NT"
- revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3"
- version = "v0.18.0"
-
-[[projects]]
- branch = "master"
- digest = "1:8f80caf2fa31f78a035f33981c9685013033073b53f344f579e60fa69f0c6670"
- name = "github.com/go-openapi/spec"
- packages = ["."]
- pruneopts = "NT"
- revision = "53d776530bf78a11b03a7b52dd8a083086b045e5"
-
-[[projects]]
- digest = "1:dc0f590770e5a6c70ea086232324f7b7dc4857c60eca63ab8ff78e0a5cfcdbf3"
- name = "github.com/go-openapi/swag"
- packages = ["."]
- pruneopts = "NT"
- revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909"
- version = "v0.18.0"
-
-[[projects]]
- digest = "1:9059915429f7f3a5f18cfa6b7cab9a28721d7ac6db4079a62044aa229eb7f2a8"
- name = "github.com/gobuffalo/envy"
- packages = ["."]
- pruneopts = "NT"
- revision = "fa0dfdc10b5366ce365b7d9d1755a03e4e797bc5"
- version = "v1.6.15"
-
-[[projects]]
- digest = "1:0b39706cfa32c1ba9e14435b5844d04aef81b60f44b6077e61e0607d56692603"
- name = "github.com/gogo/protobuf"
- packages = [
- "proto",
- "sortkeys",
- ]
- pruneopts = "NT"
- revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
- version = "v1.2.1"
-
-[[projects]]
- branch = "master"
- digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
- name = "github.com/golang/glog"
- packages = ["."]
- pruneopts = "NT"
- revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
-
-[[projects]]
- branch = "master"
- digest = "1:20b774dcfdf0fff3148432beb828c52404f3eb3d70b7ce71ae0356ed6cbc2bae"
- name = "github.com/golang/groupcache"
- packages = ["lru"]
- pruneopts = "NT"
- revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
-
-[[projects]]
- digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/timestamp",
- ]
- pruneopts = "NT"
- revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
- version = "v1.2.0"
-
-[[projects]]
- branch = "master"
- digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
- name = "github.com/google/btree"
- packages = ["."]
- pruneopts = "NT"
- revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
-
-[[projects]]
- branch = "master"
- digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
- name = "github.com/google/gofuzz"
- packages = ["."]
- pruneopts = "NT"
- revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
-
-[[projects]]
- digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
- name = "github.com/google/uuid"
- packages = ["."]
- pruneopts = "NT"
- revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96"
- name = "github.com/googleapis/gnostic"
- packages = [
- "OpenAPIv2",
- "compiler",
- "extensions",
- ]
- pruneopts = "NT"
- revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:c44f4c3b105e9a06f278c0d12982c915d12cd1537d252391904444777a6791df"
- name = "github.com/goph/emperror"
- packages = ["."]
- pruneopts = "NT"
- revision = "4cdd86c173cfed1f47be88bd88327140f81bcede"
- version = "v0.16.0"
-
-[[projects]]
- branch = "master"
- digest = "1:bb7bd892abcb75ef819ce2efab9d54d22b7e38dc05ffac55428bb0578b52912b"
- name = "github.com/gregjones/httpcache"
- packages = [
- ".",
- "diskcache",
- ]
- pruneopts = "NT"
- revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f"
-
-[[projects]]
- digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
- name = "github.com/hashicorp/golang-lru"
- packages = [
- ".",
- "simplelru",
- ]
- pruneopts = "NT"
- revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
- version = "v0.5.0"
-
-[[projects]]
- digest = "1:dc54242755f5b6721dd880843de6e45fe234838ea9149ec8249951880fd5802f"
- name = "github.com/huandu/xstrings"
- packages = ["."]
- pruneopts = "NT"
- revision = "f02667b379e2fb5916c3cda2cf31e0eb885d79f8"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
- name = "github.com/imdario/mergo"
- packages = ["."]
- pruneopts = "NT"
- revision = "7c29201646fa3de8506f701213473dd407f19646"
- version = "v0.3.7"
-
-[[projects]]
- digest = "1:f5b9328966ccea0970b1d15075698eff0ddb3e75889560aad2e9f76b289b536a"
- name = "github.com/joho/godotenv"
- packages = ["."]
- pruneopts = "NT"
- revision = "23d116af351c84513e1946b527c88823e476be13"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d"
- name = "github.com/json-iterator/go"
- packages = ["."]
- pruneopts = "NT"
- revision = "1624edc4454b8682399def8740d46db5e4362ba4"
- version = "v1.1.5"
-
-[[projects]]
- digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
- name = "github.com/konsorten/go-windows-terminal-sequences"
- packages = ["."]
- pruneopts = "NT"
- revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
- version = "v1.0.1"
-
-[[projects]]
- branch = "master"
- digest = "1:4925ec3736ef6c299cfcf61597782e3d66ec13114f7476019d04c742a7be55d0"
- name = "github.com/mailru/easyjson"
- packages = [
- "buffer",
- "jlexer",
- "jwriter",
- ]
- pruneopts = "NT"
- revision = "6243d8e04c3f819e79757e8bc3faa15c3cb27003"
-
-[[projects]]
- digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
- name = "github.com/markbates/inflect"
- packages = ["."]
- pruneopts = "NT"
- revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6"
- version = "v1.0.4"
-
-[[projects]]
- digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = "NT"
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- pruneopts = "NT"
- revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
- version = "1.0.3"
-
-[[projects]]
- digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
- name = "github.com/modern-go/reflect2"
- packages = ["."]
- pruneopts = "NT"
- revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
- version = "1.0.1"
-
-[[projects]]
- digest = "1:df8e741cd0f86087367f3bcfeb1cf237e96fada71194b6d4cee9412d221ec763"
- name = "github.com/operator-framework/operator-sdk"
- packages = [
- "pkg/k8sutil",
- "pkg/leader",
- "pkg/log/zap",
- "pkg/metrics",
- "version",
- ]
- pruneopts = "NT"
- revision = "6754b70169f1b62355516947270e33b9f73d8159"
- version = "v0.5.0"
-
-[[projects]]
- digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
- name = "github.com/pborman/uuid"
- packages = ["."]
- pruneopts = "NT"
- revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
- version = "v1.2"
-
-[[projects]]
- branch = "master"
- digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31"
- name = "github.com/petar/GoLLRB"
- packages = ["llrb"]
- pruneopts = "NT"
- revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
-
-[[projects]]
- digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b"
- name = "github.com/peterbourgon/diskv"
- packages = ["."]
- pruneopts = "NT"
- revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
- version = "v2.0.1"
-
-[[projects]]
- digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
- name = "github.com/pkg/errors"
- packages = ["."]
- pruneopts = "NT"
- revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
- version = "v0.8.1"
-
-[[projects]]
- digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/internal",
- "prometheus/promhttp",
- ]
- pruneopts = "NT"
- revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
- version = "v0.9.2"
-
-[[projects]]
- branch = "master"
- digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = "NT"
- revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
-
-[[projects]]
- digest = "1:30261b5e263b5c4fb40571b53a41a99c96016c6b1b2c45c1cefd226fc3f6304b"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = "NT"
- revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
- version = "v0.2.0"
-
-[[projects]]
- branch = "master"
- digest = "1:1c282f5c094061ce301d1ea3098799fc907ac1399e9f064c463787323a7b7340"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/util",
- "iostats",
- "nfs",
- "xfs",
- ]
- pruneopts = "NT"
- revision = "6ed1f7e1041181781dd2826d3001075d011a80cc"
-
-[[projects]]
- digest = "1:fcef1ce61da6f8f6f115154fb0e0e5b159fe11656839ba1e6061372711c013ee"
- name = "github.com/rogpeppe/go-internal"
- packages = [
- "modfile",
- "module",
- "semver",
- ]
- pruneopts = "NT"
- revision = "1cf9852c553c5b7da2d5a4a091129a7822fed0c9"
- version = "v1.2.2"
-
-[[projects]]
- digest = "1:1f84287a4ca2c8f729d8155ba4c45915f5854ebbd214e406070779753da68422"
- name = "github.com/sirupsen/logrus"
- packages = ["."]
- pruneopts = "NT"
- revision = "e1e72e9de974bd926e5c56f83753fba2df402ce5"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:1bc08ec221c4fb25e6f2c019b23fe989fb44573c696983d8e403a3b76cc378e1"
- name = "github.com/spf13/afero"
- packages = [
- ".",
- "mem",
- ]
- pruneopts = "NT"
- revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5"
- version = "v1.2.1"
-
-[[projects]]
- digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
- name = "github.com/spf13/pflag"
- packages = ["."]
- pruneopts = "NT"
- revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
- version = "v1.0.3"
-
-[[projects]]
- digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
- name = "go.uber.org/atomic"
- packages = ["."]
- pruneopts = "NT"
- revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
- version = "v1.3.2"
-
-[[projects]]
- digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e"
- name = "go.uber.org/multierr"
- packages = ["."]
- pruneopts = "NT"
- revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a"
- name = "go.uber.org/zap"
- packages = [
- ".",
- "buffer",
- "internal/bufferpool",
- "internal/color",
- "internal/exit",
- "zapcore",
- ]
- pruneopts = "NT"
- revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
- version = "v1.9.1"
-
-[[projects]]
- branch = "master"
- digest = "1:b19fb19351db5de242e3f1203e63c207c69bf4f4df4822b4ef15220e0204e0e4"
- name = "golang.org/x/crypto"
- packages = [
- "pbkdf2",
- "scrypt",
- "ssh/terminal",
- ]
- pruneopts = "NT"
- revision = "7f87c0fbb88b590338857bcb720678c2583d4dea"
-
-[[projects]]
- branch = "master"
- digest = "1:60c1f5371132225f21f849a13e379d55c4512ac9ed4b37e7fa33ea0fedeb8480"
- name = "golang.org/x/net"
- packages = [
- "context",
- "context/ctxhttp",
- "http/httpguts",
- "http2",
- "http2/hpack",
- "idna",
- ]
- pruneopts = "NT"
- revision = "fe579d43d83210096a79b46dcca0e3721058393a"
-
-[[projects]]
- branch = "master"
- digest = "1:22a51305a9f13b8c8ca91c335a0da16a1a7b537155e677e45d7905465e457e87"
- name = "golang.org/x/oauth2"
- packages = [
- ".",
- "google",
- "internal",
- "jws",
- "jwt",
- ]
- pruneopts = "NT"
- revision = "529b322ea34655aa15fb32e063f3d4d3cf803cac"
-
-[[projects]]
- branch = "master"
- digest = "1:90abfd79711e2d0ce66e6d23a1b652f8e16c76e12a2ef4b255d1bf0ff4f254b8"
- name = "golang.org/x/sys"
- packages = [
- "unix",
- "windows",
- ]
- pruneopts = "NT"
- revision = "cc5685c2db1239775905f3911f0067c0fa74762f"
-
-[[projects]]
- digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a"
- name = "golang.org/x/text"
- packages = [
- "collate",
- "collate/build",
- "internal/colltab",
- "internal/gen",
- "internal/tag",
- "internal/triegen",
- "internal/ucd",
- "language",
- "secure/bidirule",
- "transform",
- "unicode/bidi",
- "unicode/cldr",
- "unicode/norm",
- "unicode/rangetable",
- "width",
- ]
- pruneopts = "NT"
- revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
- version = "v0.3.0"
-
-[[projects]]
- branch = "master"
- digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
- name = "golang.org/x/time"
- packages = ["rate"]
- pruneopts = "NT"
- revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
-
-[[projects]]
- branch = "master"
- digest = "1:dfddac8ab4fec08ac3679d4f64f6054a6be3c849faf6ea05e525e40f7aeeb133"
- name = "golang.org/x/tools"
- packages = [
- "go/ast/astutil",
- "go/gcexportdata",
- "go/internal/cgo",
- "go/internal/gcimporter",
- "go/internal/packagesdriver",
- "go/packages",
- "go/types/typeutil",
- "imports",
- "internal/fastwalk",
- "internal/gopathwalk",
- "internal/module",
- "internal/semver",
- ]
- pruneopts = "NT"
- revision = "2dc4ef2775b8122dd5afe2c18fd6f775e87f89e5"
-
-[[projects]]
- digest = "1:902ffa11f1d8c19c12b05cabffe69e1a16608ad03a8899ebcb9c6bde295660ae"
- name = "google.golang.org/appengine"
- packages = [
- ".",
- "internal",
- "internal/app_identity",
- "internal/base",
- "internal/datastore",
- "internal/log",
- "internal/modules",
- "internal/remote_api",
- "internal/urlfetch",
- "urlfetch",
- ]
- pruneopts = "NT"
- revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
- version = "v1.4.0"
-
-[[projects]]
- digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
- name = "gopkg.in/inf.v0"
- packages = ["."]
- pruneopts = "NT"
- revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
- version = "v0.9.1"
-
-[[projects]]
- digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = "NT"
- revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
- version = "v2.2.2"
-
-[[projects]]
- digest = "1:6fa82ea248029bbbdddade20c06ab177ff6e485e5e45e48b045707415b7efd34"
- name = "k8s.io/api"
- packages = [
- "admission/v1beta1",
- "admissionregistration/v1alpha1",
- "admissionregistration/v1beta1",
- "apps/v1",
- "apps/v1beta1",
- "apps/v1beta2",
- "auditregistration/v1alpha1",
- "authentication/v1",
- "authentication/v1beta1",
- "authorization/v1",
- "authorization/v1beta1",
- "autoscaling/v1",
- "autoscaling/v2beta1",
- "autoscaling/v2beta2",
- "batch/v1",
- "batch/v1beta1",
- "batch/v2alpha1",
- "certificates/v1beta1",
- "coordination/v1beta1",
- "core/v1",
- "events/v1beta1",
- "extensions/v1beta1",
- "networking/v1",
- "policy/v1beta1",
- "rbac/v1",
- "rbac/v1alpha1",
- "rbac/v1beta1",
- "scheduling/v1alpha1",
- "scheduling/v1beta1",
- "settings/v1alpha1",
- "storage/v1",
- "storage/v1alpha1",
- "storage/v1beta1",
- ]
- pruneopts = "NT"
- revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
-
-[[projects]]
- digest = "1:c6f23048e162e65d586c809fd02e263e180ad157f110df17437c22517bb59a4b"
- name = "k8s.io/apiextensions-apiserver"
- packages = [
- "pkg/apis/apiextensions",
- "pkg/apis/apiextensions/v1beta1",
- ]
- pruneopts = "NT"
- revision = "0fe22c71c47604641d9aa352c785b7912c200562"
-
-[[projects]]
- digest = "1:15b5c41ff6faa4d0400557d4112d6337e1abc961c65513d44fce7922e32c9ca7"
- name = "k8s.io/apimachinery"
- packages = [
- "pkg/api/errors",
- "pkg/api/meta",
- "pkg/api/resource",
- "pkg/apis/meta/internalversion",
- "pkg/apis/meta/v1",
- "pkg/apis/meta/v1/unstructured",
- "pkg/apis/meta/v1beta1",
- "pkg/conversion",
- "pkg/conversion/queryparams",
- "pkg/fields",
- "pkg/labels",
- "pkg/runtime",
- "pkg/runtime/schema",
- "pkg/runtime/serializer",
- "pkg/runtime/serializer/json",
- "pkg/runtime/serializer/protobuf",
- "pkg/runtime/serializer/recognizer",
- "pkg/runtime/serializer/streaming",
- "pkg/runtime/serializer/versioning",
- "pkg/selection",
- "pkg/types",
- "pkg/util/cache",
- "pkg/util/clock",
- "pkg/util/diff",
- "pkg/util/errors",
- "pkg/util/framer",
- "pkg/util/intstr",
- "pkg/util/json",
- "pkg/util/mergepatch",
- "pkg/util/naming",
- "pkg/util/net",
- "pkg/util/runtime",
- "pkg/util/sets",
- "pkg/util/strategicpatch",
- "pkg/util/uuid",
- "pkg/util/validation",
- "pkg/util/validation/field",
- "pkg/util/wait",
- "pkg/util/yaml",
- "pkg/version",
- "pkg/watch",
- "third_party/forked/golang/json",
- "third_party/forked/golang/reflect",
- ]
- pruneopts = "NT"
- revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
-
-[[projects]]
- digest = "1:c904a3d70131b33df36e4e51b574226b82308fc1ea66964aa21095a95d453fc9"
- name = "k8s.io/client-go"
- packages = [
- "discovery",
- "dynamic",
- "kubernetes",
- "kubernetes/scheme",
- "kubernetes/typed/admissionregistration/v1alpha1",
- "kubernetes/typed/admissionregistration/v1beta1",
- "kubernetes/typed/apps/v1",
- "kubernetes/typed/apps/v1beta1",
- "kubernetes/typed/apps/v1beta2",
- "kubernetes/typed/auditregistration/v1alpha1",
- "kubernetes/typed/authentication/v1",
- "kubernetes/typed/authentication/v1beta1",
- "kubernetes/typed/authorization/v1",
- "kubernetes/typed/authorization/v1beta1",
- "kubernetes/typed/autoscaling/v1",
- "kubernetes/typed/autoscaling/v2beta1",
- "kubernetes/typed/autoscaling/v2beta2",
- "kubernetes/typed/batch/v1",
- "kubernetes/typed/batch/v1beta1",
- "kubernetes/typed/batch/v2alpha1",
- "kubernetes/typed/certificates/v1beta1",
- "kubernetes/typed/coordination/v1beta1",
- "kubernetes/typed/core/v1",
- "kubernetes/typed/events/v1beta1",
- "kubernetes/typed/extensions/v1beta1",
- "kubernetes/typed/networking/v1",
- "kubernetes/typed/policy/v1beta1",
- "kubernetes/typed/rbac/v1",
- "kubernetes/typed/rbac/v1alpha1",
- "kubernetes/typed/rbac/v1beta1",
- "kubernetes/typed/scheduling/v1alpha1",
- "kubernetes/typed/scheduling/v1beta1",
- "kubernetes/typed/settings/v1alpha1",
- "kubernetes/typed/storage/v1",
- "kubernetes/typed/storage/v1alpha1",
- "kubernetes/typed/storage/v1beta1",
- "pkg/apis/clientauthentication",
- "pkg/apis/clientauthentication/v1alpha1",
- "pkg/apis/clientauthentication/v1beta1",
- "pkg/version",
- "plugin/pkg/client/auth/exec",
- "plugin/pkg/client/auth/gcp",
- "rest",
- "rest/watch",
- "restmapper",
- "third_party/forked/golang/template",
- "tools/auth",
- "tools/cache",
- "tools/clientcmd",
- "tools/clientcmd/api",
- "tools/clientcmd/api/latest",
- "tools/clientcmd/api/v1",
- "tools/leaderelection",
- "tools/leaderelection/resourcelock",
- "tools/metrics",
- "tools/pager",
- "tools/record",
- "tools/reference",
- "transport",
- "util/buffer",
- "util/cert",
- "util/connrotation",
- "util/flowcontrol",
- "util/homedir",
- "util/integer",
- "util/jsonpath",
- "util/retry",
- "util/workqueue",
- ]
- pruneopts = "NT"
- revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
-
-[[projects]]
- digest = "1:dc1ae99dcab96913d81ae970b1f7a7411a54199b14bfb17a7e86f9a56979c720"
- name = "k8s.io/code-generator"
- packages = [
- "cmd/client-gen",
- "cmd/client-gen/args",
- "cmd/client-gen/generators",
- "cmd/client-gen/generators/fake",
- "cmd/client-gen/generators/scheme",
- "cmd/client-gen/generators/util",
- "cmd/client-gen/path",
- "cmd/client-gen/types",
- "cmd/conversion-gen",
- "cmd/conversion-gen/args",
- "cmd/conversion-gen/generators",
- "cmd/deepcopy-gen",
- "cmd/deepcopy-gen/args",
- "cmd/defaulter-gen",
- "cmd/defaulter-gen/args",
- "cmd/informer-gen",
- "cmd/informer-gen/args",
- "cmd/informer-gen/generators",
- "cmd/lister-gen",
- "cmd/lister-gen/args",
- "cmd/lister-gen/generators",
- "pkg/util",
- ]
- pruneopts = "T"
- revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
-
-[[projects]]
- branch = "master"
- digest = "1:2b9071c93303f1196cfe959c7f7f69ed1e4a5180f240a259536c5886f79f86d4"
- name = "k8s.io/gengo"
- packages = [
- "args",
- "examples/deepcopy-gen/generators",
- "examples/defaulter-gen/generators",
- "examples/set-gen/sets",
- "generator",
- "namer",
- "parser",
- "types",
- ]
- pruneopts = "T"
- revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
-
-[[projects]]
- digest = "1:29f93bb84d907a2c035e729e19d66fe52165d8c905cb3ef1920140d76ae6afaf"
- name = "k8s.io/klog"
- packages = ["."]
- pruneopts = "NT"
- revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:c48a795cd7048bb1888273bc604b6e69b22f9b8089c3df65f77cc527757b515c"
- name = "k8s.io/kube-openapi"
- packages = [
- "cmd/openapi-gen",
- "cmd/openapi-gen/args",
- "pkg/common",
- "pkg/generators",
- "pkg/generators/rules",
- "pkg/util/proto",
- "pkg/util/sets",
- ]
- pruneopts = "NT"
- revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
-
-[[projects]]
- digest = "1:06035489efbd51ccface65fc878ceeb849aba05b2f9443c8993f363fc96e80ac"
- name = "sigs.k8s.io/controller-runtime"
- packages = [
- "pkg/cache",
- "pkg/cache/internal",
- "pkg/client",
- "pkg/client/apiutil",
- "pkg/client/config",
- "pkg/controller",
- "pkg/event",
- "pkg/handler",
- "pkg/internal/controller",
- "pkg/internal/controller/metrics",
- "pkg/internal/recorder",
- "pkg/leaderelection",
- "pkg/manager",
- "pkg/metrics",
- "pkg/patch",
- "pkg/predicate",
- "pkg/reconcile",
- "pkg/recorder",
- "pkg/runtime/inject",
- "pkg/runtime/log",
- "pkg/runtime/scheme",
- "pkg/runtime/signals",
- "pkg/source",
- "pkg/source/internal",
- "pkg/webhook/admission",
- "pkg/webhook/admission/types",
- "pkg/webhook/internal/metrics",
- "pkg/webhook/types",
- ]
- pruneopts = "NT"
- revision = "12d98582e72927b6cd0123e2b4e819f9341ce62c"
- version = "v0.1.10"
-
-[[projects]]
- digest = "1:0a14ea9a2647d064bb9d48b2de78306e74b196681efd7b654eb0b518d90c2e8d"
- name = "sigs.k8s.io/controller-tools"
- packages = [
- "pkg/crd/generator",
- "pkg/crd/util",
- "pkg/internal/codegen",
- "pkg/internal/codegen/parse",
- "pkg/internal/general",
- "pkg/util",
- ]
- pruneopts = "NT"
- revision = "950a0e88e4effb864253b3c7504b326cc83b9d11"
- version = "v0.1.8"
-
-[[projects]]
- digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
- name = "sigs.k8s.io/yaml"
- packages = ["."]
- pruneopts = "NT"
- revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
- version = "v1.1.0"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "github.com/Masterminds/sprig",
- "github.com/go-logr/logr",
- "github.com/go-openapi/spec",
- "github.com/goph/emperror",
- "github.com/operator-framework/operator-sdk/pkg/k8sutil",
- "github.com/operator-framework/operator-sdk/pkg/leader",
- "github.com/operator-framework/operator-sdk/pkg/log/zap",
- "github.com/operator-framework/operator-sdk/pkg/metrics",
- "github.com/operator-framework/operator-sdk/version",
- "github.com/sirupsen/logrus",
- "github.com/spf13/pflag",
- "k8s.io/api/apps/v1",
- "k8s.io/api/core/v1",
- "k8s.io/api/rbac/v1",
- "k8s.io/apimachinery/pkg/api/errors",
- "k8s.io/apimachinery/pkg/apis/meta/v1",
- "k8s.io/apimachinery/pkg/runtime",
- "k8s.io/apimachinery/pkg/runtime/schema",
- "k8s.io/apimachinery/pkg/types",
- "k8s.io/apimachinery/pkg/util/intstr",
- "k8s.io/client-go/plugin/pkg/client/auth/gcp",
- "k8s.io/code-generator/cmd/client-gen",
- "k8s.io/code-generator/cmd/conversion-gen",
- "k8s.io/code-generator/cmd/deepcopy-gen",
- "k8s.io/code-generator/cmd/defaulter-gen",
- "k8s.io/code-generator/cmd/informer-gen",
- "k8s.io/code-generator/cmd/lister-gen",
- "k8s.io/gengo/args",
- "k8s.io/kube-openapi/cmd/openapi-gen",
- "k8s.io/kube-openapi/pkg/common",
- "sigs.k8s.io/controller-runtime/pkg/client",
- "sigs.k8s.io/controller-runtime/pkg/client/config",
- "sigs.k8s.io/controller-runtime/pkg/controller",
- "sigs.k8s.io/controller-runtime/pkg/handler",
- "sigs.k8s.io/controller-runtime/pkg/manager",
- "sigs.k8s.io/controller-runtime/pkg/reconcile",
- "sigs.k8s.io/controller-runtime/pkg/runtime/log",
- "sigs.k8s.io/controller-runtime/pkg/runtime/scheme",
- "sigs.k8s.io/controller-runtime/pkg/runtime/signals",
- "sigs.k8s.io/controller-runtime/pkg/source",
- "sigs.k8s.io/controller-tools/pkg/crd/generator",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100644
index b4c4b8cda..000000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Force dep to vendor the code generators, which aren't imported just used at dev time.
-required = [
- "k8s.io/code-generator/cmd/defaulter-gen",
- "k8s.io/code-generator/cmd/deepcopy-gen",
- "k8s.io/code-generator/cmd/conversion-gen",
- "k8s.io/code-generator/cmd/client-gen",
- "k8s.io/code-generator/cmd/lister-gen",
- "k8s.io/code-generator/cmd/informer-gen",
- "k8s.io/kube-openapi/cmd/openapi-gen",
- "k8s.io/gengo/args",
- "sigs.k8s.io/controller-tools/pkg/crd/generator",
-]
-
-[[override]]
- name = "k8s.io/code-generator"
- # revision for tag "kubernetes-1.13.1"
- revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
-
-[[override]]
- name = "k8s.io/kube-openapi"
- revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
-
-[[override]]
- name = "github.com/go-openapi/spec"
- branch = "master"
-
-[[override]]
- name = "sigs.k8s.io/controller-tools"
- version = "=v0.1.8"
-
-[[override]]
- name = "k8s.io/api"
- # revision for tag "kubernetes-1.13.1"
- revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
-
-[[override]]
- name = "k8s.io/apiextensions-apiserver"
- # revision for tag "kubernetes-1.13.1"
- revision = "0fe22c71c47604641d9aa352c785b7912c200562"
-
-[[override]]
- name = "k8s.io/apimachinery"
- # revision for tag "kubernetes-1.13.1"
- revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
-
-[[override]]
- name = "k8s.io/client-go"
- # revision for tag "kubernetes-1.13.1"
- revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
-
-[[override]]
- name = "github.com/coreos/prometheus-operator"
- version = "=v0.26.0"
-
-[[override]]
- name = "sigs.k8s.io/controller-runtime"
- version = "=v0.1.10"
-
-[[constraint]]
- name = "github.com/operator-framework/operator-sdk"
- # The version rule is used for a specific release and the master branch for in between releases.
- # branch = "master" #osdk_branch_annotation
- version = "=v0.5.0" #osdk_version_annotation
-
-[prune]
- go-tests = true
- non-go = true
-
- [[prune.project]]
- name = "k8s.io/code-generator"
- non-go = false
-
- [[prune.project]]
- name = "k8s.io/gengo"
- non-go = false
-
-[[constraint]]
- name = "github.com/Masterminds/sprig"
- version = "2.18.0"
diff --git a/LICENCE b/LICENSE
similarity index 99%
rename from LICENCE
rename to LICENSE
index 261eeb9e9..f49a4e16e 100644
--- a/LICENCE
+++ b/LICENSE
@@ -198,4 +198,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
- limitations under the License.
+ limitations under the License.
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 344a6f983..f1e1bce70 100644
--- a/Makefile
+++ b/Makefile
@@ -1,34 +1,35 @@
+# this makefile was generated by
+include Makefile.app
+
+OS = $(shell uname)
+
+# Image URL to use all building/pushing image targets
+IMG ?= controller:latest
+# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
+CRD_OPTIONS ?= "crd:trivialVersions=true"
+
+KUBEBUILDER_VERSION = 2.0.0
VERSION := $(shell git describe --abbrev=0 --tags)
DOCKER_IMAGE = banzaicloud/logging-operator
DOCKER_TAG ?= ${VERSION}
GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./client/*")
+GOFILES_NOPLUGINS = $(shell find . -type f -name '*.go' -not -path "./pkg/model/filter/*" -not -path "./pkg/model/output/*" -not -path "./pkg/model/input/*")
PKGS=$(shell go list ./... | grep -v /vendor)
-DEP_VERSION = 0.5.0
-
-bin/dep: bin/dep-${DEP_VERSION}
- @ln -sf dep-${DEP_VERSION} bin/dep
-
-bin/dep-${DEP_VERSION}:
- @mkdir -p bin
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | INSTALL_DIRECTORY=bin DEP_RELEASE_TAG=v${DEP_VERSION} sh
- @mv bin/dep $@
-
-.PHONY: vendor
-vendor: bin/dep ## Install dependencies
- bin/dep ensure -v -vendor-only
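+# Make the locally downloaded kubebuilder, etcd and kube-apiserver binaries available to the tests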
+export KUBEBUILDER_ASSETS := $(PWD)/bin
+export PATH := $(PWD)/bin:$(PATH)
-build: vendor
- go build -v $(PKGS)
+all: manager
+# Generate docs
+.PHONY: docs
+docs:
+ go run cmd/docs.go
check-fmt:
PKGS="${GOFILES_NOVENDOR}" GOFMT="gofmt" ./scripts/fmt-check.sh
-fmt:
- gofmt -w ${GOFILES_NOVENDOR}
-
lint: install-golint
- golint -min_confidence 0.9 -set_exit_status $(PKGS)
+ golint -min_confidence 0.9 -set_exit_status $(GOFILES_NOPLUGINS)
install-golint:
GOLINT_CMD=$(shell command -v golint 2> /dev/null)
@@ -57,10 +58,85 @@ ifndef INEFFASSIGN_CMD
go get -u github.com/gordonklaus/ineffassign
endif
-.PHONY: docker
-docker: ## Build Docker image
- docker build -t ${DOCKER_IMAGE}:${DOCKER_TAG} -f Dockerfile .
+.PHONY: bin/kubebuilder_${KUBEBUILDER_VERSION}
+bin/kubebuilder_${KUBEBUILDER_VERSION}:
+ @mkdir -p bin
+ifeq (${OS}, Darwin)
+	curl -L https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/kubebuilder_${KUBEBUILDER_VERSION}_darwin_amd64.tar.gz | tar -C bin -xzv
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}_darwin_amd64/bin bin/kubebuilder_${KUBEBUILDER_VERSION}
+endif
+ifeq (${OS}, Linux)
+ curl -L https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/kubebuilder_${KUBEBUILDER_VERSION}_linux_amd64.tar.gz | tar -C bin -xzv
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}_linux_amd64/bin bin/kubebuilder_${KUBEBUILDER_VERSION}
+endif
-.PHONY: docs
-docs:
- go run cmd/docgen/docgen.go
\ No newline at end of file
+bin/kubebuilder: bin/kubebuilder_${KUBEBUILDER_VERSION}
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kubebuilder bin/kubebuilder
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kube-apiserver bin/kube-apiserver
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/etcd bin/etcd
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kubectl bin/kubectl
+
+# Run tests
+test: generate fmt vet manifests bin/kubebuilder
+ @which kubebuilder
+ @which etcd
+ kubebuilder version
+ go test ./api/... ./controllers/... ./pkg/... -coverprofile cover.out
+
+# Build manager binary
+manager: generate fmt vet
+ go build -o bin/manager main.go
+
+# Run against the configured Kubernetes cluster in ~/.kube/config
+run: generate fmt vet
+ go run ./main.go --verbose
+
+# Install CRDs into a cluster
+install: manifests
+ kubectl apply -f config/crd/bases
+
+# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
+deploy: manifests
+ kubectl apply -f config/crd/bases
+ kustomize build config/default | kubectl apply -f -
+
+# Generate manifests e.g. CRD, RBAC etc.
+manifests: controller-gen
+ $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+
+# Run go fmt against code
+fmt:
+ go fmt ./...
+
+# Run go vet against code
+vet:
+ go vet ./...
+
+# Generate code
+generate: controller-gen
+ $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/...
+ $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./pkg/model/...
+
+# Build the docker image
+docker-build: test
+ docker build . -t ${IMG}
+ @echo "updating kustomize image patch file for manager resource"
+ sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml
+
+# Push the docker image
+docker-push:
+ docker push ${IMG}
+
+# find or download controller-gen
+# download controller-gen if necessary
+controller-gen:
+ifeq (, $(shell which controller-gen))
+ go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.1
+CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen
+else
+CONTROLLER_GEN=$(shell which controller-gen)
+endif
+
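+# Verify that generated code, manifests and docs are up to date (used by CI)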
+check-diff:
+ $(MAKE) generate manifests docs
+ git diff --exit-code
diff --git a/Makefile.app b/Makefile.app
new file mode 100644
index 000000000..8e59b88a8
--- /dev/null
+++ b/Makefile.app
@@ -0,0 +1,6 @@
+.PHONY: check
+check: test license
+
+.PHONY: license
+license:
+ ./scripts/check-header.sh
\ No newline at end of file
diff --git a/PROJECT b/PROJECT
new file mode 100644
index 000000000..10baf4105
--- /dev/null
+++ b/PROJECT
@@ -0,0 +1,25 @@
+version: "2"
+domain: banzaicloud.io
+repo: github.com/banzaicloud/logging-operator
+resources:
+- group: logging
+ version: v1beta1
+ kind: Fluentbit
+- group: logging
+ version: v1beta1
+ kind: Fluentd
+- group: logging
+ version: v1beta1
+ kind: Flow
+- group: logging
+ version: v1beta1
+ kind: ClusterFlow
+- group: logging
+ version: v1beta1
+ kind: Output
+- group: logging
+ version: v1beta1
+ kind: ClusterOutput
+- group: logging
+ version: v1beta1
+ kind: Logging
diff --git a/README.md b/README.md
index 26ebd0fb7..d62b7c615 100644
--- a/README.md
+++ b/README.md
@@ -24,22 +24,35 @@
-# logging-operator
+# logging-operator v2
-Logging operator for Kubernetes based on Fluentd and Fluent-bit. For more details please follow up with this [post](https://banzaicloud.com/blog/k8s-logging-operator/).
+Logging operator for Kubernetes based on Fluentd and Fluent-bit.
## What is this operator for?
This operator helps you to pack together logging information with your applications. With the help of Custom Resource Definition you can describe the behaviour of your application within its charts. The operator does the rest.
-
+
+
+### Feature highlights
+
+- Namespace isolation
+- Native Kubernetes label selectors
+- Secure communication (TLS)
+- Configuration validation
+- Multiple flow support (route the same logs through different transformations)
+- Multiple [output](docs/plugins/outputs) support (store the same logs in multiple storage backends: S3, GCS, ES, Loki and more...)
+- Multiple logging system support (multiple fluentd and fluent-bit deployments on the same cluster)
### Motivation
-The logging operator automates the deployment and configuration of a Kubernetes logging pipeline. Under the hood the operator configures a fluent-bit daemonset for collecting container logs from the node file system. Fluent-bit enriches the logs with Kubernetes metadata and transfers them to fluentd. Fluentd receives, filters and transfer logs to multiple outputs. The whole flow can be defined in a single custom resource. Your logs will always be transferred on authenticated and encrypted channels.
+The logging operator automates the deployment and configuration of a Kubernetes logging pipeline. Under the hood the operator configures a fluent-bit daemonset for collecting container logs from the node file system. Fluent-bit enriches the logs with Kubernetes metadata and transfers them to fluentd. Fluentd receives, filters, and transfers logs to multiple outputs. Your logs will always be transferred on authenticated and encrypted channels.
##### Blogs
+ - [Logging-Operator v2](https://banzaicloud.com/blog/logging-operator-v2/) (soon)
+
+##### Blogs (general logging and operator v1)
- [Advanced logging on Kubernetes](https://banzaicloud.com/blog/k8s-logging-advanced/)
- [Secure logging on Kubernetes with Fluentd and Fluent Bit](https://banzaicloud.com/blog/k8s-logging-tls/)
- [Centralized logging under Kubernetes](https://banzaicloud.com/blog/k8s-logging/)
@@ -47,7 +60,6 @@ The logging operator automates the deployment and configuration of a Kubernetes
- [And more...](https://banzaicloud.com/tags/logging/)
-
Logging-operator is a core part of the [Pipeline](https://beta.banzaicloud.io) platform, a Cloud Native application and devops platform that natively supports multi- and hybrid-cloud deployments with multiple authentication backends. Check out the developer beta:
@@ -55,16 +67,32 @@ Logging-operator is a core part of the [Pipeline](https://beta.banzaicloud.io) p
+## Architecture
+
+Available custom resources:
+- [logging](/docs/crds.md#loggings) - Represents a logging system. Includes `Fluentd` and `Fluent-bit` configuration. Specifies the `controlNamespace`. Fluentd and Fluent-bit will be deployed in the `controlNamespace`.
+- [output](/docs/crds.md#outputs-clusteroutputs) - Defines an Output for a logging flow. This is a namespaced resource.
+- [flow](/docs/crds.md#flows-clusterflows) - Defines a logging flow with `filters` and `outputs`. You can specify `selectors` to filter logs by labels. Outputs can be `output` or `clusteroutput`. This is a namespaced resource.
+- [clusteroutput](/docs/crds.md#outputs-clusteroutputs) - Defines an output without namespace restriction. Only effective in `controlNamespace`.
+- [clusterflow](/docs/crds.md#flows-clusterflows) - Defines a logging flow without namespace restriction.
+
+The detailed CRD documentation can be found [here](/docs/crds.md).
+
+
+
+*connection between custom resources*
+
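+A minimal sketch of how these resources reference each other is shown below. The field names are illustrative (derived from the Go types under `api/v1beta1`); see the [CRD documentation](/docs/crds.md) for the authoritative schema.
+
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example-logging
+spec:
+  controlNamespace: logging
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: example-output
+  namespace: default
+spec: {} # output plugin configuration goes here, see docs/plugins/outputs
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: example-flow
+  namespace: default
+spec:
+  selectors:
+    app: example
+  outputRefs:
+    - example-output
+```
+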
---
## Contents
- Installation
- [Deploy with Helm](#deploying-with-helm-chart)
- - [Deploy with Manifest](#deploying-with-kubernetes-manifest)
- [Supported Plugins](#supported-plugins)
- Examples
- - [S3 Output](./docs/examples/s3.md)
- - [Elasticsearch Output](./docs/examples/es.md)
+ - [S3 Output](./docs/example-s3.md)
+ - [Elasticsearch Output](./docs/example-es.md)
+ - [Nginx with Elasticsearch Output](./docs/example-es-nginx.md)
+ - [Nginx with Loki Output](./docs/example-loki-nginx.md)
- [Troubleshooting](#troubleshooting)
- [Contributing](#contributing)
---
@@ -85,57 +113,30 @@ $ helm repo update
$ helm install banzaicloud-stable/logging-operator
```
-#### Install FluentD, FluentBit CRs from chart
-```bash
-$ helm install banzaicloud-stable/logging-operator-fluent
-```
-
-
---
-## Deploying with Kubernetes Manifest
-
-```
-# Create all the CRDs used by the Operator
-kubectl create -f deploy/crds/logging_v1alpha1_plugin_crd.yaml
-kubectl create -f deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
-kubectl create -f deploy/crds/logging_v1alpha1_fluentd_crd.yaml
-
-# If RBAC enabled create the required resources
-kubectl create -f deploy/clusterrole.yaml
-kubectl create -f deploy/clusterrole_binding.yaml
-kubectl create -f deploy/service_account.yaml
-
-# Create the Operator
-kubectl create -f deploy/operator.yaml
+## Supported Plugins
-# Create the fluent-bit daemonset by submiting a fluent-bit CR
-kubectl create -f deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
+For the complete list of supported plugins please check the [plugins index](/docs/plugins/index.md).
-# Create the fluentd deployment by submitting a fluentd CR
-kubectl create -f deploy/crds/logging_v1alpha1_fluentd_cr.yaml
+| Name | Type | Description | Status | Version |
+|---------------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|-------------------------------------------------------------------------------------------|
+| [Alibaba](./docs/plugins/outputs/oss.md) | Output | Store logs in the Alibaba Cloud Object Storage Service | GA | [0.0.1](https://github.com/aliyun/fluent-plugin-oss) |
+| [Amazon S3](./docs/plugins/outputs/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) |
+| [Azure](./docs/plugins/outputs/azurestore.md) | Output | Store logs in Azure Storage | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) |
+| [Google Storage](./docs/plugins/outputs/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) |
+| [Grafana Loki](./docs/plugins/outputs/loki.md) | Output | Transfer logs to Loki | Testing | [0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) |
+| [ElasticSearch](./docs/plugins/outputs/elasticsearch.md) | Output | Send your logs to Elasticsearch | GA | [3.5.5](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.5) |
+| [Sumologic](./docs/plugins/outputs/sumologic.md) | Output | Send your logs to Sumologic | GA | [1.5.0](https://github.com/SumoLogic/fluentd-output-sumologic/releases/tag/1.5.0) |
+| [Tag Normaliser](./docs/plugins/filters/tagnormaliser.md) | Parser | Normalise tags for outputs | GA | |
+| [Parser](./docs/plugins/filters/parser.md) | Parser | Parse logs with parser plugin | GA | |
-```
-
-## Supported Plugins
-| Name | Type | Description | Status | Version |
-|-------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|------------------------------------------------------------------------------------------|
-| [Alibaba](./docs/plugins/alibaba.md) | Output | Store logs the Alibaba Cloud Object Storage Service | GA | [0.0.2](https://github.com/jicong/fluent-plugin-oss) |
-| [Amazon S3](./docs/plugins/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) |
-| [Azure](./docs/plugins/azure.md) | Output | Store logs in Azure Storega | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) |
-| [Google Storage](./docs/plugins/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) |
-| [Grafana Loki](./docs/plugins/loki.md) | Output | Transfer logs to Loki | Testing | [0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) |
-| [ElasticSearch](./docs/plugins/parser.md) | Output | Send your logs to Elasticsearch | GA | [3.5.2](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.2) |
-| [HDFS](https://docs.fluentd.org/output/webhdfs) | Output | Fluentd output plugin to write data into Hadoop HDFS over WebHDFS/HttpFs. | GA | [1.2.3](https://github.com/fluent/fluent-plugin-webhdfs/releases/tag/v1.2.3) |
-| [Parser](./docs/plugins/parser.md) | Parser | Parse logs with parser plugin | GA | |
---
## Troubleshooting
If you encounter any problems that the documentation does not address, please [file an issue](https://github.com/banzaicloud/logging-operator/issues) or talk to us on the Banzai Cloud Slack channel [#logging-operator](https://slack.banzaicloud.io/).
-
-
## Contributing
If you find this project useful here's how you can help:
@@ -144,6 +145,8 @@ If you find this project useful here's how you can help:
- Help new users with issues they may encounter
- Support the development of this project and star this repo!
+For more information, please read the [developer documentation](./docs/developers.md).
+
## License
Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com)
diff --git a/api/v1beta1/clusterflow_types.go b/api/v1beta1/clusterflow_types.go
new file mode 100644
index 000000000..c5089ddac
--- /dev/null
+++ b/api/v1beta1/clusterflow_types.go
@@ -0,0 +1,44 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +kubebuilder:object:root=true
+
+// ClusterFlow is the Schema for the clusterflows API
+type ClusterFlow struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec defines the desired state of the ClusterFlow; its loggingRef selects the logging cluster to attach to
+ Spec FlowSpec `json:"spec,omitempty"`
+ Status FlowStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterFlowList contains a list of ClusterFlow
+type ClusterFlowList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ClusterFlow `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ClusterFlow{}, &ClusterFlowList{})
+}
diff --git a/api/v1beta1/clusterflow_types_test.go b/api/v1beta1/clusterflow_types_test.go
new file mode 100644
index 000000000..630cbb716
--- /dev/null
+++ b/api/v1beta1/clusterflow_types_test.go
@@ -0,0 +1,81 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/types"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// These tests are written in BDD-style using Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("ClusterFlow", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *ClusterFlow
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+ // Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Namespace: "foo",
+ Name: "foo",
+ }
+ created = &ClusterFlow{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "foo",
+ },
+ Spec: FlowSpec{
+ Selectors: map[string]string{},
+ OutputRefs: []string{},
+ },
+ Status: FlowStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &ClusterFlow{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1beta1/clusteroutput_types.go b/api/v1beta1/clusteroutput_types.go
new file mode 100644
index 000000000..e85180241
--- /dev/null
+++ b/api/v1beta1/clusteroutput_types.go
@@ -0,0 +1,51 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +kubebuilder:object:root=true
+
+// ClusterOutput is the Schema for the clusteroutputs API
+type ClusterOutput struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ClusterOutputSpec `json:"spec"`
+ Status OutputStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+// ClusterOutputSpec contains the Kubernetes spec for ClusterOutput
+type ClusterOutputSpec struct {
+ OutputSpec `json:",inline"`
+ EnabledNamespaces []string `json:"enabledNamespaces,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterOutputList contains a list of ClusterOutput
+type ClusterOutputList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ClusterOutput `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ClusterOutput{}, &ClusterOutputList{})
+}
diff --git a/api/v1beta1/clusteroutput_types_test.go b/api/v1beta1/clusteroutput_types_test.go
new file mode 100644
index 000000000..8618aaf21
--- /dev/null
+++ b/api/v1beta1/clusteroutput_types_test.go
@@ -0,0 +1,84 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// These tests are written in BDD-style using Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("ClusterOutput", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *ClusterOutput
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+ // Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Name: "foo",
+ Namespace: "foo",
+ }
+ created = &ClusterOutput{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "foo",
+ },
+ Spec: ClusterOutputSpec{
+ OutputSpec: OutputSpec{
+ S3OutputConfig: nil,
+ NullOutputConfig: nil,
+ },
+ },
+ Status: OutputStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &ClusterOutput{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1beta1/common_types.go b/api/v1beta1/common_types.go
new file mode 100644
index 000000000..32f2afc21
--- /dev/null
+++ b/api/v1beta1/common_types.go
@@ -0,0 +1,22 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+// ImageSpec struct holds information about the image specification
+type ImageSpec struct {
+ Repository string `json:"repository"`
+ Tag string `json:"tag"`
+ PullPolicy string `json:"pullPolicy"`
+}
diff --git a/api/v1beta1/flow_types.go b/api/v1beta1/flow_types.go
new file mode 100644
index 000000000..f9a081044
--- /dev/null
+++ b/api/v1beta1/flow_types.go
@@ -0,0 +1,66 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/filter"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// FlowSpec is the Kubernetes spec for Flows
+type FlowSpec struct {
+ Selectors map[string]string `json:"selectors"`
+ Filters []Filter `json:"filters,omitempty"`
+ LoggingRef string `json:"loggingRef,omitempty"`
+ OutputRefs []string `json:"outputRefs"`
+}
+
+// Filter definition for FlowSpec
+type Filter struct {
+ StdOut *filter.StdOutFilterConfig `json:"stdout,omitempty"`
+ Parser *filter.ParserConfig `json:"parser,omitempty"`
+ TagNormaliser *filter.TagNormaliser `json:"tag_normaliser,omitempty"`
+}
+
+// FlowStatus defines the observed state of Flow
+type FlowStatus struct {
+ // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+ // Important: Run "make" to regenerate code after modifying this file
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:printcolumn:name="Logging",type=string,JSONPath=`.spec.loggingRef`
+
+// Flow Kubernetes object
+type Flow struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec FlowSpec `json:"spec,omitempty"`
+ Status FlowStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// FlowList contains a list of Flow
+type FlowList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Flow `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Flow{}, &FlowList{})
+}
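The new `Flow` type ties three things together: a label selector for the pods whose logs it matches, an optional chain of `Filter` entries, and the `OutputRefs` the matching records are routed to. A minimal sketch of building one in Go follows; it assumes the package is importable as `github.com/banzaicloud/logging-operator/api/v1beta1` (matching the module path used by the other imports in this change), and the names `app-flow` and `s3-output` are placeholders.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/banzaicloud/logging-operator/api/v1beta1"
	"github.com/banzaicloud/logging-operator/pkg/model/filter"
)

func main() {
	// A Flow that selects pods labelled app=nginx, normalises their tags and
	// routes the matching records to a single named Output in the same namespace.
	flow := v1beta1.Flow{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "app-flow",
			Namespace: "default",
		},
		Spec: v1beta1.FlowSpec{
			Selectors:  map[string]string{"app": "nginx"},
			Filters:    []v1beta1.Filter{{TagNormaliser: &filter.TagNormaliser{}}},
			OutputRefs: []string{"s3-output"},
		},
	}
	fmt.Printf("%s routes to %v\n", flow.Name, flow.Spec.OutputRefs)
}
```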
diff --git a/api/v1beta1/flow_types_test.go b/api/v1beta1/flow_types_test.go
new file mode 100644
index 000000000..817ca7677
--- /dev/null
+++ b/api/v1beta1/flow_types_test.go
@@ -0,0 +1,81 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// These tests are written in BDD-style using Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("Flow", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *Flow
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+ // Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Name: "foo",
+ Namespace: "default",
+ }
+ created = &Flow{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "default",
+ },
+ Spec: FlowSpec{
+ Selectors: map[string]string{},
+ OutputRefs: []string{},
+ },
+ Status: FlowStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &Flow{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1beta1/fluentbit_types.go b/api/v1beta1/fluentbit_types.go
new file mode 100644
index 000000000..9e8297cc3
--- /dev/null
+++ b/api/v1beta1/fluentbit_types.go
@@ -0,0 +1,57 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "strconv"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+// +kubebuilder:object:generate=true
+
+// FluentbitSpec defines the desired state of Fluentbit
+type FluentbitSpec struct {
+ Annotations map[string]string `json:"annotations,omitempty"`
+ Image ImageSpec `json:"image,omitempty"`
+ TLS FluentbitTLS `json:"tls,omitempty"`
+ TargetHost string `json:"targetHost,omitempty"`
+ TargetPort int32 `json:"targetPort,omitempty"`
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+ Parser string `json:"parser,omitempty"`
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+// FluentbitTLS defines the TLS configs
+type FluentbitTLS struct {
+ Enabled bool `json:"enabled"`
+ SecretName string `json:"secretName"`
+ SharedKey string `json:"sharedKey,omitempty"`
+}
+
+// GetPrometheusPortFromAnnotation gets the port value from the prometheus.io/port annotation
+func (spec FluentbitSpec) GetPrometheusPortFromAnnotation() int32 {
+ var err error
+ var port int64
+ if spec.Annotations != nil {
+ port, err = strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return int32(port)
+}
diff --git a/api/v1beta1/fluentd_types.go b/api/v1beta1/fluentd_types.go
new file mode 100644
index 000000000..1af6ac110
--- /dev/null
+++ b/api/v1beta1/fluentd_types.go
@@ -0,0 +1,60 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "strconv"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+// +kubebuilder:object:generate=true
+
+// FluentdSpec defines the desired state of Fluentd
+type FluentdSpec struct {
+ Annotations map[string]string `json:"annotations,omitempty"`
+ TLS FluentdTLS `json:"tls,omitempty"`
+ Image ImageSpec `json:"image,omitempty"`
+ FluentdPvcSpec corev1.PersistentVolumeClaimSpec `json:"fluentdPvcSpec,omitempty"`
+ DisablePvc bool `json:"disablePvc,omitempty"`
+ VolumeModImage ImageSpec `json:"volumeModImage,omitempty"`
+ ConfigReloaderImage ImageSpec `json:"configReloaderImage,omitempty"`
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+ Port int32 `json:"port,omitempty"`
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+// FluentdTLS defines the TLS configs
+type FluentdTLS struct {
+ Enabled bool `json:"enabled"`
+ SecretName string `json:"secretName"`
+ SharedKey string `json:"sharedKey,omitempty"`
+}
+
+// GetPrometheusPortFromAnnotation gets the port value from the prometheus.io/port annotation
+func (spec FluentdSpec) GetPrometheusPortFromAnnotation() int32 {
+ var err error
+ var port int64
+ if spec.Annotations != nil {
+ port, err = strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32)
+ if err != nil {
+ return 0
+ }
+ }
+ return int32(port)
+}
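Both `FluentbitSpec` and `FluentdSpec` read their metrics port from the standard `prometheus.io/port` annotation. The two helpers diverge on bad input: the Fluentbit variant panics when the annotation map is set but the value does not parse, while the Fluentd variant falls back to 0. A short usage sketch, again assuming the `api/v1beta1` import path:

```go
package main

import (
	"fmt"

	"github.com/banzaicloud/logging-operator/api/v1beta1"
)

func main() {
	// Annotation present: the helper parses the string value into an int32.
	fluentd := v1beta1.FluentdSpec{
		Annotations: map[string]string{"prometheus.io/port": "25000"},
	}
	fmt.Println(fluentd.GetPrometheusPortFromAnnotation()) // 25000

	// No annotations at all: both helpers skip parsing and return 0.
	var fluentbit v1beta1.FluentbitSpec
	fmt.Println(fluentbit.GetPrometheusPortFromAnnotation()) // 0
}
```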
diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go
new file mode 100644
index 000000000..0b5ee001d
--- /dev/null
+++ b/api/v1beta1/groupversion_info.go
@@ -0,0 +1,34 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v1beta1 contains API Schema definitions for the logging v1beta1 API group
+// +kubebuilder:object:generate=true
+// +groupName=logging.banzaicloud.io
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "logging.banzaicloud.io", Version: "v1beta1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
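`groupversion_info.go` makes the `logging.banzaicloud.io/v1beta1` types registerable with a controller-runtime scheme. A minimal wiring sketch (import paths assumed as above; registering the client-go scheme is only needed when core Kubernetes types are used alongside the CRDs):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	"github.com/banzaicloud/logging-operator/api/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	// Core Kubernetes types (Pods, Services, ...) for clients that need them.
	_ = clientgoscheme.AddToScheme(scheme)
	// The logging-operator CRD types registered by the init() functions above.
	if err := v1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(v1beta1.GroupVersion.WithKind("Logging"))) // true
}
```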
diff --git a/api/v1beta1/logging_types.go b/api/v1beta1/logging_types.go
new file mode 100644
index 000000000..217a4e463
--- /dev/null
+++ b/api/v1beta1/logging_types.go
@@ -0,0 +1,181 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "fmt"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// LoggingSpec defines the desired state of Logging
+type LoggingSpec struct {
+ LoggingRef string `json:"loggingRef,omitempty"`
+ FlowConfigCheckDisabled bool `json:"flowConfigCheckDisabled,omitempty"`
+ FlowConfigOverride string `json:"flowConfigOverride,omitempty"`
+ FluentbitSpec *FluentbitSpec `json:"fluentbit,omitempty"`
+ FluentdSpec *FluentdSpec `json:"fluentd,omitempty"`
+ WatchNamespaces []string `json:"watchNamespaces,omitempty"`
+ ControlNamespace string `json:"controlNamespace"`
+}
+
+// LoggingStatus defines the observed state of Logging
+type LoggingStatus struct {
+ ConfigCheckResults map[string]bool `json:"configCheckResults,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=loggings,scope=Cluster
+
+// Logging is the Schema for the loggings API
+type Logging struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec LoggingSpec `json:"spec,omitempty"`
+ Status LoggingStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// LoggingList contains a list of Logging
+type LoggingList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Logging `json:"items"`
+}
+
+// SetDefaults fills empty attributes with their default values
+func (l *Logging) SetDefaults() *Logging {
+ copy := l.DeepCopy()
+ if !copy.Spec.FlowConfigCheckDisabled && copy.Status.ConfigCheckResults == nil {
+ copy.Status.ConfigCheckResults = make(map[string]bool)
+ }
+ if copy.Spec.WatchNamespaces == nil {
+ copy.Spec.WatchNamespaces = []string{}
+ }
+ if copy.Spec.FluentdSpec != nil {
+ if copy.Spec.FluentdSpec.Image.Repository == "" {
+ copy.Spec.FluentdSpec.Image.Repository = "banzaicloud/fluentd"
+ }
+ if copy.Spec.FluentdSpec.Image.Tag == "" {
+ copy.Spec.FluentdSpec.Image.Tag = "v1.6.3-alpine"
+ }
+ if copy.Spec.FluentdSpec.Image.PullPolicy == "" {
+ copy.Spec.FluentdSpec.Image.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentdSpec.Annotations == nil {
+ copy.Spec.FluentdSpec.Annotations = map[string]string{
+ "prometheus.io/scrape": "true",
+ "prometheus.io/path": "/metrics",
+ "prometheus.io/port": "25000",
+ }
+ }
+ if copy.Spec.FluentdSpec.FluentdPvcSpec.AccessModes == nil {
+ copy.Spec.FluentdSpec.FluentdPvcSpec.AccessModes = []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
+ }
+ }
+ if copy.Spec.FluentdSpec.FluentdPvcSpec.Resources.Requests == nil {
+ copy.Spec.FluentdSpec.FluentdPvcSpec.Resources.Requests = map[v1.ResourceName]resource.Quantity{
+ "storage": resource.MustParse("20Gi"),
+ }
+ }
+ if copy.Spec.FluentdSpec.VolumeModImage.Repository == "" {
+ copy.Spec.FluentdSpec.VolumeModImage.Repository = "busybox"
+ }
+ if copy.Spec.FluentdSpec.VolumeModImage.Tag == "" {
+ copy.Spec.FluentdSpec.VolumeModImage.Tag = "latest"
+ }
+ if copy.Spec.FluentdSpec.VolumeModImage.PullPolicy == "" {
+ copy.Spec.FluentdSpec.VolumeModImage.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentdSpec.ConfigReloaderImage.Repository == "" {
+ copy.Spec.FluentdSpec.ConfigReloaderImage.Repository = "jimmidyson/configmap-reload"
+ }
+ if copy.Spec.FluentdSpec.ConfigReloaderImage.Tag == "" {
+ copy.Spec.FluentdSpec.ConfigReloaderImage.Tag = "v0.2.2"
+ }
+ if copy.Spec.FluentdSpec.ConfigReloaderImage.PullPolicy == "" {
+ copy.Spec.FluentdSpec.ConfigReloaderImage.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentdSpec.Resources.Limits == nil {
+ copy.Spec.FluentdSpec.Resources.Limits = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("200M"),
+ v1.ResourceCPU: resource.MustParse("1000m"),
+ }
+ }
+ if copy.Spec.FluentdSpec.Resources.Requests == nil {
+ copy.Spec.FluentdSpec.Resources.Requests = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("100M"),
+ v1.ResourceCPU: resource.MustParse("500m"),
+ }
+ }
+ if copy.Spec.FluentdSpec.Port == 0 {
+ copy.Spec.FluentdSpec.Port = 24240
+ }
+ }
+ if copy.Spec.FluentbitSpec != nil {
+ if copy.Spec.FluentbitSpec.Image.Repository == "" {
+ copy.Spec.FluentbitSpec.Image.Repository = "fluent/fluent-bit"
+ }
+ if copy.Spec.FluentbitSpec.Image.Tag == "" {
+ copy.Spec.FluentbitSpec.Image.Tag = "1.2.2"
+ }
+ if copy.Spec.FluentbitSpec.Image.PullPolicy == "" {
+ copy.Spec.FluentbitSpec.Image.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentbitSpec.Resources.Limits == nil {
+ copy.Spec.FluentbitSpec.Resources.Limits = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("100M"),
+ v1.ResourceCPU: resource.MustParse("200m"),
+ }
+ }
+ if copy.Spec.FluentbitSpec.Resources.Requests == nil {
+ copy.Spec.FluentbitSpec.Resources.Requests = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("50M"),
+ v1.ResourceCPU: resource.MustParse("100m"),
+ }
+ }
+ if copy.Spec.FluentbitSpec.Annotations == nil {
+ copy.Spec.FluentbitSpec.Annotations = map[string]string{
+ "prometheus.io/scrape": "true",
+ "prometheus.io/path": "/api/v1/metrics/prometheus",
+ "prometheus.io/port": "2020",
+ }
+ }
+ }
+ return copy
+}
+
+// QualifiedName returns the given name prefixed with the Logging resource name ("logging-name")
+func (l *Logging) QualifiedName(name string) string {
+ return fmt.Sprintf("%s-%s", l.Name, name)
+}
+
+// QualifiedNamespacedName returns the given name prefixed with the control namespace and the Logging resource name ("namespace-logging-name")
+func (l *Logging) QualifiedNamespacedName(name string) string {
+ return fmt.Sprintf("%s-%s-%s", l.Spec.ControlNamespace, l.Name, name)
+}
+
+func init() {
+ SchemeBuilder.Register(&Logging{}, &LoggingList{})
+}
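`SetDefaults` works on a deep copy, so the object it is called on stays untouched while every empty image, annotation, resource and port field is filled in; `QualifiedName` and `QualifiedNamespacedName` derive the names of the child resources the operator creates. A short sketch, assuming the `api/v1beta1` import path:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/banzaicloud/logging-operator/api/v1beta1"
)

func main() {
	logging := v1beta1.Logging{
		ObjectMeta: metav1.ObjectMeta{Name: "default-logging"},
		Spec: v1beta1.LoggingSpec{
			ControlNamespace: "logging",
			FluentdSpec:      &v1beta1.FluentdSpec{},
		},
	}

	defaulted := logging.SetDefaults()
	fmt.Println(defaulted.Spec.FluentdSpec.Image.Repository) // banzaicloud/fluentd
	fmt.Println(logging.Spec.FluentdSpec.Image.Repository)   // "" (the original is unchanged)

	// Child resources are named after the Logging resource.
	fmt.Println(defaulted.QualifiedName("fluentd"))           // default-logging-fluentd
	fmt.Println(defaulted.QualifiedNamespacedName("fluentd")) // logging-default-logging-fluentd
}
```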
diff --git a/api/v1beta1/output_types.go b/api/v1beta1/output_types.go
new file mode 100644
index 000000000..84de8b6b4
--- /dev/null
+++ b/api/v1beta1/output_types.go
@@ -0,0 +1,64 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OutputSpec defines the desired state of Output
+type OutputSpec struct {
+ LoggingRef string `json:"loggingRef,omitempty"`
+ S3OutputConfig *output.S3OutputConfig `json:"s3,omitempty"`
+ AzureStorage *output.AzureStorage `json:"azurestorage,omitempty"`
+ GCSOutput *output.GCSOutput `json:"gcs,omitempty"`
+ OSSOutput *output.OSSOutput `json:"oss,omitempty"`
+ ElasticsearchOutput *output.ElasticsearchOutput `json:"elasticsearch,omitempty"`
+ LokiOutput *output.LokiOutput `json:"loki,omitempty"`
+ SumologicOutput *output.SumologicOutput `json:"sumologic,omitempty"`
+ ForwardOutput *output.ForwardOutput `json:"forward,omitempty"`
+ NullOutputConfig *output.NullOutputConfig `json:"nullout,omitempty"`
+}
+
+// OutputStatus defines the observed state of Output
+type OutputStatus struct {
+ // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+ // Important: Run "make" to regenerate code after modifying this file
+}
+
+// +kubebuilder:object:root=true
+
+// Output is the Schema for the outputs API
+type Output struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec OutputSpec `json:"spec,omitempty"`
+ Status OutputStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// OutputList contains a list of Output
+type OutputList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Output `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Output{}, &OutputList{})
+}
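An `Output` wraps one of the plugin configurations from `pkg/model/output`; in practice a single typed field is set per object. The null output is the simplest of them and is handy for wiring up a `Flow` without shipping logs anywhere. A hypothetical example (import paths assumed as above):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/banzaicloud/logging-operator/api/v1beta1"
	"github.com/banzaicloud/logging-operator/pkg/model/output"
)

func main() {
	out := v1beta1.Output{
		ObjectMeta: metav1.ObjectMeta{Name: "devnull", Namespace: "default"},
		Spec: v1beta1.OutputSpec{
			// Discard everything; useful while testing a Flow's selectors and filters.
			NullOutputConfig: &output.NullOutputConfig{},
		},
	}
	fmt.Printf("output %q uses the null plugin: %v\n", out.Name, out.Spec.NullOutputConfig != nil)
}
```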
diff --git a/api/v1beta1/output_types_test.go b/api/v1beta1/output_types_test.go
new file mode 100644
index 000000000..3915c5a4c
--- /dev/null
+++ b/api/v1beta1/output_types_test.go
@@ -0,0 +1,82 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// These tests are written in BDD-style using Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("Output", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *Output
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+ // Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Name: "foo",
+ Namespace: "default",
+ }
+ created = &Output{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "default",
+ },
+ Spec: OutputSpec{
+ S3OutputConfig: nil,
+ NullOutputConfig: nil,
+ },
+ Status: OutputStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &Output{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1beta1/suite_test.go b/api/v1beta1/suite_test.go
new file mode 100644
index 000000000..b17e77da8
--- /dev/null
+++ b/api/v1beta1/suite_test.go
@@ -0,0 +1,73 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "path/filepath"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/envtest"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestAPIs(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecsWithDefaultAndCustomReporters(t,
+		"v1beta1 Suite",
+ []Reporter{envtest.NewlineReporter{}})
+}
+
+var _ = BeforeSuite(func(done Done) {
+ logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
+
+ By("bootstrapping test environment")
+ testEnv = &envtest.Environment{
+ CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
+ }
+
+ err := SchemeBuilder.AddToScheme(scheme.Scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ cfg, err = testEnv.Start()
+ Expect(err).ToNot(HaveOccurred())
+ Expect(cfg).ToNot(BeNil())
+
+ k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(k8sClient).ToNot(BeNil())
+
+ close(done)
+}, 60)
+
+var _ = AfterSuite(func() {
+ By("tearing down the test environment")
+ err := testEnv.Stop()
+ Expect(err).ToNot(HaveOccurred())
+})
diff --git a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
similarity index 50%
rename from pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
rename to api/v1beta1/zz_generated.deepcopy.go
index 69a9e38f2..67f298042 100644
--- a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -1,75 +1,110 @@
// +build !ignore_autogenerated
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by operator-sdk. DO NOT EDIT.
-
-package v1alpha1
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta1
import (
- v1 "k8s.io/api/core/v1"
+ "github.com/banzaicloud/logging-operator/pkg/model/filter"
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FPlugin) DeepCopyInto(out *FPlugin) {
+func (in *ClusterFlow) DeepCopyInto(out *ClusterFlow) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFlow.
+func (in *ClusterFlow) DeepCopy() *ClusterFlow {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterFlow)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterFlow) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterFlowList) DeepCopyInto(out *ClusterFlowList) {
*out = *in
- if in.Parameters != nil {
- in, out := &in.Parameters, &out.Parameters
- *out = make([]Parameter, len(*in))
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterFlow, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FPlugin.
-func (in *FPlugin) DeepCopy() *FPlugin {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFlowList.
+func (in *ClusterFlowList) DeepCopy() *ClusterFlowList {
if in == nil {
return nil
}
- out := new(FPlugin)
+ out := new(ClusterFlowList)
in.DeepCopyInto(out)
return out
}
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterFlowList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Fluentbit) DeepCopyInto(out *Fluentbit) {
+func (in *ClusterOutput) DeepCopyInto(out *ClusterOutput) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fluentbit.
-func (in *Fluentbit) DeepCopy() *Fluentbit {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutput.
+func (in *ClusterOutput) DeepCopy() *ClusterOutput {
if in == nil {
return nil
}
- out := new(Fluentbit)
+ out := new(ClusterOutput)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Fluentbit) DeepCopyObject() runtime.Object {
+func (in *ClusterOutput) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -77,32 +112,31 @@ func (in *Fluentbit) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitList) DeepCopyInto(out *FluentbitList) {
+func (in *ClusterOutputList) DeepCopyInto(out *ClusterOutputList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]Fluentbit, len(*in))
+ *out = make([]ClusterOutput, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitList.
-func (in *FluentbitList) DeepCopy() *FluentbitList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutputList.
+func (in *ClusterOutputList) DeepCopy() *ClusterOutputList {
if in == nil {
return nil
}
- out := new(FluentbitList)
+ out := new(ClusterOutputList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FluentbitList) DeepCopyObject() runtime.Object {
+func (in *ClusterOutputList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -110,92 +144,77 @@ func (in *FluentbitList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitSpec) DeepCopyInto(out *FluentbitSpec) {
+func (in *ClusterOutputSpec) DeepCopyInto(out *ClusterOutputSpec) {
*out = *in
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- out.Image = in.Image
- out.TLS = in.TLS
- in.Resources.DeepCopyInto(&out.Resources)
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]v1.Toleration, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ in.OutputSpec.DeepCopyInto(&out.OutputSpec)
+ if in.EnabledNamespaces != nil {
+ in, out := &in.EnabledNamespaces, &out.EnabledNamespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitSpec.
-func (in *FluentbitSpec) DeepCopy() *FluentbitSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutputSpec.
+func (in *ClusterOutputSpec) DeepCopy() *ClusterOutputSpec {
if in == nil {
return nil
}
- out := new(FluentbitSpec)
+ out := new(ClusterOutputSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitStatus) DeepCopyInto(out *FluentbitStatus) {
+func (in *Filter) DeepCopyInto(out *Filter) {
*out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitStatus.
-func (in *FluentbitStatus) DeepCopy() *FluentbitStatus {
- if in == nil {
- return nil
+ if in.StdOut != nil {
+ in, out := &in.StdOut, &out.StdOut
+ *out = new(filter.StdOutFilterConfig)
+ **out = **in
+ }
+ if in.Parser != nil {
+ in, out := &in.Parser, &out.Parser
+ *out = new(filter.ParserConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TagNormaliser != nil {
+ in, out := &in.TagNormaliser, &out.TagNormaliser
+ *out = new(filter.TagNormaliser)
+ **out = **in
}
- out := new(FluentbitStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitTLS) DeepCopyInto(out *FluentbitTLS) {
- *out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitTLS.
-func (in *FluentbitTLS) DeepCopy() *FluentbitTLS {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
if in == nil {
return nil
}
- out := new(FluentbitTLS)
+ out := new(Filter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Fluentd) DeepCopyInto(out *Fluentd) {
+func (in *Flow) DeepCopyInto(out *Flow) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fluentd.
-func (in *Fluentd) DeepCopy() *Fluentd {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flow.
+func (in *Flow) DeepCopy() *Flow {
if in == nil {
return nil
}
- out := new(Fluentd)
+ out := new(Flow)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Fluentd) DeepCopyObject() runtime.Object {
+func (in *Flow) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -203,32 +222,31 @@ func (in *Fluentd) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdList) DeepCopyInto(out *FluentdList) {
+func (in *FlowList) DeepCopyInto(out *FlowList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]Fluentd, len(*in))
+ *out = make([]Flow, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdList.
-func (in *FluentdList) DeepCopy() *FluentdList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowList.
+func (in *FlowList) DeepCopy() *FlowList {
if in == nil {
return nil
}
- out := new(FluentdList)
+ out := new(FlowList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FluentdList) DeepCopyObject() runtime.Object {
+func (in *FlowList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -236,171 +254,194 @@ func (in *FluentdList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) {
+func (in *FlowSpec) DeepCopyInto(out *FlowSpec) {
*out = *in
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
- out.TLS = in.TLS
- out.Image = in.Image
- in.FluentdPvcSpec.DeepCopyInto(&out.FluentdPvcSpec)
- out.VolumeModImage = in.VolumeModImage
- out.ConfigReloaderImage = in.ConfigReloaderImage
- in.Resources.DeepCopyInto(&out.Resources)
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]v1.Toleration, len(*in))
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]Filter, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
+ if in.OutputRefs != nil {
+ in, out := &in.OutputRefs, &out.OutputRefs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec.
-func (in *FluentdSpec) DeepCopy() *FluentdSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSpec.
+func (in *FlowSpec) DeepCopy() *FlowSpec {
if in == nil {
return nil
}
- out := new(FluentdSpec)
+ out := new(FlowSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdStatus) DeepCopyInto(out *FluentdStatus) {
+func (in *FlowStatus) DeepCopyInto(out *FlowStatus) {
*out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdStatus.
-func (in *FluentdStatus) DeepCopy() *FluentdStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowStatus.
+func (in *FlowStatus) DeepCopy() *FlowStatus {
if in == nil {
return nil
}
- out := new(FluentdStatus)
+ out := new(FlowStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdTLS) DeepCopyInto(out *FluentdTLS) {
+func (in *FluentbitSpec) DeepCopyInto(out *FluentbitSpec) {
*out = *in
- return
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.Image = in.Image
+ out.TLS = in.TLS
+ in.Resources.DeepCopyInto(&out.Resources)
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdTLS.
-func (in *FluentdTLS) DeepCopy() *FluentdTLS {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitSpec.
+func (in *FluentbitSpec) DeepCopy() *FluentbitSpec {
if in == nil {
return nil
}
- out := new(FluentdTLS)
+ out := new(FluentbitSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
+func (in *FluentbitTLS) DeepCopyInto(out *FluentbitTLS) {
*out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
-func (in *ImageSpec) DeepCopy() *ImageSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitTLS.
+func (in *FluentbitTLS) DeepCopy() *FluentbitTLS {
if in == nil {
return nil
}
- out := new(ImageSpec)
+ out := new(FluentbitTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Input) DeepCopyInto(out *Input) {
+func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) {
*out = *in
- if in.Label != nil {
- in, out := &in.Label, &out.Label
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.TLS = in.TLS
+ out.Image = in.Image
+ in.FluentdPvcSpec.DeepCopyInto(&out.FluentdPvcSpec)
+ out.VolumeModImage = in.VolumeModImage
+ out.ConfigReloaderImage = in.ConfigReloaderImage
+ in.Resources.DeepCopyInto(&out.Resources)
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input.
-func (in *Input) DeepCopy() *Input {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec.
+func (in *FluentdSpec) DeepCopy() *FluentdSpec {
if in == nil {
return nil
}
- out := new(Input)
+ out := new(FluentdSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubernetesSecret) DeepCopyInto(out *KubernetesSecret) {
+func (in *FluentdTLS) DeepCopyInto(out *FluentdTLS) {
*out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecret.
-func (in *KubernetesSecret) DeepCopy() *KubernetesSecret {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdTLS.
+func (in *FluentdTLS) DeepCopy() *FluentdTLS {
if in == nil {
return nil
}
- out := new(KubernetesSecret)
+ out := new(FluentdTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Parameter) DeepCopyInto(out *Parameter) {
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
*out = *in
- if in.ValueFrom != nil {
- in, out := &in.ValueFrom, &out.ValueFrom
- *out = new(ValueFrom)
- **out = **in
- }
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter.
-func (in *Parameter) DeepCopy() *Parameter {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
+func (in *ImageSpec) DeepCopy() *ImageSpec {
if in == nil {
return nil
}
- out := new(Parameter)
+ out := new(ImageSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Plugin) DeepCopyInto(out *Plugin) {
+func (in *Logging) DeepCopyInto(out *Logging) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
+ in.Status.DeepCopyInto(&out.Status)
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin.
-func (in *Plugin) DeepCopy() *Plugin {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging.
+func (in *Logging) DeepCopy() *Logging {
if in == nil {
return nil
}
- out := new(Plugin)
+ out := new(Logging)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Plugin) DeepCopyObject() runtime.Object {
+func (in *Logging) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -408,32 +449,31 @@ func (in *Plugin) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginList) DeepCopyInto(out *PluginList) {
+func (in *LoggingList) DeepCopyInto(out *LoggingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]Plugin, len(*in))
+ *out = make([]Logging, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginList.
-func (in *PluginList) DeepCopy() *PluginList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingList.
+func (in *LoggingList) DeepCopy() *LoggingList {
if in == nil {
return nil
}
- out := new(PluginList)
+ out := new(LoggingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PluginList) DeepCopyObject() runtime.Object {
+func (in *LoggingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -441,65 +481,187 @@ func (in *PluginList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginSpec) DeepCopyInto(out *PluginSpec) {
+func (in *LoggingSpec) DeepCopyInto(out *LoggingSpec) {
*out = *in
- in.Input.DeepCopyInto(&out.Input)
- if in.Filter != nil {
- in, out := &in.Filter, &out.Filter
- *out = make([]FPlugin, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
+ if in.FluentbitSpec != nil {
+ in, out := &in.FluentbitSpec, &out.FluentbitSpec
+ *out = new(FluentbitSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FluentdSpec != nil {
+ in, out := &in.FluentdSpec, &out.FluentdSpec
+ *out = new(FluentdSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WatchNamespaces != nil {
+ in, out := &in.WatchNamespaces, &out.WatchNamespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingSpec.
+func (in *LoggingSpec) DeepCopy() *LoggingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LoggingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingStatus) DeepCopyInto(out *LoggingStatus) {
+ *out = *in
+ if in.ConfigCheckResults != nil {
+ in, out := &in.ConfigCheckResults, &out.ConfigCheckResults
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
}
}
- if in.Output != nil {
- in, out := &in.Output, &out.Output
- *out = make([]FPlugin, len(*in))
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingStatus.
+func (in *LoggingStatus) DeepCopy() *LoggingStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LoggingStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Output) DeepCopyInto(out *Output) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Output.
+func (in *Output) DeepCopy() *Output {
+ if in == nil {
+ return nil
+ }
+ out := new(Output)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Output) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutputList) DeepCopyInto(out *OutputList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Output, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSpec.
-func (in *PluginSpec) DeepCopy() *PluginSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputList.
+func (in *OutputList) DeepCopy() *OutputList {
if in == nil {
return nil
}
- out := new(PluginSpec)
+ out := new(OutputList)
in.DeepCopyInto(out)
return out
}
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OutputList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginStatus) DeepCopyInto(out *PluginStatus) {
+func (in *OutputSpec) DeepCopyInto(out *OutputSpec) {
*out = *in
- return
+ if in.S3OutputConfig != nil {
+ in, out := &in.S3OutputConfig, &out.S3OutputConfig
+ *out = new(output.S3OutputConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AzureStorage != nil {
+ in, out := &in.AzureStorage, &out.AzureStorage
+ *out = new(output.AzureStorage)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GCSOutput != nil {
+ in, out := &in.GCSOutput, &out.GCSOutput
+ *out = new(output.GCSOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OSSOutput != nil {
+ in, out := &in.OSSOutput, &out.OSSOutput
+ *out = new(output.OSSOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ElasticsearchOutput != nil {
+ in, out := &in.ElasticsearchOutput, &out.ElasticsearchOutput
+ *out = new(output.ElasticsearchOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LokiOutput != nil {
+ in, out := &in.LokiOutput, &out.LokiOutput
+ *out = new(output.LokiOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SumologicOutput != nil {
+ in, out := &in.SumologicOutput, &out.SumologicOutput
+ *out = new(output.SumologicOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ForwardOutput != nil {
+ in, out := &in.ForwardOutput, &out.ForwardOutput
+ *out = new(output.ForwardOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NullOutputConfig != nil {
+ in, out := &in.NullOutputConfig, &out.NullOutputConfig
+ *out = new(output.NullOutputConfig)
+ **out = **in
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus.
-func (in *PluginStatus) DeepCopy() *PluginStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSpec.
+func (in *OutputSpec) DeepCopy() *OutputSpec {
if in == nil {
return nil
}
- out := new(PluginStatus)
+ out := new(OutputSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ValueFrom) DeepCopyInto(out *ValueFrom) {
+func (in *OutputStatus) DeepCopyInto(out *OutputStatus) {
*out = *in
- out.SecretKeyRef = in.SecretKeyRef
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom.
-func (in *ValueFrom) DeepCopy() *ValueFrom {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputStatus.
+func (in *OutputStatus) DeepCopy() *OutputStatus {
if in == nil {
return nil
}
- out := new(ValueFrom)
+ out := new(OutputStatus)
in.DeepCopyInto(out)
return out
}
diff --git a/build/Dockerfile b/build/Dockerfile
deleted file mode 100644
index c2f7eec44..000000000
--- a/build/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM alpine:3.8
-
-ENV OPERATOR=/usr/local/bin/logging-operator \
- USER_UID=1001 \
- USER_NAME=logging-operator
-
-# install operator binary
-COPY build/_output/bin/logging-operator ${OPERATOR}
-
-COPY build/bin /usr/local/bin
-RUN /usr/local/bin/user_setup
-
-ENTRYPOINT ["/usr/local/bin/entrypoint"]
-
-USER ${USER_UID}
diff --git a/build/bin/entrypoint b/build/bin/entrypoint
deleted file mode 100755
index 76d31a162..000000000
--- a/build/bin/entrypoint
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -e
-
-# This is documented here:
-# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines
-
-if ! whoami &>/dev/null; then
- if [ -w /etc/passwd ]; then
- echo "${USER_NAME:-logging-operator}:x:$(id -u):$(id -g):${USER_NAME:-logging-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd
- fi
-fi
-
-exec ${OPERATOR} $@
diff --git a/build/bin/user_setup b/build/bin/user_setup
deleted file mode 100755
index 1e36064cb..000000000
--- a/build/bin/user_setup
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-set -x
-
-# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
-mkdir -p ${HOME}
-chown ${USER_UID}:0 ${HOME}
-chmod ug+rwx ${HOME}
-
-# runtime user will need to be able to self-insert in /etc/passwd
-chmod g+rw /etc/passwd
-
-# no need for this script to remain in the image after running
-rm $0
diff --git a/charts/logging-operator-fluent/Chart.yaml b/charts/logging-operator-fluent/Chart.yaml
deleted file mode 100644
index 3d7608b02..000000000
--- a/charts/logging-operator-fluent/Chart.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-description: Logging operator CR for Fluentd and Fluent-bit.
-name: logging-operator-fluent
-version: 0.3.3
-home: https://github.com/banzaicloud/logging-operator
-icon: https://banzaicloud.com/img/banzai-cloud-logo.png
-keywords:
- - logging
- - monitoring
- - fluentd
- - fluenbit
- - operator
-sources:
- - https://github.com/banzaicloud/logging-operator
-maintainers:
- - name: Banzai Cloud
- email: info@banzaicloud.com
-
diff --git a/charts/logging-operator-fluent/templates/fluentbit-cr.yaml b/charts/logging-operator-fluent/templates/fluentbit-cr.yaml
deleted file mode 100644
index f65ef07b8..000000000
--- a/charts/logging-operator-fluent/templates/fluentbit-cr.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-{{- if .Values.fluentbit.enabled }}
-{{ $fluentbitUseGenericSecret := or .Values.tls.secretName (not .Values.fluentbit.tlsSecret ) }}
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentbit
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentbit
- labels:
- app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }}
- helm.sh/chart: {{ include "logging-operator-fluent.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- namespace: {{ default .Release.Namespace .Values.watchNamespace }}
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/api/v1/metrics/prometheus"
- prometheus.io/port: "2020"
- image: {{ toYaml .Values.fluentbit.image | nindent 4 }}
- resources: {}
- {{- if .Values.fluentbit.tolerations }}
- tolerations: {{ toYaml .Values.fluentbit.tolerations | nindent 4 }}
- {{- end }}
- tls:
- enabled: {{ .Values.tls.enabled }}
-{{- if $fluentbitUseGenericSecret }}
- secretName: {{ .Values.tls.secretName | default (include "logging-operator-fluent.fullname" .) }}
- secretType: generic
-{{- else }}
- secretName: {{ .Values.fluentbit.tlsSecret }}
- secretType: tls
-{{- end }}
- sharedKey: {{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) | b64enc | quote }}
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/fluentd-cr.yaml b/charts/logging-operator-fluent/templates/fluentd-cr.yaml
deleted file mode 100644
index f205e3d61..000000000
--- a/charts/logging-operator-fluent/templates/fluentd-cr.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{{- if .Values.fluentd.enabled }}
-{{ $fluentdUseGenericSecret := or .Values.tls.secretName (not .Values.fluentd.tlsSecret) }}
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
- labels:
- app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }}
- helm.sh/chart: {{ include "logging-operator-fluent.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- namespace: {{ default .Release.Namespace .Values.watchNamespace }}
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/metrics"
- prometheus.io/port: "25000"
- image: {{ toYaml .Values.fluentd.image | nindent 4 }}
- volumeModImage: {{ toYaml .Values.fluentd.volumeModImage | nindent 4 }}
- configReloaderImage: {{ toYaml .Values.fluentd.configReloaderImage | nindent 4 }}
- resources: {}
- fluentdPvcSpec: {{ toYaml .Values.fluentd.fluentdPvcSpec | nindent 4 }}
- {{- if .Values.fluentd.tolerations }}
- tolerations: {{ toYaml .Values.fluentd.tolerations | nindent 4 }}
- {{- end }}
- tls:
- enabled: {{ .Values.tls.enabled }}
-{{- if $fluentdUseGenericSecret }}
- secretName: {{ .Values.tls.secretName | default (include "logging-operator-fluent.fullname" .) }}
- secretType: generic
-{{- else }}
- secretName: {{ .Values.fluentd.tlsSecret }}
- secretType: tls
-{{- end }}
- sharedKey: {{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) | b64enc | quote }}
- serviceType: {{ .Values.fluentd.serviceType | default "ClusterIP" | quote }}
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/psp.yaml b/charts/logging-operator-fluent/templates/psp.yaml
deleted file mode 100644
index bfa1f488a..000000000
--- a/charts/logging-operator-fluent/templates/psp.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-{{ if .Values.psp.enabled }}
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- creationTimestamp: null
- name: psp.fluent-bit
- annotations:
- seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
-spec:
- allowedHostPaths:
- - pathPrefix: /var/lib/docker/containers
- readOnly: true
- - pathPrefix: /var/log
- readOnly: true
- fsGroup:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- readOnlyRootFilesystem: true
- allowPrivilegeEscalation: false
- runAsUser:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- volumes:
- - configMap
- - emptyDir
- - secret
- - hostPath
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- creationTimestamp: null
- name: psp.fluentd
- annotations:
- seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
-spec:
- fsGroup:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- allowPrivilegeEscalation: false
- runAsUser:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- volumes:
- - configMap
- - emptyDir
- - secret
- - persistentVolumeClaim
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/rbac.yaml b/charts/logging-operator-fluent/templates/rbac.yaml
deleted file mode 100644
index eabb1b020..000000000
--- a/charts/logging-operator-fluent/templates/rbac.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-{{ if .Values.psp.enabled }}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit
- namespace: {{ .Release.Namespace }}
- labels:
- kubernetes.io/cluster-service: "true"
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
- - policy
- resourceNames:
- - psp.fluent-bit
- resources:
- - podsecuritypolicies
- verbs:
- - use
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
- namespace: {{ .Release.Namespace }}
- labels:
- kubernetes.io/cluster-service: "true"
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
- - policy
- resourceNames:
- - psp.fluentd
- resources:
- - podsecuritypolicies
- verbs:
- - use
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit
- namespace: {{ .Release.Namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
- kubernetes.io/cluster-service: 'true'
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit
-subjects:
- - kind: ServiceAccount
- name: logging
- namespace: {{ .Release.Namespace }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
- namespace: {{ .Release.Namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
- kubernetes.io/cluster-service: 'true'
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
-subjects:
- - kind: ServiceAccount
- name: logging-fluentd
- namespace: {{ .Release.Namespace }}
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/secret.yaml b/charts/logging-operator-fluent/templates/secret.yaml
deleted file mode 100644
index f0b93e661..000000000
--- a/charts/logging-operator-fluent/templates/secret.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-{{- if and .Values.tls.enabled (not .Values.tls.secretName) }}
-{{ $ca := genCA "svc-cat-ca" 3650 }}
-{{ $cn := printf "fluentd.%s.svc.cluster.local" .Release.Namespace }}
-{{ $server := genSignedCert $cn nil nil 365 $ca }}
-{{ $client := genSignedCert "" nil nil 365 $ca }}
-
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }}
- helm.sh/chart: {{ include "logging-operator-fluent.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
- caCert: {{ b64enc $ca.Cert }}
- clientCert: {{ b64enc $client.Cert }}
- clientKey: {{ b64enc $client.Key }}
- serverCert: {{ b64enc $server.Cert }}
- serverKey: {{ b64enc $server.Key }}
-{{ end }}
\ No newline at end of file
diff --git a/charts/logging-operator-fluent/values.yaml b/charts/logging-operator-fluent/values.yaml
deleted file mode 100644
index 409356406..000000000
--- a/charts/logging-operator-fluent/values.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-# Default values for logging-operator-fluent.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-tls:
- enabled: false
- secretName: ""
- sharedKey: ""
-
-fluentbit:
- enabled: true
- namespace: ""
- tolerations:
- image:
- tag: "1.1.3"
- repository: "fluent/fluent-bit"
- pullPolicy: "IfNotPresent"
- tlsSecret: ""
-
-fluentd:
- enabled: true
- namespace: ""
- image:
- tag: "v1.5.0"
- repository: "banzaicloud/fluentd"
- pullPolicy: "IfNotPresent"
- volumeModImage:
- tag: "latest"
- repository: "busybox"
- pullPolicy: "IfNotPresent"
- configReloaderImage:
- tag: "v0.2.2"
- repository: "jimmidyson/configmap-reload"
- pullPolicy: "IfNotPresent"
- tolerations:
- fluentdPvcSpec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 21Gi
- tlsSecret: ""
-
-psp:
- enabled: false
diff --git a/charts/nginx-logging-demo/.helmignore b/charts/logging-operator-logging/.helmignore
similarity index 100%
rename from charts/nginx-logging-demo/.helmignore
rename to charts/logging-operator-logging/.helmignore
diff --git a/charts/logging-operator-logging/Chart.yaml b/charts/logging-operator-logging/Chart.yaml
new file mode 100644
index 000000000..deda998cb
--- /dev/null
+++ b/charts/logging-operator-logging/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "2.1.0"
+description: A Helm chart to configure the logging resource for the logging-operator
+name: logging-operator-logging
+version: 2.1.0
diff --git a/charts/logging-operator-fluent/README.md b/charts/logging-operator-logging/README.md
similarity index 51%
rename from charts/logging-operator-fluent/README.md
rename to charts/logging-operator-logging/README.md
index 37e4950bf..a31d540b6 100644
--- a/charts/logging-operator-fluent/README.md
+++ b/charts/logging-operator-logging/README.md
@@ -1,51 +1,41 @@
-# Logging Operator Fluent Chart
+# Installing the logging resource for the logging-operator
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
-# Prerequisites
-$ helm install banzaicloud-stable/logging-operator
-# Install fluent and fluent-bit cr
-$ helm install banzaicloud-stable/logging-operator-fluent
+$ helm install banzaicloud-stable/logging-operator-logging
```
-## Introduction
-
-This chart applies Fluentd and Fluent-bit custom resources to [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) deployment on a [Kubernetes](http://kubernetes.io)
-
-## Prerequisites
-
-- Kubernetes 1.8+ with Beta APIs enabled
-- [Logging Operator](https://github.com/banzaicloud/logging-operator)
-
-
-| Parameter | Description | Default |
-| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
-| `tls.enabled` | Enabled TLS communication between components | true |
-| `tls.secretName` | Specified generic secret name, which contain tls certs | This will overwrite automatic Helm certificate generation and overrides `fluentbit.tlsSecret` and `fluentd.tlsSecret`. |
-| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
-| `fluentbit.enabled` | Install fluent-bit | true |
-| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace |
-| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
-| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
-| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
-| `fluentbit.tolerations` | Fluentbit tolerations | `nil` |
-| `fluentbit.tlsSecret` | Secret name that contains Fluentbit TLS client cert | Ignored if `tls.secretName` is specified. Must refer to a secret of type `kubernetes.io/tls` |
-| `fluentd.enabled` | Install fluentd | true |
-| `fluentd.namespace` | Specified fluentd installation namespace | same as operator namespace |
-| `fluentd.image.tag` | Fluentd container image tag | `v1.5.0` |
-| `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` |
-| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
-| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
-| `fluentd.volumeModImage.repository` | Fluentd volumeModImage container image repository | `busybox` |
-| `fluentd.volumeModImage.pullPolicy` | Fluentd volumeModImage container pull policy | `IfNotPresent` |
-| `fluentd.configReloaderImage.tag` | Fluentd configReloaderImage container image tag | `v0.2.2` |
-| `fluentd.configReloaderImage.repository` | Fluentd configReloaderImage container image repository | `jimmidyson/configmap-reload` |
-| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
-| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
-| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
-| `fluentd.tolerations` | Fluentd tolerations | `nil` |
-| `fluentd.tlsSecret` | Secret name that contains Fluentd TLS client cert | Ignored if `tls.secretName` is specified. Must refer to a secret of type `kubernetes.io/tls`. |
-| `psp.enabled` | Install PodSecurityPolicy | `false` |
+## Configuration
+
+The following table lists the configurable parameters of the logging-operator-logging chart and their default values. An example values override is shown after the table.
+
+| Parameter | Description | Default |
+| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
+| `tls.enabled`                                        | Enable TLS communication between components              | true |
+| `tls.fluentdSecretName`                              | Specified secret name, which contains the TLS certs      | This will overwrite automatic Helm certificate generation. |
+| `tls.fluentbitSecretName`                            | Specified secret name, which contains the TLS certs      | This will overwrite automatic Helm certificate generation. |
+| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
+| `fluentbit.enabled` | Install fluent-bit | true |
+| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace |
+| `fluentbit.tolerations` | Tolerations for fluentbit daemonset | none |
+| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
+| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
+| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
+| `fluentd.enabled` | Install fluentd | true |
+| `fluentd.image.tag` | Fluentd container image tag | `v1.6.3-alpine` |
+| `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` |
+| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
+| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
+| `fluentd.volumeModImage.repository` | Fluentd volumeModImage container image repository | `busybox` |
+| `fluentd.volumeModImage.pullPolicy` | Fluentd volumeModImage container pull policy | `IfNotPresent` |
+| `fluentd.configReloaderImage.tag` | Fluentd configReloaderImage container image tag | `v0.2.2` |
+| `fluentd.configReloaderImage.repository` | Fluentd configReloaderImage container image repository | `jimmidyson/configmap-reload` |
+| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
+| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
+| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
+| `fluentd.fluentdPvcSpec.resources.storageClassName` | Fluentd persistence volume storage class                 | `""` |
+| `fluentd.tolerations` | Tolerations for fluentd statefulset | none |
+| `fluentd.nodeSelector` | Node selector for fluentd pods | none |
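+
+As an illustration only (the values below are placeholders matching the commented example in `values.yaml`, not chart defaults), a values override that enlarges the fluentd buffer volume could look like this:
+
+```yaml
+# my-values.yaml -- illustrative override for the logging-operator-logging chart
+tls:
+  enabled: true
+fluentd:
+  fluentdPvcSpec:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 40Gi
+    storageClassName: fast
+```
+
+Apply it with `helm install banzaicloud-stable/logging-operator-logging -f my-values.yaml`.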
diff --git a/charts/logging-operator-logging/templates/NOTES.txt b/charts/logging-operator-logging/templates/NOTES.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/charts/nginx-logging-demo/templates/_helpers.tpl b/charts/logging-operator-logging/templates/_helpers.tpl
similarity index 62%
rename from charts/nginx-logging-demo/templates/_helpers.tpl
rename to charts/logging-operator-logging/templates/_helpers.tpl
index 86303e7c0..adf39e886 100644
--- a/charts/nginx-logging-demo/templates/_helpers.tpl
+++ b/charts/logging-operator-logging/templates/_helpers.tpl
@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
-{{- define "nginx-logging-demo.name" -}}
+{{- define "logging-operator-logging.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
-{{- define "nginx-logging-demo.fullname" -}}
+{{- define "logging-operator-logging.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@@ -27,6 +27,19 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
-{{- define "nginx-logging-demo.chart" -}}
+{{- define "logging-operator-logging.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "logging-operator-logging.labels" -}}
+app.kubernetes.io/name: {{ include "logging-operator-logging.name" . }}
+helm.sh/chart: {{ include "logging-operator-logging.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/charts/logging-operator-logging/templates/logging.yaml b/charts/logging-operator-logging/templates/logging.yaml
new file mode 100644
index 000000000..b56a2932c
--- /dev/null
+++ b/charts/logging-operator-logging/templates/logging.yaml
@@ -0,0 +1,38 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: {{ include "logging-operator-logging.name" . }}
+ labels:
+{{ include "logging-operator-logging.labels" . | indent 4 }}
+spec:
+ {{- with .Values.loggingRef }}
+ loggingRef: {{ . }}
+ {{- end }}
+ {{- with .Values.flowConfigCheckDisabled }}
+ flowConfigCheckDisabled: {{ . }}
+ {{- end }}
+ {{- with .Values.flowConfigOverride }}
+ flowConfigOverride: {{ . }}
+ {{- end }}
+ controlNamespace: {{ .Values.controlNamespace | default .Release.Namespace }}
+ fluentd:
+ {{- if .Values.tls.enabled }}
+ tls:
+ enabled: true
+ secretName: {{ .Values.tls.fluentdSecretName | default (printf "%s-%s" (include "logging-operator-logging.name" . ) "fluentd-tls" ) }}
+ sharedKey: "{{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) }}"
+ {{- end }}
+ {{- if .Values.fluentd }}
+{{ toYaml .Values.fluentd | indent 4}}
+ {{- end}}
+ fluentbit:
+ {{- if .Values.tls.enabled }}
+ tls:
+ enabled: true
+ secretName: {{ .Values.tls.fluentbitSecretName | default (printf "%s-%s" (include "logging-operator-logging.name" . ) "fluentbit-tls" ) }}
+ sharedKey: "{{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) }}"
+ {{- end }}
+ {{- if .Values.fluentbit }}
+{{ toYaml .Values.fluentbit | indent 4}}
+ {{- end}}
+
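+# For reference only: with TLS enabled and no overrides, the template above is
+# expected to render a Logging resource roughly like the following (the resource
+# name "logging-operator-logging" and namespace "default" are placeholders):
+#
+#   apiVersion: logging.banzaicloud.io/v1beta1
+#   kind: Logging
+#   metadata:
+#     name: logging-operator-logging
+#   spec:
+#     controlNamespace: default
+#     fluentd:
+#       tls:
+#         enabled: true
+#         secretName: logging-operator-logging-fluentd-tls
+#         sharedKey: "<generated>"
+#     fluentbit:
+#       tls:
+#         enabled: true
+#         secretName: logging-operator-logging-fluentbit-tls
+#         sharedKey: "<generated>"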
diff --git a/charts/logging-operator-logging/templates/secret.yaml b/charts/logging-operator-logging/templates/secret.yaml
new file mode 100644
index 000000000..721f68621
--- /dev/null
+++ b/charts/logging-operator-logging/templates/secret.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.tls.enabled }}
+{{ $ca := genCA "svc-cat-ca" 3650 }}
+{{ $cn := printf "%s-%s.%s.svc.cluster.local" (include "logging-operator-logging.name" .) "fluentd" .Release.Namespace }}
+{{ $server := genSignedCert $cn nil nil 365 $ca }}
+{{ $client := genSignedCert "" nil nil 365 $ca }}
+
+{{- if not .Values.tls.fluentdSecretName }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "logging-operator-logging.name" . }}-fluentd-tls
+ labels:
+{{ include "logging-operator-logging.labels" . | indent 4 }}
+data:
+ ca.crt: {{ b64enc $ca.Cert }}
+ tls.crt: {{ b64enc $server.Cert }}
+ tls.key: {{ b64enc $server.Key }}
+{{ end }}
+
+---
+
+{{- if not .Values.tls.fluentbitSecretName }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "logging-operator-logging.name" . }}-fluentbit-tls
+ labels:
+{{ include "logging-operator-logging.labels" . | indent 4 }}
+data:
+ ca.crt: {{ b64enc $ca.Cert }}
+ tls.crt: {{ b64enc $client.Cert }}
+ tls.key: {{ b64enc $client.Key }}
+{{ end }}
+{{ end }}
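+# For reference only: each generated secret uses the ca.crt / tls.crt / tls.key
+# layout shown above; rendered, the fluentd secret would look roughly like this
+# (base64 data shown as placeholders):
+#
+#   apiVersion: v1
+#   kind: Secret
+#   metadata:
+#     name: logging-operator-logging-fluentd-tls
+#   data:
+#     ca.crt: <base64 CA certificate>
+#     tls.crt: <base64 server certificate>
+#     tls.key: <base64 server key>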
diff --git a/charts/logging-operator-logging/values.yaml b/charts/logging-operator-logging/values.yaml
new file mode 100644
index 000000000..eb2e0766e
--- /dev/null
+++ b/charts/logging-operator-logging/values.yaml
@@ -0,0 +1,35 @@
+# Default values for logging-operator-logging.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# Reference name of the logging deployment
+loggingRef: ""
+# Disable configuration check before deploy
+flowConfigCheckDisabled: false
+# Use static configuration instead of generated config.
+flowConfigOverride: ""
+
+# Fluent-bit configurations
+fluentbit: {}
+# Fluentd configurations
+fluentd: {}
+# fluentdPvcSpec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: 40Gi
+# storageClassName: fast
+
+# Enable secure connection between fluentd and fluent-bit
+tls:
+ enabled: true
+ # Shared key for fluentd authentication
+ sharedKey: ""
+ fluentbitSecretName: ""
+ fluentdSecretName: ""
+
+# Limit the namespaces from which Flow and Output specs are read
+watchNamespaces: []
+# Control namespace that contains ClusterOutput and ClusterFlow resources
+controlNamespace: ""
\ No newline at end of file
diff --git a/charts/logging-operator/Chart.yaml b/charts/logging-operator/Chart.yaml
index 361b7fc6c..0b542f64f 100644
--- a/charts/logging-operator/Chart.yaml
+++ b/charts/logging-operator/Chart.yaml
@@ -1,18 +1,5 @@
apiVersion: v1
-description: Logging operator for Kubernetes based on Fluentd and Fluent-bit.
+appVersion: "2.1.0"
+description: A Helm chart to install the Banzai Cloud logging-operator
name: logging-operator
-version: 0.3.3
-appVersion: 0.2.2
-home: https://github.com/banzaicloud/logging-operator
-icon: https://banzaicloud.com/img/banzai-cloud-logo.png
-keywords:
- - logging
- - monitoring
- - fluentd
- - fluenbit
- - operator
-sources:
-- https://github.com/banzaicloud/logging-operator
-maintainers:
-- name: Banzai Cloud
- email: info@banzaicloud.com
+version: 2.1.0
diff --git a/charts/logging-operator/README.md b/charts/logging-operator/README.md
index cad88b1dd..0a54a1747 100644
--- a/charts/logging-operator/README.md
+++ b/charts/logging-operator/README.md
@@ -12,7 +12,7 @@ $ helm install banzaicloud-stable/logging-operator
## Introduction
-This chart bootstraps an [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+This chart bootstraps a [Logging Operator](https://github.com/banzaicloud/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
@@ -45,12 +45,11 @@ The following tables lists the configurable parameters of the logging-operator c
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `image.repository` | Container image repository | `banzaicloud/logging-operator` |
-| `image.tag` | Container image tag | `0.2.2` |
+| `image.tag` | Container image tag | `2.0.0` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `nameOverride` | Override name of app | `` |
| `fullnameOverride` | Override full name of app | `` |
| `watchNamespace` | Namespace to watch fot LoggingOperator CRD | `` |
-| `grafana.dashboard.enabled` | Install grafana logging-operator dashboard | `true` |
| `rbac.enabled` | Create rbac service account and roles | `true` |
| `rbac.psp.enabled` | Must be used with `rbac.enabled` true. If true, creates & uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. | `false` |
| `affinity` | Node Affinity | `{}` |
@@ -68,31 +67,46 @@ $ helm install --name my-release -f values.yaml banzaicloud-stable/logging-opera
> **Tip**: You can use the default [values.yaml](values.yaml)
-## Installing Fluentd and Fluent-bit
+## Installing Fluentd and Fluent-bit via the Logging resource
-The previous chart does **not** install Fluentd or Fluent-bit custom resource. To install them please use the [Logging Operator Fluent](https://github.com/banzaicloud/banzai-charts/logging-operator-fluent) chart.
+The previous chart does **not** install a `logging` resource to deploy Fluentd and Fluent-bit on the cluster. To install them, please use the [Logging Operator Logging](https://github.com/banzaicloud/logging-operator/tree/master/charts/logging-operator-logging) chart.
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
-$ helm install banzaicloud-stable/logging-operator-fluent
+$ helm install banzaicloud-stable/logging-operator-logging
```
+## Configuration
+
+The following table lists the configurable parameters of the logging-operator-logging chart and their default values.
+
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `tls.enabled` | Enabled TLS communication between components | true |
-| `tls.secretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. |
+| `tls.fluentdSecretName`                              | Specified secret name, which contains the TLS certs     | This will overwrite automatic Helm certificate generation. |
+| `tls.fluentbitSecretName`                            | Specified secret name, which contains the TLS certs     | This will overwrite automatic Helm certificate generation. |
| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
| `fluentbit.enabled` | Install fluent-bit | true |
| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace |
-| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
+| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
| `fluentd.enabled` | Install fluentd | true |
-| `fluentd.namespace` | Specified fluentd installation namespace | same as operator namespace |
-| `fluentd.image.tag` | Fluentd container image tag | `v1.5.0` |
+| `fluentd.image.tag` | Fluentd container image tag | `v1.6.3-alpine` |
| `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` |
| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
@@ -103,3 +117,4 @@ $ helm install banzaicloud-stable/logging-operator-fluent
| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
+| `fluentd.fluentdPvcSpec.resources.storageClassName` | Fluentd persistence volume storage class                 | `""` |
\ No newline at end of file
diff --git a/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json b/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json
deleted file mode 100644
index 676fa21a7..000000000
--- a/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json
+++ /dev/null
@@ -1,1069 +0,0 @@
-{
- "__inputs": [
- {
- "name": "DS_PROMETHEUS",
- "label": "Prometheus",
- "description": "",
- "type": "datasource",
- "pluginId": "prometheus",
- "pluginName": "Prometheus"
- }
- ],
- "__requires": [
- {
- "type": "grafana",
- "id": "grafana",
- "name": "Grafana",
- "version": "5.1.3"
- },
- {
- "type": "panel",
- "id": "graph",
- "name": "Graph",
- "version": "5.0.0"
- },
- {
- "type": "datasource",
- "id": "prometheus",
- "name": "Prometheus",
- "version": "5.0.0"
- },
- {
- "type": "panel",
- "id": "singlestat",
- "name": "Singlestat",
- "version": "5.0.0"
- }
- ],
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": 7752,
- "graphTooltip": 0,
- "id": null,
- "links": [],
- "panels": [
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#299c46"
- ],
- "datasource": "Prometheus",
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 2,
- "x": 0,
- "y": 0
- },
- "id": 4,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "sum(kube_node_status_condition{condition=\"Ready\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "",
- "refId": "A"
- }
- ],
- "thresholds": "0,1",
- "title": "Active Nodes",
- "type": "singlestat",
- "valueFontSize": "100%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#299c46"
- ],
- "datasource": "Prometheus",
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 2,
- "x": 2,
- "y": 0
- },
- "id": 6,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "sum(kube_pod_info{pod=~\"fluent-bit.*\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Active Fluent-bit",
- "refId": "A"
- }
- ],
- "thresholds": "0,1",
- "title": "Fluent-bit",
- "type": "singlestat",
- "valueFontSize": "100%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "avg"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#299c46"
- ],
- "datasource": "Prometheus",
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 2,
- "x": 4,
- "y": 0
- },
- "id": 8,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "sum(kube_pod_info{pod=~\"fluentd.*\"})",
- "format": "time_series",
- "instant": false,
- "intervalFactor": 1,
- "refId": "A"
- }
- ],
- "thresholds": "0,1",
- "title": "Fluentd",
- "type": "singlestat",
- "valueFontSize": "100%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 9,
- "x": 6,
- "y": 0
- },
- "id": 2,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_input_bytes_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ kubernetes_pod_name }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit input bytes/s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 9,
- "x": 15,
- "y": 0
- },
- "id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_output_proc_bytes_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ kubernetes_pod_name }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit output bytes/s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 5
- },
- "id": 10,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_output_errors_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ kubernetes_pod_name }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit error/s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 8,
- "y": 5
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentd_output_status_emit_count[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ type }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output emit/s by Plugin",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ops",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 16,
- "y": 5
- },
- "id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "fluentd_output_status_buffer_queue_length",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ type }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output buffer queue",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 0,
- "y": 10
- },
- "id": 11,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_output_retries_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "Retries {{ kubernetes_pod_name }}",
- "refId": "A"
- },
- {
- "expr": "rate(fluentbit_output_retries_failed_total[1m])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Failed {{ kubernetes_pod_name }}",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit retries/fails",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "short",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 10
- },
- "id": 14,
- "legend": {
- "alignAsTable": false,
- "avg": false,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(fluentd_output_status_retry_count[1m]))",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "Retry rate",
- "refId": "A"
- },
- {
- "expr": "sum(rate(fluentd_output_status_num_errors[1m]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Error rate",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output error/retry rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ops",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 16,
- "y": 10
- },
- "id": 13,
- "legend": {
- "alignAsTable": false,
- "avg": false,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "fluentd_output_status_buffer_total_bytes",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ type }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output buffer size",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "refresh": "30s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": []
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Logging Dashboard",
- "uid": "bNn5LUtiz",
- "version": 10,
- "description": "This is a simple dashboard for: https://github.com/banzaicloud/logging-operator utilising Fluent-bit and Fluentd"
-}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/NOTES.txt b/charts/logging-operator/templates/NOTES.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/charts/logging-operator/templates/_helpers.tpl b/charts/logging-operator/templates/_helpers.tpl
index a58c97189..a5e197e22 100644
--- a/charts/logging-operator/templates/_helpers.tpl
+++ b/charts/logging-operator/templates/_helpers.tpl
@@ -30,3 +30,16 @@ Create chart name and version as used by the chart label.
{{- define "logging-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "logging-operator.labels" -}}
+app.kubernetes.io/name: {{ include "logging-operator.name" . }}
+helm.sh/chart: {{ include "logging-operator.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/charts/logging-operator/templates/configmap.yaml b/charts/logging-operator/templates/configmap.yaml
deleted file mode 100644
index 127e65b30..000000000
--- a/charts/logging-operator/templates/configmap.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: {{ template "logging-operator.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "logging-operator.name" . }}
- helm.sh/chart: {{ include "logging-operator.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-
-data:
- config.toml: |-
- # This is the config for the logging operator
-
- [logging-operator]
- rbac = {{ .Values.rbac.enabled }}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/crd.yaml b/charts/logging-operator/templates/crd.yaml
deleted file mode 100644
index f15d981c7..000000000
--- a/charts/logging-operator/templates/crd.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: plugins.logging.banzaicloud.com
- annotations:
- "helm.sh/hook": crd-install
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Plugin
- listKind: PluginList
- plural: plugins
- singular: plugin
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: fluentbits.logging.banzaicloud.com
- annotations:
- "helm.sh/hook": crd-install
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentbit
- listKind: FluentbitList
- plural: fluentbits
- singular: fluentbit
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
-
----
-
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: fluentds.logging.banzaicloud.com
- annotations:
- "helm.sh/hook": crd-install
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentd
- listKind: FluentdList
- plural: fluentds
- singular: fluentd
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
\ No newline at end of file
diff --git a/charts/logging-operator/templates/deployment.yaml b/charts/logging-operator/templates/deployment.yaml
index b4b29ebc8..97a16394b 100644
--- a/charts/logging-operator/templates/deployment.yaml
+++ b/charts/logging-operator/templates/deployment.yaml
@@ -1,12 +1,9 @@
apiVersion: apps/v1
kind: Deployment
metadata:
- name: {{ template "logging-operator.fullname" . }}
+ name: {{ include "logging-operator.fullname" . }}
labels:
- app.kubernetes.io/name: {{ include "logging-operator.name" . }}
- helm.sh/chart: {{ include "logging-operator.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "logging-operator.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
@@ -15,56 +12,35 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
- annotations:
- scheduler.alpha.kubernetes.io/tolerations: {{ toJson .Values.tolerations | quote }}
labels:
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
- containers:
- - name: {{ template "logging-operator.name" . }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: "{{ .Values.image.pullPolicy }}"
- command:
- - logging-operator
- env:
- - name: WATCH_NAMESPACE
- value: {{ .Values.watchNamespace | quote }}
- - name: KUBERNETES_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: OPERATOR_NAME
- value: {{ include "logging-operator.fullname" . | quote }}
- volumeMounts:
- - mountPath: /logging-operator/config
- name: config
-
- {{- if .Values.securityContext }}
- securityContext: {{ toYaml .Values.securityContext | nindent 10 }}
- {{- end }}
- resources: {{ toYaml .Values.resources | nindent 10 }}
- {{- if .Values.podSecurityContext }}
- securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
- {{- end }}
- {{- if .Values.rbac.enabled }}
- serviceAccountName: {{ template "logging-operator.fullname" . }}
- {{- end }}
- volumes:
- - configMap:
- name: {{ template "logging-operator.fullname" . }}
- name: config
-
- {{- with .Values.nodeSelector }}
- nodeSelector: {{ toYaml . | nindent 8 }}
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
{{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.http.port }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- with .Values.affinity }}
- affinity: {{ toYaml . | nindent 8 }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
- tolerations: {{ toYaml . | nindent 8 }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.rbac.enabled }}
+ serviceAccountName: {{ include "logging-operator.fullname" . }}
{{- end }}
\ No newline at end of file
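
For reference, the reworked deployment template above now reads the chart values sketched below. The keys match what the template references; the concrete defaults are placeholders, not taken from this diff:

```yaml
# Illustrative values.yaml excerpt; values shown here are assumptions.
replicaCount: 1
image:
  repository: banzaicloud/logging-operator   # placeholder repository
  tag: latest                                 # placeholder tag
  pullPolicy: IfNotPresent
imagePullSecrets: []
resources: {}
http:
  port: 8080                                  # placeholder port
nodeSelector: {}
affinity: {}
tolerations: []
rbac:
  enabled: true
```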
diff --git a/charts/logging-operator/templates/grafana-dashboard-logging.yaml b/charts/logging-operator/templates/grafana-dashboard-logging.yaml
deleted file mode 100644
index 353e96db7..000000000
--- a/charts/logging-operator/templates/grafana-dashboard-logging.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{{- if .Values.grafana.dashboard.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "logging-operator.fullname" . }}-grafana-dashboard-logging
- labels:
- pipeline_grafana_dashboard: "1"
-data:
- logging.json: |-2
-
-{{.Files.Get "grafana-dashboards/logging-dashboard_rev1.json"| indent 4}}
-{{- end }}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_clusterflows.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_clusterflows.yaml
new file mode 100644
index 000000000..711c45034
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.io_clusterflows.yaml
@@ -0,0 +1,140 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusterflows.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: ClusterFlow
+ listKind: ClusterFlowList
+ plural: clusterflows
+ singular: clusterflow
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterFlow is the Schema for the clusterflows API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Name of the logging cluster to be attached
+ properties:
+ filters:
+ items:
+ description: Filter definition for FlowSpec
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid record to @ERROR label. Invalid
+                          cases are: key does not exist, format is not matched, unexpected
+ error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+ description: ' Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+                        description: Remove key_name field when parsing succeeds
+ type: boolean
+ replace_invalid_sequence:
+                        description: If true, invalid strings are replaced with safe
+                          characters and re-parsed.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+                        description: Re-tag log messages. More info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
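
To make the new schema concrete, a minimal ClusterFlow that validates against the CRD above might look as follows; all names and selector values are illustrative, not taken from this diff:

```yaml
# Hypothetical ClusterFlow example for the v1beta1 schema introduced above.
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: example-clusterflow
spec:
  selectors:            # required
    app: example-app
  filters:
    - parser:
        key_name: log   # required by the parser filter schema
        remove_key_name_field: true
        parsers:
          - type: nginx
  outputRefs:           # required
    - example-clusteroutput
```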
diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml
new file mode 100644
index 000000000..1fffbaaf0
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml
@@ -0,0 +1,2274 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusteroutputs.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: ClusterOutput
+ listKind: ClusterOutputList
+ plural: clusteroutputs
+ singular: clusteroutput
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterOutput is the Schema for the clusteroutputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+          description: ClusterOutputSpec contains Kubernetes spec for ClusterOutput
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+                  description: 'Automatically create container if it does not exist (default:
+ true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+ description: 'Azure storage type currently only "blob" supported
+ (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+ description: Specify the string and its value to be replaced in
+ form of hash. Can contain multiple key value pair that would be
+ replaced in the specified template_file. This setting only creates
+ template and to add rollover index please check the rollover_index
+ configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+ description: 'Indicates whether to fail when max_retry_putting_template
+                    is exceeded. If you have multiple output plugins, you can use
+                    this property to avoid failing on fluentd startup. (default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+                    You can use this option to enforce URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+ description: 'Set the Logstash prefix.(default: true)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+                  description: 'You can specify the number of retries for obtaining the
+                    Elasticsearch version. (default: 15)'
+ type: string
+ max_retry_putting_template:
+                  description: 'You can specify the number of retries for putting the
+                    template. (default: 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: integer
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+                    JSON encoder/decoder. When this parameter is set to true, Elasticsearch
+                    client uses Oj as JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+                    only on "host unreachable exceptions". We recommend setting this to
+                    true in the presence of elasticsearch shield. (default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Connection scheme (default: http)'
+ type: string
+ ssl_verify:
+ description: 'Skip ssl verification (default: true)'
+ type: boolean
+ ssl_version:
+ description: If you want to configure SSL/TLS version, you can specify
+ ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]
+ type: string
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+                  description: 'By default, records are inserted into the index logstash-YYMMDD
+                    in UTC (Coordinated Universal Time). This option allows the use of
+                    local time if you set utc_index to false. (default: true)'
+ type: boolean
+ with_transporter_log:
+ description: 'This is debugging purpose option to enable to obtain
+ transporter layer log. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ required:
+ - ssl_verify
+ type: object
+ enabledNamespaces:
+ items:
+ type: string
+ type: array
+ forward:
+ properties:
+ ack_response_timeout:
+ description: 'This option is used when require_ack_response is true.
+ This default value is based on popular tcp_syn_retries. (default:
+ 190)'
+ type: integer
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ connect_timeout:
+ description: The timeout time for socket connect. When the connection
+ timed out during establishment, Errno::ETIMEDOUT is raised.
+ type: integer
+ dns_round_robin:
+ description: Enable client-side DNS round robin. Uniform randomly
+ pick an IP address to send data when a hostname has several IP
+ addresses. `heartbeat_type udp` is not available with `dns_round_robin
+ true`. Use `heartbeat_type tcp` or `heartbeat_type none`.
+ type: boolean
+ expire_dns_cache:
+ description: 'Set TTL to expire DNS cache in seconds. Set 0 not
+                    to use DNS cache. (default: 0)'
+ type: integer
+ hard_timeout:
+ description: 'The hard timeout used to detect server failure. The
+ default value is equal to the send_timeout parameter. (default:
+ 60)'
+ type: integer
+ heartbeat_interval:
+                  description: 'The interval of the heartbeat packet. (default: 1)'
+ type: integer
+ heartbeat_type:
+ description: The transport protocol to use for heartbeats. Set "none"
+ to disable heartbeat. [transport, tcp, udp, none]
+ type: string
+ ignore_network_errors_at_startup:
+ description: Ignore DNS resolution and errors at startup time.
+ type: boolean
+ keepalive:
+ description: 'Enable keepalive connection. (default: false)'
+ type: boolean
+ keepalive_timeout:
+ description: 'Expired time of keepalive. Default value is nil, which
+ means to keep connection as long as possible. (default: 0)'
+ type: integer
+ phi_failure_detector:
+ description: 'Use the "Phi accrual failure detector" to detect server
+ failure. (default: true)'
+ type: boolean
+ phi_threshold:
+ description: 'The threshold parameter used to detect server faults.
+ (default: 16) `phi_threshold` is deeply related to `heartbeat_interval`.
+ If you are using longer `heartbeat_interval`, please use the larger
+ `phi_threshold`. Otherwise you will see frequent detachments of
+ destination servers. The default value 16 is tuned for `heartbeat_interval`
+ 1s.'
+ type: integer
+ recover_wait:
+ description: 'The wait time before accepting a server fault recovery.
+ (default: 10)'
+ type: integer
+ require_ack_response:
+ description: Change the protocol to at-least-once. The plugin waits
+ the ack from destination's in_forward plugin.
+ type: boolean
+ security:
+ properties:
+ allow_anonymous_source:
+                      description: Allow anonymous source. Sections are required
+                        if disabled.
+ type: boolean
+ self_hostname:
+ description: Hostname
+ type: string
+ shared_key:
+ description: Shared key for authentication.
+ type: string
+ user_auth:
+ description: If true, use user based authentication.
+ type: boolean
+ required:
+ - self_hostname
+ - shared_key
+ type: object
+ send_timeout:
+ description: 'The timeout time when sending event logs. (default:
+ 60)'
+ type: integer
+ servers:
+ description: Server definitions at least one is required
+ items:
+ description: server
+ properties:
+ host:
+ description: The IP address or host name of the server.
+ type: string
+ name:
+ description: The name of the server. Used for logging and
+ certificate verification in TLS transport (when host is
+ address).
+ type: string
+ password:
+ description: The password for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ port:
+ description: 'The port number of the host. Note that both
+ TCP packets (event stream) and UDP packets (heartbeat message)
+ are sent to this port. (default: 24224)'
+ type: integer
+ shared_key:
+ description: The shared key per server.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ standby:
+ description: Marks a node as the standby node for an Active-Standby
+ model between Fluentd nodes. When an active node goes down,
+ the standby node is promoted to an active node. The standby
+ node is not used by the out_forward plugin until then.
+ type: boolean
+ username:
+ description: The username for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ weight:
+ description: 'The load balancing weight. If the weight of
+ one server is 20 and the weight of the other server is 30,
+ events are sent in a 2:3 ratio. (default: 60).'
+ type: integer
+ required:
+ - host
+ type: object
+ type: array
+ tls_allow_self_signed_cert:
+ description: 'Allow self signed certificates or not. (default: false)'
+ type: boolean
+ tls_cert_logical_store_name:
+ description: The certificate logical store name on Windows system
+ certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_path:
+ description: The additional CA certificate path for TLS.
+ type: string
+ tls_cert_thumbprint:
+ description: The certificate thumbprint for searching from Windows
+ system certstore This parameter is for Windows only.
+ type: string
+ tls_cert_use_enterprise_store:
+ description: Enable to use certificate enterprise store on Windows
+ system certstore. This parameter is for Windows only.
+ type: boolean
+ tls_ciphers:
+ description: 'The cipher configuration of TLS transport. (default:
+ ALL:!aNULL:!eNULL:!SSLv2)'
+ type: string
+ tls_client_cert_path:
+ description: The client certificate path for TLS
+ type: string
+ tls_client_private_key_passphrase:
+ description: The client private key passphrase for TLS.
+ type: string
+ tls_client_private_key_path:
+ description: The client private key path for TLS.
+ type: string
+ tls_insecure_mode:
+ description: 'Skip all verification of certificates or not. (default:
+ false)'
+ type: boolean
+ tls_verify_hostname:
+ description: 'Verify hostname of servers and certificates or not
+ in TLS transport. (default: true)'
+ type: boolean
+ tls_version:
+ description: 'The default version of TLS transport. [TLSv1_1, TLSv1_2]
+ (default: TLSv1_2)'
+ type: string
+ verify_connection_at_startup:
+ description: 'Verify that a connection can be made with one of out_forward
+ nodes at the time of startup. (default: false)'
+ type: boolean
+ required:
+ - servers
+ type: object
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+                  description: 'Create GCS bucket if it does not exist (default:
+ true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+ description: User provided web-safe keys and arbitrary string values
+                    that will be returned with requests for the file as "x-goog-meta-"
+ response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+ description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+ description: 'Create OSS bucket if it does not exist (default:
+ false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+ description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+ description: Given a threshold to treat events as delay, output
+ warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+ description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+ description: Profile name. Defaults to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+ description: Specifies the algorithm to use when encrypting the
+ object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+ warn_for_delay:
+ description: Given a threshold to treat events as delay, output
+ warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ sumologic:
+ properties:
+ add_timestamp:
+ description: 'Add timestamp (or timestamp_key) field to logs before
+ sending to sumologic (default: true)'
+ type: boolean
+ data_type:
+ description: 'The type of data that will be sent to Sumo Logic,
+ either logs or metrics (default: logs)'
+ type: string
+ disable_cookies:
+ description: 'Option to disable cookies on the HTTP Client. (default:
+ false)'
+ type: boolean
+ endpoint:
+ description: SumoLogic HTTP Collector URL
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ log_format:
+ description: 'Format to post logs into Sumo. (default: json)'
+ type: string
+ log_key:
+ description: 'Used to specify the key when merging json or sending
+ logs in text format (default: message)'
+ type: string
+ metric_data_format:
+ description: 'The format of metrics you will be sending, either
+ graphite or carbon2 or prometheus (default: graphite)'
+ type: string
+ open_timeout:
+ description: 'Set timeout seconds to wait until connection is opened.
+ (default: 60)'
+ type: integer
+ proxy_uri:
+ description: Add the uri of the proxy environment if present.
+ type: string
+ source_category:
+ description: 'Set _sourceCategory metadata field within SumoLogic
+ (default: nil)'
+ type: string
+ source_host:
+ description: 'Set _sourceHost metadata field within SumoLogic (default:
+ nil)'
+ type: string
+ source_name:
+ description: Set _sourceName metadata field within SumoLogic - overrides
+ source_name_key (default is nil)
+ type: string
+ source_name_key:
+ description: 'Set as source::path_key''s value so that the source_name
+ can be extracted from Fluentd''s buffer (default: source_name)'
+ type: string
+ timestamp_key:
+ description: 'Field name when add_timestamp is on (default: timestamp)'
+ type: string
+ verify_ssl:
+ description: 'Verify ssl certificate. (default: true)'
+ type: boolean
+ required:
+ - endpoint
+ - source_name
+ type: object
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ required:
+ - spec
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_flows.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_flows.yaml
new file mode 100644
index 000000000..1a5dc7a83
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.io_flows.yaml
@@ -0,0 +1,145 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: flows.logging.banzaicloud.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.loggingRef
+ name: Logging
+ type: string
+ group: logging.banzaicloud.io
+ names:
+ kind: Flow
+ listKind: FlowList
+ plural: flows
+ singular: flow
+ scope: ""
+ subresources: {}
+ validation:
+ openAPIV3Schema:
+ description: Flow Kubernetes object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FlowSpec is the Kubernetes spec for Flows
+ properties:
+ filters:
+ items:
+ description: Filter definition for FlowSpec
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid records to the @ERROR label. Invalid
+ cases are: the key does not exist, the format does not match, or
+ an unexpected error occurs'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+ description: 'Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+ description: Remove key_name field when parsing succeeds
+ type: boolean
+ replace_invalid_sequence:
+ description: If true, invalid strings are replaced with safe
+ characters and re-parsed.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+ description: Re-tag log messages. More info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_loggings.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_loggings.yaml
new file mode 100644
index 000000000..cedc5432f
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.io_loggings.yaml
@@ -0,0 +1,416 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: loggings.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: Logging
+ listKind: LoggingList
+ plural: loggings
+ singular: logging
+ scope: Cluster
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: Logging is the Schema for the loggings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LoggingSpec defines the desired state of Logging
+ properties:
+ controlNamespace:
+ type: string
+ flowConfigCheckDisabled:
+ type: boolean
+ flowConfigOverride:
+ type: string
+ fluentbit:
+ description: FluentbitSpec defines the desired state of Fluentbit
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ image:
+ description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ parser:
+ type: string
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ targetHost:
+ type: string
+ targetPort:
+ format: int32
+ type: integer
+ tls:
+ description: FluentbitTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ tolerations:
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using the
+ matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ fluentd:
+ description: FluentdSpec defines the desired state of Fluentd
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ configReloaderImage:
+ description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ disablePvc:
+ type: boolean
+ fluentdPvcSpec:
+ description: PersistentVolumeClaimSpec describes the common attributes
+ of storage devices and allows a Source for provider-specific attributes
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: This field requires the VolumeSnapshotDataSource
+ alpha feature gate to be enabled and currently VolumeSnapshot
+ is the only supported data source. If the provisioner can
+ support VolumeSnapshot data source, it will create a new volume
+ and data will be restored to the volume at the same time.
+ If the provisioner does not support VolumeSnapshot data source,
+ volume will not be created and the failure will be reported
+ as an event. In the future, we plan to support more data source
+ types and the behavior of the provisioner may change.
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being
+ referenced. If APIGroup is not specified, the specified
+ Kind must be in the core API group. For any other third-party
+ types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the
+ volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not included
+ in claim spec. This is a beta feature.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ image:
+ description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ port:
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ tls:
+ description: FluentdTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ tolerations:
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using the
+ matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeModImage:
+ description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ type: object
+ loggingRef:
+ type: string
+ watchNamespaces:
+ items:
+ type: string
+ type: array
+ required:
+ - controlNamespace
+ type: object
+ status:
+ description: LoggingStatus defines the observed state of Logging
+ properties:
+ configCheckResults:
+ additionalProperties:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml
new file mode 100644
index 000000000..de30ee7c4
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml
@@ -0,0 +1,2268 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: outputs.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: Output
+ listKind: OutputList
+ plural: outputs
+ singular: output
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: Output is the Schema for the outputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OutputSpec defines the desired state of Output
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+ description: 'Automatically create the container if it does not exist
+ (default: true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+ description: 'Azure storage type. Currently only "blob" is supported
+ (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+ description: Specify the string and its value to be replaced in the
+ form of a hash. It can contain multiple key-value pairs to be
+ replaced in the specified template_file. This setting only creates
+ the template; to add a rollover index, please check the rollover_index
+ configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+ description: 'Indicates whether to fail when max_retry_putting_template
+ is exceeded. If you have multiple output plugins, you can use this
+ property so that fluentd does not fail on startup.(default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+ You can use this option to enforce URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+              description: 'Set the Logstash prefix.(default: logstash)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+ description: 'You can specify times of retry obtaining Elasticsearch
+ version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+ description: 'You can specify times of retry putting template.(default:
+ 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+              description: This param is used to set a pipeline id of your elasticsearch
+                to be added into the request; with this you can configure an ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: integer
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+                JSON encoder/decoder. When this parameter is set to true, Elasticsearch
+                client uses Oj as JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+                only on "host unreachable exceptions". We recommend setting this
+                to true in the presence of elasticsearch shield.(default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Connection scheme (default: http)'
+ type: string
+ ssl_verify:
+ description: 'Skip ssl verification (default: true)'
+ type: boolean
+ ssl_version:
+ description: If you want to configure SSL/TLS version, you can specify
+ ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]
+ type: string
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+              description: 'By default, records are inserted into the index logstash-YYMMDD
+                with UTC (Coordinated Universal Time). This option allows using
+                local time if you set utc_index to false.(default: true)'
+ type: boolean
+ with_transporter_log:
+              description: 'This is a debugging-purpose option for obtaining
+                transporter layer logs. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ required:
+ - ssl_verify
+ type: object
+ forward:
+ properties:
+ ack_response_timeout:
+ description: 'This option is used when require_ack_response is true.
+ This default value is based on popular tcp_syn_retries. (default:
+ 190)'
+ type: integer
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                  Output plugin will flush the chunk when actual size reaches
+                  chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ in it
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+                  Once the total size of the stored buffer reaches this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ connect_timeout:
+ description: The timeout time for socket connect. When the connection
+ timed out during establishment, Errno::ETIMEDOUT is raised.
+ type: integer
+ dns_round_robin:
+              description: Enable client-side DNS round robin. Uniformly and randomly
+                picks an IP address to send data when a hostname has several IP
+ addresses. `heartbeat_type udp` is not available with `dns_round_robin
+ true`. Use `heartbeat_type tcp` or `heartbeat_type none`.
+ type: boolean
+ expire_dns_cache:
+ description: 'Set TTL to expire DNS cache in seconds. Set 0 not
+                to use DNS Cache. (default: 0)'
+ type: integer
+ hard_timeout:
+ description: 'The hard timeout used to detect server failure. The
+ default value is equal to the send_timeout parameter. (default:
+ 60)'
+ type: integer
+ heartbeat_interval:
+              description: 'The interval of the heartbeat packet. (default: 1)'
+ type: integer
+ heartbeat_type:
+ description: The transport protocol to use for heartbeats. Set "none"
+ to disable heartbeat. [transport, tcp, udp, none]
+ type: string
+ ignore_network_errors_at_startup:
+ description: Ignore DNS resolution and errors at startup time.
+ type: boolean
+ keepalive:
+ description: 'Enable keepalive connection. (default: false)'
+ type: boolean
+ keepalive_timeout:
+ description: 'Expired time of keepalive. Default value is nil, which
+ means to keep connection as long as possible. (default: 0)'
+ type: integer
+ phi_failure_detector:
+ description: 'Use the "Phi accrual failure detector" to detect server
+ failure. (default: true)'
+ type: boolean
+ phi_threshold:
+ description: 'The threshold parameter used to detect server faults.
+ (default: 16) `phi_threshold` is deeply related to `heartbeat_interval`.
+ If you are using longer `heartbeat_interval`, please use the larger
+ `phi_threshold`. Otherwise you will see frequent detachments of
+ destination servers. The default value 16 is tuned for `heartbeat_interval`
+ 1s.'
+ type: integer
+ recover_wait:
+ description: 'The wait time before accepting a server fault recovery.
+ (default: 10)'
+ type: integer
+ require_ack_response:
+ description: Change the protocol to at-least-once. The plugin waits
+                for the ack from the destination's in_forward plugin.
+ type: boolean
+ security:
+ properties:
+ allow_anonymous_source:
+ description: Allow anonymous source. sections are required
+ if disabled.
+ type: boolean
+ self_hostname:
+ description: Hostname
+ type: string
+ shared_key:
+ description: Shared key for authentication.
+ type: string
+ user_auth:
+ description: If true, use user based authentication.
+ type: boolean
+ required:
+ - self_hostname
+ - shared_key
+ type: object
+ send_timeout:
+ description: 'The timeout time when sending event logs. (default:
+ 60)'
+ type: integer
+ servers:
+              description: Server definitions. At least one is required.
+ items:
+ description: server
+ properties:
+ host:
+ description: The IP address or host name of the server.
+ type: string
+ name:
+ description: The name of the server. Used for logging and
+ certificate verification in TLS transport (when host is
+ address).
+ type: string
+ password:
+ description: The password for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ port:
+ description: 'The port number of the host. Note that both
+ TCP packets (event stream) and UDP packets (heartbeat message)
+ are sent to this port. (default: 24224)'
+ type: integer
+ shared_key:
+ description: The shared key per server.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ standby:
+ description: Marks a node as the standby node for an Active-Standby
+ model between Fluentd nodes. When an active node goes down,
+ the standby node is promoted to an active node. The standby
+ node is not used by the out_forward plugin until then.
+ type: boolean
+ username:
+ description: The username for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ weight:
+ description: 'The load balancing weight. If the weight of
+ one server is 20 and the weight of the other server is 30,
+ events are sent in a 2:3 ratio. (default: 60).'
+ type: integer
+ required:
+ - host
+ type: object
+ type: array
+ tls_allow_self_signed_cert:
+ description: 'Allow self signed certificates or not. (default: false)'
+ type: boolean
+ tls_cert_logical_store_name:
+ description: The certificate logical store name on Windows system
+ certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_path:
+ description: The additional CA certificate path for TLS.
+ type: string
+ tls_cert_thumbprint:
+ description: The certificate thumbprint for searching from Windows
+                system certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_use_enterprise_store:
+              description: Enable using the certificate enterprise store on Windows
+ system certstore. This parameter is for Windows only.
+ type: boolean
+ tls_ciphers:
+ description: 'The cipher configuration of TLS transport. (default:
+ ALL:!aNULL:!eNULL:!SSLv2)'
+ type: string
+ tls_client_cert_path:
+ description: The client certificate path for TLS
+ type: string
+ tls_client_private_key_passphrase:
+ description: The client private key passphrase for TLS.
+ type: string
+ tls_client_private_key_path:
+ description: The client private key path for TLS.
+ type: string
+ tls_insecure_mode:
+ description: 'Skip all verification of certificates or not. (default:
+ false)'
+ type: boolean
+ tls_verify_hostname:
+ description: 'Verify hostname of servers and certificates or not
+ in TLS transport. (default: true)'
+ type: boolean
+ tls_version:
+ description: 'The default version of TLS transport. [TLSv1_1, TLSv1_2]
+ (default: TLSv1_2)'
+ type: string
+ verify_connection_at_startup:
+ description: 'Verify that a connection can be made with one of out_forward
+ nodes at the time of startup. (default: false)'
+ type: boolean
+ required:
+ - servers
+ type: object
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+              description: 'Create GCS bucket if it does not exist (default:
+ true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                  Output plugin will flush the chunk when actual size reaches
+                  chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ in it
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+                  Once the total size of the stored buffer reaches this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+ description: User provided web-safe keys and arbitrary string values
+                that will be returned with requests for the file as "x-goog-meta-"
+ response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                  Output plugin will flush the chunk when actual size reaches
+                  chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ in it
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+                  Once the total size of the stored buffer reaches this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+ description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+              description: 'Create OSS bucket if it does not exist (default:
+                false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                  Output plugin will flush the chunk when actual size reaches
+                  chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ in it
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+                  Once the total size of the stored buffer reaches this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+              description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+              description: Given a threshold to treat events as delayed, output
+ warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+              description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                  Output plugin will flush the chunk when actual size reaches
+                  chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ in it
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+                  Once the total size of the stored buffer reaches this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+ description: Profile name. Default to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+              description: Specifies the algorithm to use when encrypting the
+ object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+              description: Given a threshold to treat events as delayed, output
+ description: Given a threshold to treat events as delay, output
+ warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ sumologic:
+ properties:
+ add_timestamp:
+ description: 'Add timestamp (or timestamp_key) field to logs before
+ sending to sumologic (default: true)'
+ type: boolean
+ data_type:
+ description: 'The type of data that will be sent to Sumo Logic,
+ either logs or metrics (default: logs)'
+ type: string
+ disable_cookies:
+ description: 'Option to disable cookies on the HTTP Client. (default:
+ false)'
+ type: boolean
+ endpoint:
+ description: SumoLogic HTTP Collector URL
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ log_format:
+ description: 'Format to post logs into Sumo. (default: json)'
+ type: string
+ log_key:
+ description: 'Used to specify the key when merging json or sending
+ logs in text format (default: message)'
+ type: string
+ metric_data_format:
+ description: 'The format of metrics you will be sending, either
+ graphite or carbon2 or prometheus (default: graphite)'
+ type: string
+ open_timeout:
+ description: 'Set timeout seconds to wait until connection is opened.
+ (default: 60)'
+ type: integer
+ proxy_uri:
+ description: Add the uri of the proxy environment if present.
+ type: string
+ source_category:
+ description: 'Set _sourceCategory metadata field within SumoLogic
+ (default: nil)'
+ type: string
+ source_host:
+ description: 'Set _sourceHost metadata field within SumoLogic (default:
+ nil)'
+ type: string
+ source_name:
+ description: Set _sourceName metadata field within SumoLogic - overrides
+ source_name_key (default is nil)
+ type: string
+ source_name_key:
+ description: 'Set as source::path_key''s value so that the source_name
+ can be extracted from Fluentd''s buffer (default: source_name)'
+ type: string
+ timestamp_key:
+ description: 'Field name when add_timestamp is on (default: timestamp)'
+ type: string
+ verify_ssl:
+ description: 'Verify ssl certificate. (default: true)'
+ type: boolean
+ required:
+ - endpoint
+ - source_name
+ type: object
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/logging-operator/templates/psp.yaml b/charts/logging-operator/templates/psp.yaml
index 25be0f127..515d29448 100644
--- a/charts/logging-operator/templates/psp.yaml
+++ b/charts/logging-operator/templates/psp.yaml
@@ -26,6 +26,6 @@ spec:
seLinux:
rule: RunAsAny
volumes:
- - secret
- - configMap
-{{ end }}
+ - secret
+ - configMap
+{{ end }}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/rbac.yaml b/charts/logging-operator/templates/rbac.yaml
index b49e8f3ae..dfda89278 100644
--- a/charts/logging-operator/templates/rbac.yaml
+++ b/charts/logging-operator/templates/rbac.yaml
@@ -20,90 +20,73 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
-- apiGroups:
- - logging.banzaicloud.com
- resources:
- - plugins
- - fluentds
- - fluentbits
- verbs:
- - "*"
-- apiGroups:
- - ""
- - apps
- - autoscaling
- - batch
- - extensions
- - policy
- - rbac.authorization.k8s.io
- resources:
- - namespaces
- - nodes
- - persistentvolumeclaims
- - pods
- - services
- - resourcequotas
- - replicationcontrollers
- - limitranges
- - persistentvolumeclaims
- - persistentvolumes
- - endpoints
- - secrets
- - configmaps
- - serviceaccounts
- - clusterroles
- - clusterrolebindings
- verbs:
- - "*"
-- apiGroups:
- - apps
- resources:
- - daemonsets
- - deployments
- - replicasets
- verbs:
- - "*"
-- apiGroups:
- - extensions
- resources:
- - daemonsets
- - deployments
- - replicasets
- verbs:
- - "*"
-- apiGroups:
- - apps
- resources:
- - statefulsets
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - batch
- resources:
- - cronjobs
- - jobs
- verbs:
- - list
- - watch
-- apiGroups:
- - autoscaling
- resources:
- - horizontalpodautoscalers
- verbs:
- - list
- - watch
-{{- if .Values.rbac.psp.enabled }}
-- apiGroups:
- - extensions
- resources:
- - podsecuritypolicies
- resourceNames:
- - psp.logging-operator
- verbs:
- - use
-{{- end }}
+ - apiGroups:
+ - logging.banzaicloud.io
+ resources:
+ - loggings
+ - flows
+ - clusterflows
+ - outputs
+ - clusteroutputs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - logging.banzaicloud.io
+ resources:
+ - loggings/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ - apps
+ - batch
+ - extensions
+ - policy
+ - rbac.authorization.k8s.io
+ resources:
+ - namespaces
+ - nodes
+ - persistentvolumeclaims
+ - pods
+ - services
+ - resourcequotas
+ - replicationcontrollers
+ - limitranges
+ - persistentvolumeclaims
+ - persistentvolumes
+ - endpoints
+ - secrets
+ - configmaps
+ - serviceaccounts
+ - roles
+ - rolebindings
+ - clusterroles
+ - clusterrolebindings
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ - jobs
+ verbs:
+ - "*"
+ {{- if .Values.rbac.psp.enabled }}
+ - apiGroups:
+ - extensions
+ resources:
+ - podsecuritypolicies
+ resourceNames:
+ - psp.logging-operator
+ verbs:
+ - use
+ {{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
@@ -115,12 +98,12 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
subjects:
-- kind: ServiceAccount
- name: {{ template "logging-operator.fullname" . }}
- namespace: {{ .Release.Namespace }}
+ - kind: ServiceAccount
+ name: {{ template "logging-operator.fullname" . }}
+ namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "logging-operator.fullname" . }}
-{{- end }}
+ {{- end }}
diff --git a/charts/logging-operator/templates/service.yaml b/charts/logging-operator/templates/service.yaml
new file mode 100644
index 000000000..b1aee6fca
--- /dev/null
+++ b/charts/logging-operator/templates/service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "logging-operator.fullname" . }}
+ labels:
+{{ include "logging-operator.labels" . | indent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - port: {{ .Values.http.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ app.kubernetes.io/name: {{ include "logging-operator.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: query
\ No newline at end of file
diff --git a/charts/logging-operator/values.yaml b/charts/logging-operator/values.yaml
index cb60d8e0c..b6206d532 100644
--- a/charts/logging-operator/values.yaml
+++ b/charts/logging-operator/values.yaml
@@ -6,39 +6,18 @@ replicaCount: 1
image:
repository: banzaicloud/logging-operator
- tag: 0.2.2
+ tag: 2.0.1
pullPolicy: IfNotPresent
+imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
-# Namespace to watch fot LoggingOperator CRD
-watchNamespace: ""
-
-grafana:
- dashboard:
- enabled: true
-
-
-## Role Based Access
-## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
-##
-rbac:
- enabled: true
- ## Pod Security Policy
- ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
- ##
- psp:
- enabled: false
-
-## Define resources requests and limits for single Pods.
-## ref: https://kubernetes.io/docs/user-guide/compute-resources/
-## We usually recommend not to specify default resources and to leave this as a conscious
-## choice for the user. This also increases chances charts run on environments with little
-## resources, such as Minikube. If you do want to specify resources, uncomment the following
-## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-##
resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
@@ -46,46 +25,24 @@ resources: {}
# cpu: 100m
# memory: 128Mi
-
-## Define which Nodes the Pods are scheduled on.
-## ref: https://kubernetes.io/docs/user-guide/node-selection/
-##
nodeSelector: {}
-
-## If specified, the pod's tolerations.
-## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
tolerations: []
-# - key: "key"
-# operator: "Equal"
-# value: "value"
-# effect: "NoSchedule"
-## Assign the Logging operator to run on specific nodes
-## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-##
affinity: {}
-# requiredDuringSchedulingIgnoredDuringExecution:
-# nodeSelectorTerms:
-# - matchExpressions:
-# - key: kubernetes.io/e2e-az-name
-# operator: In
-# values:
-# - e2e-az1
-# - e2e-az2
+http:
+ # http listen port number
+ port: 8080
+ # Service definition for query http service
+ service:
+ type: ClusterIP
+ # Annotations to query http service
+ annotations: {}
+ # Labels to query http service
+ labels: {}
-## SecurityContext holds pod-level security attributes and common container settings.
-## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-##
-podSecurityContext:
- runAsNonRoot: true
- runAsUser: 1000
- fsGroup: 2000
-securityContext:
- allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
- # capabilities:
- # drop: ["ALL"]
+rbac:
+ enabled: true
+ psp:
+ enabled: false
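As a usage sketch under the new defaults above, a custom values file passed to `helm install -f` could override the image tag, HTTP port and PSP setting as follows; the file name and the chosen values are hypothetical.

```yaml
# my-values.yaml -- illustrative overrides; key paths mirror the chart defaults above
image:
  tag: 2.0.1
http:
  port: 8080
rbac:
  enabled: true
  psp:
    enabled: true   # also renders the podsecuritypolicies rule guarded in rbac.yaml
```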
diff --git a/charts/nginx-logging-demo/templates/logging.yaml b/charts/nginx-logging-demo/templates/logging.yaml
deleted file mode 100644
index a0bfb0837..000000000
--- a/charts/nginx-logging-demo/templates/logging.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: {{ include "nginx-logging-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- input:
- label:
- app: {{ include "nginx-logging-demo.name" . }}
-{{- if .Values.forwarding.enabled }}
- output:
- - type: forward
- name: forward
- parameters:
- - name: host
- value: {{ .Values.forwarding.targetHost | quote }}
- - name: port
- value: {{ .Values.forwarding.targetPort | quote }}
- - name: name
- value: {{ .Values.forwarding.targetHost | quote }}
-{{- if .Values.forwarding.tlsSharedKey }}
- - name: tlsSharedKey
- value: {{ .Values.forwarding.tlsSharedKey | b64enc | quote }}
-{{- end }}
-{{- end }}
- filter:
- - type: parser
- name: parser-nginx
- parameters:
- - name: format
- value: '/^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$/'
- - name: timeFormat
- value: "%d/%b/%Y:%H:%M:%S %z"
\ No newline at end of file
diff --git a/charts/nginx-logging-demo/templates/tests/test-connection.yaml b/charts/nginx-logging-demo/templates/tests/test-connection.yaml
deleted file mode 100644
index 026771b04..000000000
--- a/charts/nginx-logging-demo/templates/tests/test-connection.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: "{{ include "nginx-logging-demo.fullname" . }}-test-connection"
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": test-success
-spec:
- containers:
- - name: wget
- image: busybox
- command: ['wget']
- args: ['{{ include "nginx-logging-demo.fullname" . }}:{{ .Values.service.port }}']
- restartPolicy: Never
diff --git a/charts/nginx-logging-es-demo/Chart.yaml b/charts/nginx-logging-es-demo/Chart.yaml
index dd088068d..c7bcd2f41 100644
--- a/charts/nginx-logging-es-demo/Chart.yaml
+++ b/charts/nginx-logging-es-demo/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v1
-appVersion: "1.0"
+appVersion: "2.0"
description: A Demo application for the logging-operator
name: nginx-logging-es-demo
-version: 0.1.1
+version: 0.2.0
maintainers:
- name: Banzai Cloud
email: info@banzaicloud.com
diff --git a/charts/nginx-logging-es-demo/templates/logging.yaml b/charts/nginx-logging-es-demo/templates/logging.yaml
index 8040b9b5c..bc2776f5a 100644
--- a/charts/nginx-logging-es-demo/templates/logging.yaml
+++ b/charts/nginx-logging-es-demo/templates/logging.yaml
@@ -1,37 +1,8 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
metadata:
- name: {{ include "nginx-logging-es-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- chart: {{ include "nginx-logging-es-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
+ name: {{ include "nginx-logging-es-demo.fullname" . }}-logging
spec:
- input:
- label:
- app: {{ include "nginx-logging-es-demo.name" . }}
-
- output:
- - type: "elasticsearch"
- name: {{ include "nginx-logging-es-demo.name" . }}
- parameters:
- - name: host
- value: "elasticsearch-elasticsearch-cluster"
- - name: port
- value: "9200"
- - name: scheme
- value: "https"
- - name: sslVerify
- value: "false"
- - name: sslVersion
- value: "TLSv1_2"
-
- filter:
- - type: parser
- name: parser-nginx
- parameters:
- - name: format
- value: '/^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$/'
- - name: timeFormat
- value: "%d/%b/%Y:%H:%M:%S %z"
\ No newline at end of file
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: {{ .Release.Namespace }}
diff --git a/charts/nginx-logging-es-demo/templates/logging_flow.yaml b/charts/nginx-logging-es-demo/templates/logging_flow.yaml
new file mode 100644
index 000000000..dde7bb800
--- /dev/null
+++ b/charts/nginx-logging-es-demo/templates/logging_flow.yaml
@@ -0,0 +1,23 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: {{ include "nginx-logging-es-demo.fullname" . }}-es-flow
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "nginx-logging-es-demo.name" . }}
+ chart: {{ include "nginx-logging-es-demo.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ filters:
+ - tag_normaliser: {}
+ - parser:
+ key_name: message
+ remove_key_name_field: true
+ reserve_data: true
+ parsers:
+ - type: nginx
+ selectors:
+ app: {{ include "nginx-logging-es-demo.name" . }}
+ outputRefs:
+ - {{ include "nginx-logging-es-demo.fullname" . }}-es-output
diff --git a/charts/nginx-logging-es-demo/templates/logging_output.yaml b/charts/nginx-logging-es-demo/templates/logging_output.yaml
new file mode 100644
index 000000000..c01c46e04
--- /dev/null
+++ b/charts/nginx-logging-es-demo/templates/logging_output.yaml
@@ -0,0 +1,22 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: {{ include "nginx-logging-es-demo.fullname" . }}-es-output
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "nginx-logging-es-demo.name" . }}
+ chart: {{ include "nginx-logging-es-demo.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ elasticsearch:
+ host: elasticsearch-elasticsearch-cluster.{{ .Release.Namespace }}.svc.cluster.local
+ port: 9200
+ scheme: https
+ ssl_verify: false
+ ssl_version: TLSv1_2
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 30s
+ timekey_use_utc: true
\ No newline at end of file
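To show how the Flow and Output templates above line up, here is a rough sketch of the rendered Flow for a hypothetical release named `demo` in the `default` namespace (labels omitted); the essential point is that `outputRefs` must match the rendered Output name.

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-nginx-logging-es-demo-es-flow
  namespace: default
spec:
  filters:
    - tag_normaliser: {}
    - parser:
        key_name: message
        remove_key_name_field: true
        reserve_data: true
        parsers:
          - type: nginx
  selectors:
    app: nginx-logging-es-demo
  outputRefs:
    - demo-nginx-logging-es-demo-es-output   # must equal the Output's metadata.name
```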
diff --git a/charts/nginx-logging-loki-demo/.helmignore b/charts/nginx-logging-loki-demo/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/charts/nginx-logging-loki-demo/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/nginx-logging-demo/Chart.yaml b/charts/nginx-logging-loki-demo/Chart.yaml
similarity index 68%
rename from charts/nginx-logging-demo/Chart.yaml
rename to charts/nginx-logging-loki-demo/Chart.yaml
index 92515dcaa..984a49383 100644
--- a/charts/nginx-logging-demo/Chart.yaml
+++ b/charts/nginx-logging-loki-demo/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v1
-appVersion: "1.0"
+appVersion: "2.0"
description: A Demo application for the logging-operator
-name: nginx-logging-demo
-version: 0.1.2
+name: nginx-logging-loki-demo
+version: 0.2.0
maintainers:
- name: Banzai Cloud
email: info@banzaicloud.com
diff --git a/charts/nginx-logging-demo/README.md b/charts/nginx-logging-loki-demo/README.md
similarity index 88%
rename from charts/nginx-logging-demo/README.md
rename to charts/nginx-logging-loki-demo/README.md
index a40dc5cf8..ed47a8422 100644
--- a/charts/nginx-logging-demo/README.md
+++ b/charts/nginx-logging-loki-demo/README.md
@@ -1,18 +1,18 @@
-# Logging Operator Nginx demonstration Chart
+# Logging Operator Nginx & Loki output demonstration Chart
[Logging Operator](https://github.com/banzaicloud/logging-operator) is a managed centralized logging component based on fluentd and fluent-bit.
## tl;dr:
```bash
-$ helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
+$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com/
$ helm repo update
-$ helm install banzaicloud-stable/nginx-logging-demo
+$ helm install banzaicloud-stable/nginx-logging-loki-demo
```
## Introduction
-This chart demonstrates the use of the [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) with an nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+This chart demonstrates the use of the [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) with an Nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
@@ -24,7 +24,7 @@ This chart demonstrates the use of the [Logging Operator](https://github.com/ba
To install the chart with the release name `log-test-nginx`:
```bash
-$ helm install --name log-test-nginx banzaicloud-stable/nginx-logging-demo
+$ helm install --name log-test-nginx banzaicloud-stable/nginx-logging-loki-demo
```
## Uninstalling the Chart
@@ -38,7 +38,7 @@ The command removes all the Kubernetes components associated with the chart and
## Configuration
-The following tables lists the configurable parameters of the nginx-logging-demo chart and their default values.
+The following tables lists the configurable parameters of the nginx-logging-loki-demo.chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
@@ -56,7 +56,7 @@ The following tables lists the configurable parameters of the nginx-logging-demo
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example:
```bash
-$ helm install --name my-release -f values.yaml banzaicloud-stable/nginx-logging-demo
+$ helm install --name my-release -f values.yaml banzaicloud-stable/nginx-logging-loki-demo
```
> **Tip**: You can use the default [values.yaml](values.yaml)
diff --git a/charts/nginx-logging-demo/templates/NOTES.txt b/charts/nginx-logging-loki-demo/templates/NOTES.txt
similarity index 79%
rename from charts/nginx-logging-demo/templates/NOTES.txt
rename to charts/nginx-logging-loki-demo/templates/NOTES.txt
index 46c4f729a..0aa1ba11c 100644
--- a/charts/nginx-logging-demo/templates/NOTES.txt
+++ b/charts/nginx-logging-loki-demo/templates/NOTES.txt
@@ -6,16 +6,16 @@
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
- export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-logging-demo.fullname" . }})
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-logging-loki-demo.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-logging-demo.fullname" . }}'
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-logging-demo.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-logging-loki-demo.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-logging-loki-demo.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
- export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "nginx-logging-demo.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "nginx-logging-loki-demo.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
diff --git a/charts/logging-operator-fluent/templates/_helpers.tpl b/charts/nginx-logging-loki-demo/templates/_helpers.tpl
similarity index 91%
rename from charts/logging-operator-fluent/templates/_helpers.tpl
rename to charts/nginx-logging-loki-demo/templates/_helpers.tpl
index 149739ce7..aa5351287 100644
--- a/charts/logging-operator-fluent/templates/_helpers.tpl
+++ b/charts/nginx-logging-loki-demo/templates/_helpers.tpl
@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
-{{- define "logging-operator-fluent.name" -}}
+{{- define "nginx-logging-loki-demo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
-{{- define "logging-operator-fluent.fullname" -}}
+{{- define "nginx-logging-loki-demo.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@@ -27,6 +27,6 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
-{{- define "logging-operator-fluent.chart" -}}
+{{- define "nginx-logging-loki-demo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
diff --git a/charts/nginx-logging-demo/templates/deployment.yaml b/charts/nginx-logging-loki-demo/templates/deployment.yaml
similarity index 80%
rename from charts/nginx-logging-demo/templates/deployment.yaml
rename to charts/nginx-logging-loki-demo/templates/deployment.yaml
index bf70c6fba..3a6e2cd6e 100644
--- a/charts/nginx-logging-demo/templates/deployment.yaml
+++ b/charts/nginx-logging-loki-demo/templates/deployment.yaml
@@ -1,22 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
- name: {{ include "nginx-logging-demo.fullname" . }}
+ name: {{ include "nginx-logging-loki-demo.fullname" . }}
labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ chart: {{ include "nginx-logging-loki-demo.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
- app: {{ include "nginx-logging-demo.name" . }}
+ app: {{ include "nginx-logging-loki-demo.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
- app: {{ include "nginx-logging-demo.name" . }}
+ app: {{ include "nginx-logging-loki-demo.name" . }}
release: {{ .Release.Name }}
spec:
containers:
diff --git a/charts/nginx-logging-demo/templates/ingress.yaml b/charts/nginx-logging-loki-demo/templates/ingress.yaml
similarity index 81%
rename from charts/nginx-logging-demo/templates/ingress.yaml
rename to charts/nginx-logging-loki-demo/templates/ingress.yaml
index b64548268..31abb93d4 100644
--- a/charts/nginx-logging-demo/templates/ingress.yaml
+++ b/charts/nginx-logging-loki-demo/templates/ingress.yaml
@@ -1,12 +1,12 @@
{{- if .Values.ingress.enabled -}}
-{{- $fullName := include "nginx-logging-demo.fullname" . -}}
+{{- $fullName := include "nginx-logging-loki-demo.fullname" . -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ chart: {{ include "nginx-logging-loki-demo.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- with .Values.ingress.annotations }}
diff --git a/charts/nginx-logging-loki-demo/templates/logging.yaml b/charts/nginx-logging-loki-demo/templates/logging.yaml
new file mode 100644
index 000000000..21a29b6d4
--- /dev/null
+++ b/charts/nginx-logging-loki-demo/templates/logging.yaml
@@ -0,0 +1,8 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: {{ include "nginx-logging-loki-demo.fullname" . }}-logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: {{ .Release.Namespace }}
diff --git a/charts/nginx-logging-loki-demo/templates/logging_flow.yaml b/charts/nginx-logging-loki-demo/templates/logging_flow.yaml
new file mode 100644
index 000000000..9e1cddf18
--- /dev/null
+++ b/charts/nginx-logging-loki-demo/templates/logging_flow.yaml
@@ -0,0 +1,23 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: {{ include "nginx-logging-loki-demo.fullname" . }}-loki-flow
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ chart: {{ include "nginx-logging-loki-demo.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ filters:
+ - tag_normaliser: {}
+ - parser:
+ key_name: message
+ remove_key_name_field: true
+ reserve_data: true
+ parsers:
+ - type: nginx
+ selectors:
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ outputRefs:
+ - {{ include "nginx-logging-loki-demo.fullname" . }}-loki-output
diff --git a/charts/nginx-logging-loki-demo/templates/logging_output.yaml b/charts/nginx-logging-loki-demo/templates/logging_output.yaml
new file mode 100644
index 000000000..d4c2966b6
--- /dev/null
+++ b/charts/nginx-logging-loki-demo/templates/logging_output.yaml
@@ -0,0 +1,18 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: {{ include "nginx-logging-loki-demo.fullname" . }}-loki-output
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ chart: {{ include "nginx-logging-loki-demo.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ loki:
+ url: http://loki:3100
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 30s
+ timekey_use_utc: true
\ No newline at end of file
diff --git a/charts/nginx-logging-demo/templates/service.yaml b/charts/nginx-logging-loki-demo/templates/service.yaml
similarity index 57%
rename from charts/nginx-logging-demo/templates/service.yaml
rename to charts/nginx-logging-loki-demo/templates/service.yaml
index f07f68c51..46b207bd7 100644
--- a/charts/nginx-logging-demo/templates/service.yaml
+++ b/charts/nginx-logging-loki-demo/templates/service.yaml
@@ -1,10 +1,10 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ include "nginx-logging-demo.fullname" . }}
+ name: {{ include "nginx-logging-loki-demo.fullname" . }}
labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ chart: {{ include "nginx-logging-loki-demo.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
@@ -15,5 +15,5 @@ spec:
protocol: TCP
name: http
selector:
- app: {{ include "nginx-logging-demo.name" . }}
+ app: {{ include "nginx-logging-loki-demo.name" . }}
release: {{ .Release.Name }}
diff --git a/charts/nginx-logging-loki-demo/templates/tests/test-connection.yaml b/charts/nginx-logging-loki-demo/templates/tests/test-connection.yaml
new file mode 100644
index 000000000..e3d976f72
--- /dev/null
+++ b/charts/nginx-logging-loki-demo/templates/tests/test-connection.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "nginx-logging-loki-demo.fullname" . }}-test-connection"
+ labels:
+ app: {{ include "nginx-logging-loki-demo.name" . }}
+ chart: {{ include "nginx-logging-loki-demo.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ "helm.sh/hook": test-success
+spec:
+ containers:
+ - name: wget
+ image: busybox
+ command: ['wget']
+ args: ['{{ include "nginx-logging-loki-demo.fullname" . }}:{{ .Values.service.port }}']
+ restartPolicy: Never
diff --git a/charts/nginx-logging-demo/values.yaml b/charts/nginx-logging-loki-demo/values.yaml
similarity index 82%
rename from charts/nginx-logging-demo/values.yaml
rename to charts/nginx-logging-loki-demo/values.yaml
index 2bdee916f..760982f84 100644
--- a/charts/nginx-logging-demo/values.yaml
+++ b/charts/nginx-logging-loki-demo/values.yaml
@@ -1,4 +1,4 @@
-# Default values for nginx-logging-demo.
+# Default values for nginx-logging-loki-demo.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
@@ -30,13 +30,6 @@ ingress:
# hosts:
# - chart-example.local
-forwarding:
- enabled: false
- # will use the the existing tls secret used by the fluentd input
- #tlsSharedKey: example
- #targetHost: fluentd.target.svc
- #targetPort: 24240
-
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
diff --git a/cmd/docgen/docgen.go b/cmd/docgen/docgen.go
deleted file mode 100644
index cec38d83e..000000000
--- a/cmd/docgen/docgen.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "regexp"
- "text/template"
- "text/template/parse"
-
- "github.com/Masterminds/sprig"
- "github.com/banzaicloud/logging-operator/pkg/resources/plugins"
-)
-
-//TODO handle parameters
-func main() {
- pluginMap := plugins.GetAll()
- var indexPage bytes.Buffer
- indexPage.WriteString("# List of ")
- for name, plugin := range pluginMap {
- var data bytes.Buffer
- data.WriteString(fmt.Sprintf("# Plugin %s\n", name))
- t := template.New("PluginTemplate").Funcs(sprig.TxtFuncMap())
- t, err := t.Parse(plugin.Template)
- if err != nil {
- panic(err)
- }
- data.WriteString("## Variables\n")
- data.WriteString("| Variable name | Default | Applied function |\n")
- data.WriteString(fmt.Sprintf("|---|---|---|\n"))
- for _, item := range listTemplateFields(t) {
- regExp, err := regexp.Compile(`{{(?P<function>\w*)?\s*.(?P<variable>.*)}}`)
- if err != nil {
- panic(err)
- }
- matches := regExp.FindStringSubmatch(item)
- vairableName := matches[2]
- variableFunc := matches[1]
- defaultValue, ok := plugin.DefaultValues[matches[2]]
- if !ok {
- defaultValue = "-"
- }
- data.WriteString(fmt.Sprintf("| %s | %s | %s |\n", vairableName, defaultValue, variableFunc))
-
- }
- data.WriteString("## Plugin template\n")
- data.WriteString("```" + plugin.Template + "\n```")
- err = ioutil.WriteFile("docs/plugins/"+name+".md", data.Bytes(), 0644)
- if err != nil {
- panic(err)
- }
- }
-}
-
-func listTemplateFields(t *template.Template) []string {
- return listNodeFields(t.Tree.Root, nil)
-}
-
-func listNodeFields(node parse.Node, res []string) []string {
- if node.Type() == parse.NodeAction {
- if !contains(node.String(), res) {
- res = append(res, node.String())
- }
- }
-
- if ifn, ok := node.(*parse.IfNode); ok {
- for _, n := range ifn.List.Nodes {
- res = listNodeFields(n, res)
- }
- if ifn.ElseList != nil {
- for _, n := range ifn.ElseList.Nodes {
- res = listNodeFields(n, res)
- }
- }
- }
-
- if ln, ok := node.(*parse.ListNode); ok {
- for _, n := range ln.Nodes {
- res = listNodeFields(n, res)
- }
- }
- return res
-}
-
-func contains(s string, sl []string) bool {
- for _, i := range sl {
- if i == s {
- return true
- }
- }
- return false
-}
diff --git a/cmd/docs.go b/cmd/docs.go
new file mode 100644
index 000000000..0e152681b
--- /dev/null
+++ b/cmd/docs.go
@@ -0,0 +1,306 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/go-logr/logr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var log logr.Logger
+
+type doc struct {
+ Name string
+ Content string
+ Type string
+ RootNode *ast.File
+}
+
+func (d *doc) append(line string) {
+ d.Content = d.Content + line + "\n"
+}
+
+func (d *doc) checkNodes(n ast.Node) bool {
+ generic, ok := n.(*ast.GenDecl)
+ if ok {
+ typeName, ok := generic.Specs[0].(*ast.TypeSpec)
+ if ok {
+ _, ok := typeName.Type.(*ast.InterfaceType)
+ if ok && typeName.Name.Name == "_doc" {
+ d.append(fmt.Sprintf("# %s", getTypeName(generic, d.Name)))
+ d.append("## Overview")
+ d.append(getTypeDocs(generic))
+ d.append("## Configuration")
+ }
+ structure, ok := typeName.Type.(*ast.StructType)
+ if ok {
+ d.append(fmt.Sprintf("### %s", getTypeName(generic, typeName.Name.Name)))
+ if getTypeDocs(generic) != "" {
+ d.append(fmt.Sprintf("#### %s", getTypeDocs(generic)))
+ }
+ d.append("| Variable Name | Type | Required | Default | Description |")
+ d.append("|---|---|---|---|---|")
+ for _, item := range structure.Fields.List {
+ name, com, def, required := getValuesFromItem(item)
+ d.append(fmt.Sprintf("| %s | %s | %s | %s | %s |", name, normaliseType(item.Type), required, def, com))
+ }
+ }
+
+ }
+ }
+
+ return true
+}
+
+func normaliseType(fieldType ast.Expr) string {
+ fset := token.NewFileSet()
+ var typeNameBuf bytes.Buffer
+ err := printer.Fprint(&typeNameBuf, fset, fieldType)
+ if err != nil {
+ log.Error(err, "error getting type")
+ }
+ return typeNameBuf.String()
+}
+
+func (d *doc) generate() {
+ if d.RootNode != nil {
+ ast.Inspect(d.RootNode, d.checkNodes)
+ log.Info("DocumentRoot not present skipping parse")
+ }
+ directory := fmt.Sprintf("./%s/%s/", docsPath, d.Type)
+ err := os.MkdirAll(directory, os.ModePerm)
+ if err != nil {
+ log.Error(err, "Md file create error %s", err.Error())
+ }
+ filepath := fmt.Sprintf("./%s/%s/%s.md", docsPath, d.Type, d.Name)
+ f, err := os.Create(filepath)
+ if err != nil {
+ log.Error(err, "Md file create error %s", err.Error())
+ }
+ defer closeFile(f)
+
+ _, err = f.WriteString(d.Content)
+ if err != nil {
+ log.Error(err, "Md file write error %s", err.Error())
+ }
+}
+
+type PluginDir struct {
+ Type string
+ Path string
+}
+
+var pluginDirs = []PluginDir{
+ {"filters", "./pkg/model/filter/"},
+ {"outputs", "./pkg/model/output/"},
+ {"common", "./pkg/model/common/"},
+}
+
+var docsPath = "docs/plugins"
+
+type plugin struct {
+ Name string
+ Type string
+ SourcePath string
+ DocumentationPath string
+}
+
+type plugins []plugin
+
+var ignoredPluginsList = []string{
+ "null",
+ ".*.deepcopy",
+}
+
+func main() {
+ verboseLogging := true
+ ctrl.SetLogger(zap.Logger(verboseLogging))
+ log = ctrl.Log.WithName("docs").WithName("main")
+ //log.Info("plugin Directories:", "packageDir", packageDir)
+
+ fileList, err := getPlugins(pluginDirs)
+ if err != nil {
+ log.Error(err, "Directory check error.")
+ }
+ for _, file := range fileList {
+ log.Info("plugin", "Name", file.SourcePath)
+ document := getDocumentParser(file)
+ document.generate()
+ }
+
+ index := doc{
+ Name: "index",
+ }
+ index.append("## Table of Contents\n\n")
+ for _, p := range pluginDirs {
+ index.append(fmt.Sprintf("### %s\n", p.Type))
+ for _, plugin := range fileList {
+ if plugin.Type == p.Type {
+ index.append(fmt.Sprintf("- [%s](%s)", plugin.Name, plugin.DocumentationPath))
+ }
+ }
+ index.append("\n")
+ }
+
+ index.generate()
+
+}
+
+func getPrefixedLine(origin, expression string) string {
+ r := regexp.MustCompile(expression)
+ result := r.FindStringSubmatch(origin)
+ if len(result) > 1 {
+ return fmt.Sprintf("%s", result[1])
+ }
+ return ""
+}
+
+func getTypeName(generic *ast.GenDecl, defaultName string) string {
+ structName := generic.Doc.Text()
+ result := getPrefixedLine(structName, `\+docName:\"(.*)\"`)
+ if result != "" {
+ return result
+ }
+ return defaultName
+}
+
+func getTypeDocs(generic *ast.GenDecl) string {
+ comment := ""
+ if generic.Doc != nil {
+ for _, line := range generic.Doc.List {
+ newLine := strings.TrimPrefix(line.Text, "//")
+ newLine = strings.TrimSpace(newLine)
+ if !strings.HasPrefix(newLine, "+kubebuilder") &&
+ !strings.HasPrefix(newLine, "+docName") {
+ comment += newLine + "\n"
+ }
+ }
+ }
+ return comment
+}
+
+func getLink(def string) string {
+ result := getPrefixedLine(def, `\+docLink:\"(.*)\"`)
+ if result != "" {
+ url := strings.Split(result, ",")
+ def = strings.Replace(def, fmt.Sprintf("+docLink:\"%s\"", result), fmt.Sprintf("[%s](%s)", url[0], url[1]), 1)
+ }
+ return def
+}
+
+func formatRequired(r bool) string {
+ if r {
+ return "Yes"
+ }
+ return "No"
+}
+
+func getValuesFromItem(item *ast.Field) (name, comment, def, required string) {
+ commentWithDefault := ""
+ if item.Doc != nil {
+ for _, line := range item.Doc.List {
+ newLine := strings.TrimPrefix(line.Text, "//")
+ newLine = strings.TrimSpace(newLine)
+ if !strings.HasPrefix(newLine, "+kubebuilder") {
+ commentWithDefault += newLine + " "
+ }
+ }
+ }
+ tag := item.Tag.Value
+ tagResult := getPrefixedLine(tag, `plugin:\"default:(.*)\"`)
+ nameResult := getPrefixedLine(tag, `json:\"([^,\"]*).*\"`)
+ required = formatRequired(!strings.Contains(getPrefixedLine(tag, `json:\"(.*)\"`), "omitempty"))
+ if tagResult != "" {
+ return nameResult, getLink(commentWithDefault), tagResult, required
+ }
+ result := getPrefixedLine(commentWithDefault, `\(default:(.*)\)`)
+ if result != "" {
+ ignore := fmt.Sprintf("(default:%s)", result)
+ comment = strings.Replace(commentWithDefault, ignore, "", 1)
+ return nameResult, comment, getLink(result), required
+ }
+
+ return nameResult, getLink(commentWithDefault), "-", required
+
+}
+
+func getDocumentParser(file plugin) *doc {
+ fileSet := token.NewFileSet()
+ node, err := parser.ParseFile(fileSet, file.SourcePath, nil, parser.ParseComments)
+ if err != nil {
+ log.Error(err, "Error!")
+ }
+ newDoc := &doc{
+ Name: file.Name,
+ RootNode: node,
+ Type: file.Type,
+ }
+ return newDoc
+}
+
+func getPlugins(PluginDirs []PluginDir) (plugins, error) {
+ var PluginList plugins
+ for _, p := range PluginDirs {
+ files, err := ioutil.ReadDir(p.Path)
+ if err != nil {
+ log.Error(err, err.Error())
+ return nil, err
+ }
+ for _, file := range files {
+ log.V(2).Info("fileListGenerator", "filename", "file")
+ fname := strings.Replace(file.Name(), ".go", "", 1)
+ if filepath.Ext(file.Name()) == ".go" && getPluginWhiteList(fname) {
+ fullPath := p.Path + file.Name()
+ filepath := fmt.Sprintf("./%s/%s.md", p.Type, fname)
+ PluginList = append(PluginList, plugin{
+ Name: fname, SourcePath: fullPath, DocumentationPath: filepath, Type: p.Type})
+ }
+ }
+ }
+
+ return PluginList, nil
+}
+
+func closeFile(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Error(err, "File Close Error: %s", err.Error())
+ }
+}
+
+func getPluginWhiteList(pluginName string) bool {
+ for _, p := range ignoredPluginsList {
+ r := regexp.MustCompile(p)
+ if r.MatchString(pluginName) {
+ log.Info("fileListGenerator", "ignored plugin", pluginName)
+ return false
+ }
+ }
+ return true
+}
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
deleted file mode 100644
index 6c8b93829..000000000
--- a/cmd/manager/main.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "os"
- "runtime"
-
- "github.com/banzaicloud/logging-operator/pkg/apis"
- "github.com/banzaicloud/logging-operator/pkg/controller"
-
- "github.com/operator-framework/operator-sdk/pkg/k8sutil"
- "github.com/operator-framework/operator-sdk/pkg/leader"
- "github.com/operator-framework/operator-sdk/pkg/log/zap"
- "github.com/operator-framework/operator-sdk/pkg/metrics"
- sdkVersion "github.com/operator-framework/operator-sdk/version"
- "github.com/spf13/pflag"
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
- "sigs.k8s.io/controller-runtime/pkg/client/config"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- "sigs.k8s.io/controller-runtime/pkg/runtime/signals"
-)
-
-// Change below variables to serve metrics on different host or port.
-var (
- metricsHost = "0.0.0.0"
- metricsPort int32 = 8383
-)
-var log = logf.Log.WithName("cmd")
-
-func printVersion() {
- log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
- log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
- log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
-}
-
-func main() {
- // Add the zap logger flag set to the CLI. The flag set must
- // be added before calling pflag.Parse().
- pflag.CommandLine.AddFlagSet(zap.FlagSet())
-
- // Add flags registered by imported packages (e.g. glog and
- // controller-runtime)
- pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
-
- pflag.Parse()
-
- // Use a zap logr.Logger implementation. If none of the zap
- // flags are configured (or if the zap flag set is not being
- // used), this defaults to a production zap logger.
- //
- // The logger instantiated here can be changed to any logger
- // implementing the logr.Logger interface. This logger will
- // be propagated through the whole operator, generating
- // uniform and structured logs.
- logf.SetLogger(zap.Logger())
-
- printVersion()
-
- namespace, err := k8sutil.GetWatchNamespace()
- if err != nil {
- log.Error(err, "Failed to get watch namespace")
- os.Exit(1)
- }
-
- // Get a config to talk to the apiserver
- cfg, err := config.GetConfig()
- if err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- ctx := context.TODO()
-
- // Become the leader before proceeding
- err = leader.Become(ctx, "logging-operator-lock")
- if err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- // Create a new Cmd to provide shared dependencies and start components
- mgr, err := manager.New(cfg, manager.Options{
- Namespace: namespace,
- MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
- })
- if err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- log.Info("Registering Components.")
-
- // Setup Scheme for all resources
- if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- // Setup all Controllers
- if err := controller.AddToManager(mgr); err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- // Create Service object to expose the metrics port.
- _, err = metrics.ExposeMetricsPort(ctx, metricsPort)
- if err != nil {
- log.Info(err.Error())
- }
-
- log.Info("Starting the Cmd.")
-
- // Start the Cmd
- if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
- log.Error(err, "Manager exited non-zero")
- os.Exit(1)
- }
-}
diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml
new file mode 100644
index 000000000..9d6bad1e1
--- /dev/null
+++ b/config/certmanager/certificate.yaml
@@ -0,0 +1,24 @@
+# The following manifests contain a self-signed issuer CR and a certificate CR.
+# More document can be found at https://docs.cert-manager.io
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Issuer
+metadata:
+ name: selfsigned-issuer
+ namespace: system
+spec:
+ selfSigned: {}
+---
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
+ namespace: system
+spec:
+ # $(SERVICENAME) and $(NAMESPACE) will be substituted by kustomize
+ commonName: $(SERVICENAME).$(NAMESPACE).svc
+ dnsNames:
+ - $(SERVICENAME).$(NAMESPACE).svc.cluster.local
+ issuerRef:
+ kind: Issuer
+ name: selfsigned-issuer
+ secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize
diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml
new file mode 100644
index 000000000..8181bc3a2
--- /dev/null
+++ b/config/certmanager/kustomization.yaml
@@ -0,0 +1,26 @@
+resources:
+- certificate.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+- name: NAMESPACE # namespace of the service and the certificate CR
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldref:
+ fieldpath: metadata.namespace
+- name: CERTIFICATENAME
+ objref:
+ kind: Certificate
+ group: certmanager.k8s.io
+ version: v1alpha1
+ name: serving-cert # this name should match the one in certificate.yaml
+- name: SERVICENAME
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml
new file mode 100644
index 000000000..49e0b1e7a
--- /dev/null
+++ b/config/certmanager/kustomizeconfig.yaml
@@ -0,0 +1,16 @@
+# This configuration is for teaching kustomize how to update name ref and var substitution
+nameReference:
+- kind: Issuer
+ group: certmanager.k8s.io
+ fieldSpecs:
+ - kind: Certificate
+ group: certmanager.k8s.io
+ path: spec/issuerRef/name
+
+varReference:
+- kind: Certificate
+ group: certmanager.k8s.io
+ path: spec/commonName
+- kind: Certificate
+ group: certmanager.k8s.io
+ path: spec/dnsNames
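Assuming a hypothetical overlay that sets the namespace to `logging-system` and applies no name prefix, the certificate above would resolve roughly as follows once kustomize substitutes `$(SERVICENAME)` and `$(NAMESPACE)`:

```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: serving-cert
  namespace: logging-system        # substituted namespace (assumed)
spec:
  commonName: webhook-service.logging-system.svc
  dnsNames:
    - webhook-service.logging-system.svc.cluster.local
  issuerRef:
    kind: Issuer
    name: selfsigned-issuer
  secretName: webhook-server-cert
```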
diff --git a/config/crd/bases/logging.banzaicloud.io_clusterflows.yaml b/config/crd/bases/logging.banzaicloud.io_clusterflows.yaml
new file mode 100644
index 000000000..711c45034
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.io_clusterflows.yaml
@@ -0,0 +1,140 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusterflows.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: ClusterFlow
+ listKind: ClusterFlowList
+ plural: clusterflows
+ singular: clusterflow
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterFlow is the Schema for the clusterflows API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Name of the logging cluster to be attached
+ properties:
+ filters:
+ items:
+ description: Filter definition for FlowSpec
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid record to @ERROR label. Invalid
+ cases are: key not exist, format is not matched, unexpected
+ error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+ description: ' Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+ description: Remove key_name field when parsing is succeeded
+ type: boolean
+ replace_invalid_sequence:
+ description: If true, invalid string is replaced with safe
+ characters and re-parse it.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+ description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
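A ClusterFlow that validates against the schema above could look like the sketch below; every name and label value is illustrative, and the referenced ClusterOutput is assumed to exist.

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: all-nginx                  # illustrative name
  namespace: logging               # typically the Logging resource's controlNamespace
spec:
  selectors:
    app: nginx
  filters:
    - tag_normaliser: {}
    - parser:
        key_name: message
        remove_key_name_field: true
        parsers:
          - type: nginx
  outputRefs:
    - nginx-cluster-output         # assumed ClusterOutput name
```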
diff --git a/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml b/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml
new file mode 100644
index 000000000..1fffbaaf0
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml
@@ -0,0 +1,2274 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusteroutputs.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: ClusterOutput
+ listKind: ClusterOutputList
+ plural: clusteroutputs
+ singular: clusteroutput
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterOutput is the Schema for the clusteroutputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterOutputSpec contains Kubernetes spec for CLusterOutput
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+ description: 'Automatically create container if not exists(default:
+ true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+ description: 'Azure storage type currently only "blob" supported
+ (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
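+            # Illustrative example (not part of the generated schema): the buffer
+            # section documented above is shared by all outputs below. A minimal,
+            # hypothetical configuration could look like this; only timekey is
+            # required by the schema, all other values are example choices.
+            #   buffer:
+            #     type: file
+            #     timekey: 10m
+            #     timekey_wait: 30s
+            #     timekey_use_utc: true
+            #     flush_mode: interval
+            #     flush_interval: 60s
+            #     retry_forever: false
+            #     retry_max_times: 5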
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                        Output plugin will flush the chunk when the actual size reaches
+                        chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+                        default)
+                      type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                        in it
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+                  description: Specify the string and its value to be replaced in
+                    form of hash. Can contain multiple key-value pairs that would
+                    be replaced in the specified template_file. This setting only
+                    creates the template; to add a rollover index, check the rollover_index
+                    configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+ description: 'Indicates whether to fail when max_retry_putting_template
+                    is exceeded. If you have multiple output plugins, you can use
+                    this property to avoid failing on fluentd startup.(default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+                    You can use this option to enforce URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+                  description: 'Set the Logstash prefix.(default: logstash)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+                  description: 'You can specify the number of retries for obtaining
+                    the Elasticsearch version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+                  description: 'You can specify the number of retries for putting the
+                    template.(default: 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: integer
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+                    JSON encoder/decoder. When this parameter is set to true, Elasticsearch
+                    client uses Oj as JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+                    only on "host unreachable exceptions". We recommend setting this
+                    to true in the presence of elasticsearch shield.(default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Connection scheme (default: http)'
+ type: string
+ ssl_verify:
+                  description: 'Verify SSL certificates or not (default: true)'
+ type: boolean
+ ssl_version:
+ description: If you want to configure SSL/TLS version, you can specify
+ ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]
+ type: string
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+                  description: 'By default, records are inserted into the logstash-YYMMDD
+                    index using UTC (Coordinated Universal Time). Setting utc_index
+                    to false allows using local time.(default: true)'
+ type: boolean
+ with_transporter_log:
+                  description: 'This is a debugging-purpose option for obtaining the
+                    transporter layer log. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ required:
+ - ssl_verify
+ type: object
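+            # Illustrative example (hypothetical values): a minimal elasticsearch
+            # output using the fields documented above; ssl_verify is included
+            # because the schema marks it as required, and the password shows the
+            # valueFrom/secretKeyRef form used for secrets throughout this CRD.
+            #   elasticsearch:
+            #     host: elasticsearch.logging.svc
+            #     port: 9200
+            #     scheme: https
+            #     ssl_verify: true
+            #     logstash_format: true
+            #     user: fluentd
+            #     password:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: es-credentials
+            #           key: password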
+ enabledNamespaces:
+ items:
+ type: string
+ type: array
+ forward:
+ properties:
+ ack_response_timeout:
+ description: 'This option is used when require_ack_response is true.
+ This default value is based on popular tcp_syn_retries. (default:
+ 190)'
+ type: integer
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                        Output plugin will flush the chunk when the actual size reaches
+                        chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+                        default)
+                      type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                        in it
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ connect_timeout:
+ description: The timeout time for socket connect. When the connection
+ timed out during establishment, Errno::ETIMEDOUT is raised.
+ type: integer
+ dns_round_robin:
+                  description: Enable client-side DNS round robin. Pick an IP address
+                    uniformly at random to send data when a hostname has several IP
+ addresses. `heartbeat_type udp` is not available with `dns_round_robin
+ true`. Use `heartbeat_type tcp` or `heartbeat_type none`.
+ type: boolean
+ expire_dns_cache:
+ description: 'Set TTL to expire DNS cache in seconds. Set 0 not
+                    to use DNS Cache. (default: 0)'
+ type: integer
+ hard_timeout:
+ description: 'The hard timeout used to detect server failure. The
+ default value is equal to the send_timeout parameter. (default:
+ 60)'
+ type: integer
+ heartbeat_interval:
+                  description: 'The interval of the heartbeat packet. (default: 1)'
+ type: integer
+ heartbeat_type:
+ description: The transport protocol to use for heartbeats. Set "none"
+ to disable heartbeat. [transport, tcp, udp, none]
+ type: string
+ ignore_network_errors_at_startup:
+ description: Ignore DNS resolution and errors at startup time.
+ type: boolean
+ keepalive:
+ description: 'Enable keepalive connection. (default: false)'
+ type: boolean
+ keepalive_timeout:
+                  description: 'Expiration time of keepalive. Default value is nil, which
+                    means to keep the connection open as long as possible. (default: 0)'
+ type: integer
+ phi_failure_detector:
+ description: 'Use the "Phi accrual failure detector" to detect server
+ failure. (default: true)'
+ type: boolean
+ phi_threshold:
+ description: 'The threshold parameter used to detect server faults.
+ (default: 16) `phi_threshold` is deeply related to `heartbeat_interval`.
+ If you are using longer `heartbeat_interval`, please use the larger
+ `phi_threshold`. Otherwise you will see frequent detachments of
+ destination servers. The default value 16 is tuned for `heartbeat_interval`
+ 1s.'
+ type: integer
+ recover_wait:
+ description: 'The wait time before accepting a server fault recovery.
+ (default: 10)'
+ type: integer
+ require_ack_response:
+ description: Change the protocol to at-least-once. The plugin waits
+                    for the ack from the destination's in_forward plugin.
+ type: boolean
+ security:
+ properties:
+ allow_anonymous_source:
+                      description: Allow anonymous source. Client sections are required
+                        if disabled.
+ type: boolean
+ self_hostname:
+ description: Hostname
+ type: string
+ shared_key:
+ description: Shared key for authentication.
+ type: string
+ user_auth:
+ description: If true, use user based authentication.
+ type: boolean
+ required:
+ - self_hostname
+ - shared_key
+ type: object
+ send_timeout:
+ description: 'The timeout time when sending event logs. (default:
+ 60)'
+ type: integer
+ servers:
+                  description: Server definitions. At least one is required.
+ items:
+ description: server
+ properties:
+ host:
+ description: The IP address or host name of the server.
+ type: string
+ name:
+ description: The name of the server. Used for logging and
+ certificate verification in TLS transport (when host is
+ address).
+ type: string
+ password:
+ description: The password for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ port:
+ description: 'The port number of the host. Note that both
+ TCP packets (event stream) and UDP packets (heartbeat message)
+ are sent to this port. (default: 24224)'
+ type: integer
+ shared_key:
+ description: The shared key per server.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ standby:
+ description: Marks a node as the standby node for an Active-Standby
+ model between Fluentd nodes. When an active node goes down,
+ the standby node is promoted to an active node. The standby
+ node is not used by the out_forward plugin until then.
+ type: boolean
+ username:
+ description: The username for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ weight:
+ description: 'The load balancing weight. If the weight of
+ one server is 20 and the weight of the other server is 30,
+ events are sent in a 2:3 ratio. (default: 60).'
+ type: integer
+ required:
+ - host
+ type: object
+ type: array
+ tls_allow_self_signed_cert:
+ description: 'Allow self signed certificates or not. (default: false)'
+ type: boolean
+ tls_cert_logical_store_name:
+ description: The certificate logical store name on Windows system
+ certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_path:
+ description: The additional CA certificate path for TLS.
+ type: string
+ tls_cert_thumbprint:
+ description: The certificate thumbprint for searching from Windows
+                    system certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_use_enterprise_store:
+ description: Enable to use certificate enterprise store on Windows
+ system certstore. This parameter is for Windows only.
+ type: boolean
+ tls_ciphers:
+ description: 'The cipher configuration of TLS transport. (default:
+ ALL:!aNULL:!eNULL:!SSLv2)'
+ type: string
+ tls_client_cert_path:
+ description: The client certificate path for TLS
+ type: string
+ tls_client_private_key_passphrase:
+ description: The client private key passphrase for TLS.
+ type: string
+ tls_client_private_key_path:
+ description: The client private key path for TLS.
+ type: string
+ tls_insecure_mode:
+ description: 'Skip all verification of certificates or not. (default:
+ false)'
+ type: boolean
+ tls_verify_hostname:
+ description: 'Verify hostname of servers and certificates or not
+ in TLS transport. (default: true)'
+ type: boolean
+ tls_version:
+ description: 'The default version of TLS transport. [TLSv1_1, TLSv1_2]
+ (default: TLSv1_2)'
+ type: string
+ verify_connection_at_startup:
+ description: 'Verify that a connection can be made with one of out_forward
+ nodes at the time of startup. (default: false)'
+ type: boolean
+ required:
+ - servers
+ type: object
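+            # Illustrative example (hypothetical values): a minimal forward output.
+            # The schema above requires at least one server, and each server
+            # requires a host; security requires self_hostname and shared_key.
+            #   forward:
+            #     servers:
+            #       - host: fluentd-aggregator.logging.svc
+            #         port: 24224
+            #         weight: 60
+            #     require_ack_response: true
+            #     security:
+            #       self_hostname: fluentd
+            #       shared_key: example-shared-key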
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+                  description: 'Create GCS bucket if it does not exist (default:
+                    true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                        Output plugin will flush the chunk when the actual size reaches
+                        chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+                        default)
+                      type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                        in it
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+ description: User provided web-safe keys and arbitrary string values
+                    that will be returned with requests for the file as "x-goog-meta-"
+ response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
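+            # Illustrative example (hypothetical values): a gcs output with the two
+            # required fields (project and bucket) plus a credentials secret.
+            #   gcs:
+            #     project: my-gcp-project
+            #     bucket: my-log-bucket
+            #     path: logs/
+            #     credentials_json:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: gcs-credentials
+            #           key: credentials.json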
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                        Output plugin will flush the chunk when the actual size reaches
+                        chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+                        default)
+                      type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                        in it
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+ description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
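+            # Illustrative example (hypothetical values): a loki output; no field
+            # is required by the schema, the url and tenant are example values.
+            #   loki:
+            #     url: http://loki.logging.svc:3100
+            #     tenant: example-tenant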
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+                  description: 'Create OSS bucket if it does not exist (default:
+                    false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                        Output plugin will flush the chunk when the actual size reaches
+                        chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+                        default)
+                      type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                        in it
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+                  description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+                  description: Threshold for treating events as delayed; outputs a
+                    warning log if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
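+            # Illustrative example (hypothetical values): an oss output with the
+            # four required fields; note that aaccess_key_secret is spelled exactly
+            # as in the schema above.
+            #   oss:
+            #     endpoint: oss-cn-hangzhou.aliyuncs.com
+            #     bucket: my-log-bucket
+            #     access_key_id:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: oss-credentials
+            #           key: accessKeyId
+            #     aaccess_key_secret:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: oss-credentials
+            #           key: accessKeySecret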
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+                  description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+                        Output plugin will flush the chunk when the actual size reaches
+                        chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by
+                        default)
+                      type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                        in it
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                  description: Output plugin writes chunks timekey_wait seconds
+                    after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+                description: Profile name. Defaults to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+            description: Specifies the algorithm to use when encrypting the
+              object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+ warn_for_delay:
+ description: Given a threshold to treat events as delay, output
+ warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ sumologic:
+ properties:
+ add_timestamp:
+ description: 'Add timestamp (or timestamp_key) field to logs before
+ sending to sumologic (default: true)'
+ type: boolean
+ data_type:
+ description: 'The type of data that will be sent to Sumo Logic,
+ either logs or metrics (default: logs)'
+ type: string
+ disable_cookies:
+ description: 'Option to disable cookies on the HTTP Client. (default:
+ false)'
+ type: boolean
+ endpoint:
+ description: SumoLogic HTTP Collector URL
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ log_format:
+ description: 'Format to post logs into Sumo. (default: json)'
+ type: string
+ log_key:
+ description: 'Used to specify the key when merging json or sending
+ logs in text format (default: message)'
+ type: string
+ metric_data_format:
+ description: 'The format of metrics you will be sending, either
+ graphite or carbon2 or prometheus (default: graphite)'
+ type: string
+ open_timeout:
+ description: 'Set timeout seconds to wait until connection is opened.
+ (default: 60)'
+ type: integer
+ proxy_uri:
+ description: Add the uri of the proxy environment if present.
+ type: string
+ source_category:
+ description: 'Set _sourceCategory metadata field within SumoLogic
+ (default: nil)'
+ type: string
+ source_host:
+ description: 'Set _sourceHost metadata field within SumoLogic (default:
+ nil)'
+ type: string
+ source_name:
+ description: Set _sourceName metadata field within SumoLogic - overrides
+ source_name_key (default is nil)
+ type: string
+ source_name_key:
+ description: 'Set as source::path_key''s value so that the source_name
+ can be extracted from Fluentd''s buffer (default: source_name)'
+ type: string
+ timestamp_key:
+ description: 'Field name when add_timestamp is on (default: timestamp)'
+ type: string
+ verify_ssl:
+ description: 'Verify ssl certificate. (default: true)'
+ type: boolean
+ required:
+ - endpoint
+ - source_name
+ type: object
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ required:
+ - spec
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
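
For orientation, the schema above (ending with the s3 and sumologic output blocks) can be satisfied by a resource like the minimal sketch below. The kind and metadata are assumptions, since only the tail of this CRD is visible in the hunk, and the referenced secret name is hypothetical; the field names and the required endpoint and source_name constraints come from the schema itself.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output                          # assumed kind; the kind name is outside this hunk
metadata:
  name: sumologic-sample              # hypothetical name
spec:
  sumologic:
    source_name: my-cluster           # required by the schema above
    endpoint:                         # required; the schema expects a secretKeyRef via valueFrom
      valueFrom:
        secretKeyRef:
          name: sumologic-collector   # hypothetical secret
          key: endpoint
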
diff --git a/config/crd/bases/logging.banzaicloud.io_flows.yaml b/config/crd/bases/logging.banzaicloud.io_flows.yaml
new file mode 100644
index 000000000..1a5dc7a83
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.io_flows.yaml
@@ -0,0 +1,145 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: flows.logging.banzaicloud.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.loggingRef
+ name: Logging
+ type: string
+ group: logging.banzaicloud.io
+ names:
+ kind: Flow
+ listKind: FlowList
+ plural: flows
+ singular: flow
+ scope: ""
+ subresources: {}
+ validation:
+ openAPIV3Schema:
+ description: Flow Kubernetes object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FlowSpec is the Kubernetes spec for Flows
+ properties:
+ filters:
+ items:
+ description: Filter definition for FlowSpec
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid record to @ERROR label. Invalid
+ cases are: key not exist, format is not matched, unexpected
+ error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+                            description: 'Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+                      description: Remove key_name field when parsing succeeds
+ type: boolean
+ replace_invalid_sequence:
+                      description: If true, invalid strings are replaced with safe
+                        characters and re-parsed.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+ description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
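
To make the Flow schema above concrete, a minimal resource satisfying its required outputRefs and selectors fields, with a parser filter (key_name is the only required parser field), could look like the sketch below; the metadata, the label selector, and the referenced output name are hypothetical.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: sample-flow            # hypothetical
  namespace: default           # hypothetical
spec:
  selectors:                   # required: pod labels this flow applies to
    app: nginx                 # hypothetical label
  outputRefs:                  # required: names of Output resources to route to
    - sumologic-sample         # hypothetical output name
  filters:
    - parser:
        key_name: message      # required by the parser schema
        remove_key_name_field: true
        parsers:
          - type: nginx        # one of the parse types listed above
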
diff --git a/config/crd/bases/logging.banzaicloud.io_loggings.yaml b/config/crd/bases/logging.banzaicloud.io_loggings.yaml
new file mode 100644
index 000000000..cedc5432f
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.io_loggings.yaml
@@ -0,0 +1,416 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: loggings.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: Logging
+ listKind: LoggingList
+ plural: loggings
+ singular: logging
+ scope: Cluster
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: Logging is the Schema for the loggings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LoggingSpec defines the desired state of Logging
+ properties:
+ controlNamespace:
+ type: string
+ flowConfigCheckDisabled:
+ type: boolean
+ flowConfigOverride:
+ type: string
+ fluentbit:
+ description: FluentbitSpec defines the desired state of Fluentbit
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ image:
+              description: ImageSpec struct holds information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ parser:
+ type: string
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ targetHost:
+ type: string
+ targetPort:
+ format: int32
+ type: integer
+ tls:
+ description: FluentbitTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ tolerations:
+ items:
+              description: The pod this Toleration is attached to tolerates
+                any taint that matches the triple <key,value,effect> using the
+                matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ fluentd:
+ description: FluentdSpec defines the desired state of Fluentd
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ configReloaderImage:
+              description: ImageSpec struct holds information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ disablePvc:
+ type: boolean
+ fluentdPvcSpec:
+ description: PersistentVolumeClaimSpec describes the common attributes
+ of storage devices and allows a Source for provider-specific attributes
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: This field requires the VolumeSnapshotDataSource
+ alpha feature gate to be enabled and currently VolumeSnapshot
+ is the only supported data source. If the provisioner can
+ support VolumeSnapshot data source, it will create a new volume
+ and data will be restored to the volume at the same time.
+ If the provisioner does not support VolumeSnapshot data source,
+ volume will not be created and the failure will be reported
+ as an event. In the future, we plan to support more data source
+ types and the behavior of the provisioner may change.
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being
+ referenced. If APIGroup is not specified, the specified
+ Kind must be in the core API group. For any other third-party
+ types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the
+ volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not included
+ in claim spec. This is a beta feature.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ image:
+            description: ImageSpec struct holds information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ port:
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ tls:
+ description: FluentdTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ tolerations:
+ items:
+              description: The pod this Toleration is attached to tolerates
+                any taint that matches the triple <key,value,effect> using the
+                matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeModImage:
+            description: ImageSpec struct holds information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ type: object
+ loggingRef:
+ type: string
+ watchNamespaces:
+ items:
+ type: string
+ type: array
+ required:
+ - controlNamespace
+ type: object
+ status:
+ description: LoggingStatus defines the observed state of Logging
+ properties:
+ configCheckResults:
+ additionalProperties:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
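
As a quick reference for the cluster-scoped Logging schema above, where controlNamespace is the only required field, a minimal resource might be sketched as follows; the resource name and the namespace value are assumptions.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: sample-logging          # hypothetical; Logging is cluster-scoped, so no namespace
spec:
  controlNamespace: logging     # required: the namespace the operator manages fluentd/fluent-bit in
  fluentd: {}                   # accept defaults; all FluentdSpec fields above are optional
  fluentbit: {}                 # accept defaults; all FluentbitSpec fields above are optional
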
diff --git a/config/crd/bases/logging.banzaicloud.io_outputs.yaml b/config/crd/bases/logging.banzaicloud.io_outputs.yaml
new file mode 100644
index 000000000..de30ee7c4
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.io_outputs.yaml
@@ -0,0 +1,2268 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: outputs.logging.banzaicloud.io
+spec:
+ group: logging.banzaicloud.io
+ names:
+ kind: Output
+ listKind: OutputList
+ plural: outputs
+ singular: output
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: Output is the Schema for the outputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OutputSpec defines the desired state of Output
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+ description: 'Automatically create container if not exists(default:
+ true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+ description: 'Azure storage type currently only "blob" supported
+ (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of the chunk becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of the chunk becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+            description: Specify the string and its value to be replaced in
+              form of hash. Can contain multiple key-value pairs that would be
+              replaced in the specified template_file. This setting only creates
+              the template; to add a rollover index, check the rollover_index
+              configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+            description: 'Indicates whether to fail when max_retry_putting_template
+              is exceeded. If you have multiple output plugins, you can use
+              this property to avoid failing on fluentd startup.(default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+              You can use this option to enforce URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+            description: 'Set the Logstash prefix.(default: logstash)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+            description: 'You can specify the number of retries for obtaining the
+              Elasticsearch version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+            description: 'You can specify the number of retries for putting the
+              template.(default: 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: integer
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+ JSON encoder/decoder. When this parameter sets as true, Elasticsearch
+              client uses Oj as JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+              only on "host unreachable exceptions". We recommend setting this
+              to true in the presence of elasticsearch shield.(default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Connection scheme (default: http)'
+ type: string
+ ssl_verify:
+ description: 'Skip ssl verification (default: true)'
+ type: boolean
+ ssl_version:
+ description: If you want to configure SSL/TLS version, you can specify
+ ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]
+ type: string
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+            description: 'By default, records are inserted into index logstash-YYMMDD
+              with UTC (Coordinated Universal Time). This option allows using
+              local time if you set utc_index to false.(default: true)'
+ type: boolean
+ with_transporter_log:
+            description: 'This is a debugging-purpose option for obtaining transporter
+              layer logs. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ required:
+ - ssl_verify
+ type: object
+ forward:
+ properties:
+ ack_response_timeout:
+ description: 'This option is used when require_ack_response is true.
+ This default value is based on popular tcp_syn_retries. (default:
+ 190)'
+ type: integer
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+                description: The max number of events that each chunk can store
+ type: integer
+ chunk_limit_size:
+                description: 'The max size of each chunk: events will be written
+                  into chunks until the size of the chunk becomes this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+                description: Output plugin writes chunks timekey_wait seconds
+                  after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ connect_timeout:
+ description: The timeout time for socket connect. When the connection
+ timed out during establishment, Errno::ETIMEDOUT is raised.
+ type: integer
+ dns_round_robin:
+ description: Enable client-side DNS round robin. Uniform randomly
+ pick an IP address to send data when a hostname has several IP
+ addresses. `heartbeat_type udp` is not available with `dns_round_robin
+ true`. Use `heartbeat_type tcp` or `heartbeat_type none`.
+ type: boolean
+ expire_dns_cache:
+ description: 'Set TTL to expire DNS cache in seconds. Set 0 not
+              to use DNS Cache. (default: 0)'
+ type: integer
+ hard_timeout:
+ description: 'The hard timeout used to detect server failure. The
+ default value is equal to the send_timeout parameter. (default:
+ 60)'
+ type: integer
+ heartbeat_interval:
+            description: 'The interval of the heartbeat packet. (default: 1)'
+ type: integer
+ heartbeat_type:
+ description: The transport protocol to use for heartbeats. Set "none"
+ to disable heartbeat. [transport, tcp, udp, none]
+ type: string
+ ignore_network_errors_at_startup:
+ description: Ignore DNS resolution and errors at startup time.
+ type: boolean
+ keepalive:
+ description: 'Enable keepalive connection. (default: false)'
+ type: boolean
+ keepalive_timeout:
+ description: 'Expired time of keepalive. Default value is nil, which
+ means to keep connection as long as possible. (default: 0)'
+ type: integer
+ phi_failure_detector:
+ description: 'Use the "Phi accrual failure detector" to detect server
+ failure. (default: true)'
+ type: boolean
+ phi_threshold:
+ description: 'The threshold parameter used to detect server faults.
+ (default: 16) `phi_threshold` is deeply related to `heartbeat_interval`.
+ If you are using longer `heartbeat_interval`, please use the larger
+ `phi_threshold`. Otherwise you will see frequent detachments of
+ destination servers. The default value 16 is tuned for `heartbeat_interval`
+ 1s.'
+ type: integer
+ recover_wait:
+ description: 'The wait time before accepting a server fault recovery.
+ (default: 10)'
+ type: integer
+ require_ack_response:
+            description: Change the protocol to at-least-once. The plugin waits
+              for the ack from the destination's in_forward plugin.
+ type: boolean
+ security:
+ properties:
+ allow_anonymous_source:
+                description: Allow anonymous source. <client> sections are required
+                  if disabled.
+ type: boolean
+ self_hostname:
+ description: Hostname
+ type: string
+ shared_key:
+ description: Shared key for authentication.
+ type: string
+ user_auth:
+ description: If true, use user based authentication.
+ type: boolean
+ required:
+ - self_hostname
+ - shared_key
+ type: object
+ send_timeout:
+ description: 'The timeout time when sending event logs. (default:
+ 60)'
+ type: integer
+ servers:
+            description: Server definitions; at least one is required
+ items:
+ description: server
+ properties:
+ host:
+ description: The IP address or host name of the server.
+ type: string
+ name:
+ description: The name of the server. Used for logging and
+                    certificate verification in TLS transport (when host is
+                    an address).
+ type: string
+ password:
+ description: The password for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ port:
+ description: 'The port number of the host. Note that both
+ TCP packets (event stream) and UDP packets (heartbeat message)
+ are sent to this port. (default: 24224)'
+ type: integer
+ shared_key:
+ description: The shared key per server.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ standby:
+ description: Marks a node as the standby node for an Active-Standby
+ model between Fluentd nodes. When an active node goes down,
+ the standby node is promoted to an active node. The standby
+ node is not used by the out_forward plugin until then.
+ type: boolean
+ username:
+ description: The username for authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ weight:
+ description: 'The load balancing weight. If the weight of
+ one server is 20 and the weight of the other server is 30,
+ events are sent in a 2:3 ratio. (default: 60).'
+ type: integer
+ required:
+ - host
+ type: object
+ type: array
+ tls_allow_self_signed_cert:
+ description: 'Allow self signed certificates or not. (default: false)'
+ type: boolean
+ tls_cert_logical_store_name:
+ description: The certificate logical store name on Windows system
+ certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_path:
+ description: The additional CA certificate path for TLS.
+ type: string
+ tls_cert_thumbprint:
+            description: The certificate thumbprint for searching from Windows
+              system certstore. This parameter is for Windows only.
+ type: string
+ tls_cert_use_enterprise_store:
+            description: Enable using the certificate enterprise store on Windows
+ system certstore. This parameter is for Windows only.
+ type: boolean
+ tls_ciphers:
+ description: 'The cipher configuration of TLS transport. (default:
+ ALL:!aNULL:!eNULL:!SSLv2)'
+ type: string
+ tls_client_cert_path:
+ description: The client certificate path for TLS
+ type: string
+ tls_client_private_key_passphrase:
+ description: The client private key passphrase for TLS.
+ type: string
+ tls_client_private_key_path:
+ description: The client private key path for TLS.
+ type: string
+ tls_insecure_mode:
+ description: 'Skip all verification of certificates or not. (default:
+ false)'
+ type: boolean
+ tls_verify_hostname:
+ description: 'Verify hostname of servers and certificates or not
+ in TLS transport. (default: true)'
+ type: boolean
+ tls_version:
+ description: 'The default version of TLS transport. [TLSv1_1, TLSv1_2]
+ (default: TLSv1_2)'
+ type: string
+ verify_connection_at_startup:
+ description: 'Verify that a connection can be made with one of out_forward
+ nodes at the time of startup. (default: false)'
+ type: boolean
+ required:
+ - servers
+ type: object
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+            description: 'Create GCS bucket if it does not exist (default:
+ true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+            description: User-provided web-safe keys and arbitrary string values
+              that will be returned with requests for the file as "x-goog-meta-"
+ response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+            description: 'Set of labels to include with every Loki stream. (default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+            description: The URL of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+            description: 'Create OSS bucket if it does not exist (default:
+              false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+            description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+            description: Given a threshold to treat events as delayed, output
+ warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+            description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+                description: Profile name. Defaults to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+            description: Specifies the algorithm to use when encrypting the
+ object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+            description: The type of storage to use for the object (STANDARD, REDUCED_REDUNDANCY, STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+            description: Given a threshold to treat events as delayed, output
+              warning logs if delayed events were put into S3
+ warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ sumologic:
+ properties:
+ add_timestamp:
+ description: 'Add timestamp (or timestamp_key) field to logs before
+ sending to sumologic (default: true)'
+ type: boolean
+ data_type:
+ description: 'The type of data that will be sent to Sumo Logic,
+ either logs or metrics (default: logs)'
+ type: string
+ disable_cookies:
+ description: 'Option to disable cookies on the HTTP Client. (default:
+ false)'
+ type: boolean
+ endpoint:
+ description: SumoLogic HTTP Collector URL
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ log_format:
+ description: 'Format to post logs into Sumo. (default: json)'
+ type: string
+ log_key:
+ description: 'Used to specify the key when merging json or sending
+ logs in text format (default: message)'
+ type: string
+ metric_data_format:
+ description: 'The format of metrics you will be sending, either
+ graphite or carbon2 or prometheus (default: graphite)'
+ type: string
+ open_timeout:
+ description: 'Set timeout seconds to wait until connection is opened.
+ (default: 60)'
+ type: integer
+ proxy_uri:
+ description: Add the uri of the proxy environment if present.
+ type: string
+ source_category:
+ description: 'Set _sourceCategory metadata field within SumoLogic
+ (default: nil)'
+ type: string
+ source_host:
+ description: 'Set _sourceHost metadata field within SumoLogic (default:
+ nil)'
+ type: string
+ source_name:
+ description: Set _sourceName metadata field within SumoLogic - overrides
+ source_name_key (default is nil)
+ type: string
+ source_name_key:
+ description: 'Set as source::path_key''s value so that the source_name
+ can be extracted from Fluentd''s buffer (default: source_name)'
+ type: string
+ timestamp_key:
+ description: 'Field name when add_timestamp is on (default: timestamp)'
+ type: string
+ verify_ssl:
+ description: 'Verify ssl certificate. (default: true)'
+ type: boolean
+ required:
+ - endpoint
+ - source_name
+ type: object
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
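For orientation, here is a minimal sketch of an Output custom resource against the generated schema above. The resource name, namespace, secret name, and key are hypothetical and not part of this change; only the field layout (including the valueFrom/secretKeyRef pattern used for secret values) follows the schema.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: sumologic-sample        # hypothetical name
  namespace: logging            # hypothetical namespace
spec:
  sumologic:
    source_name: my-cluster     # required by the schema
    endpoint:                   # required; resolved from a Kubernetes secret
      valueFrom:
        secretKeyRef:
          name: sumologic-collector   # hypothetical secret name
          key: endpoint               # hypothetical key within that secret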
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
new file mode 100644
index 000000000..35a3278d9
--- /dev/null
+++ b/config/crd/kustomization.yaml
@@ -0,0 +1,37 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/logging.banzaicloud.io_fluentbits.yaml
+- bases/logging.banzaicloud.io_flows.yaml
+- bases/logging.banzaicloud.io_clusterflows.yaml
+- bases/logging.banzaicloud.io_outputs.yaml
+- bases/logging.banzaicloud.io_clusteroutputs.yaml
+- bases/logging.banzaicloud.io_fluentds.yaml
+- bases/logging.banzaicloud.io_loggings.yaml
+# +kubebuilder:scaffold:crdkustomizeresource
+
+patches:
+# [WEBHOOK] patches here are for enabling the conversion webhook for each CRD
+#- patches/webhook_in_fluentbits.yaml
+#- patches/webhook_in_flows.yaml
+#- patches/webhook_in_clusterflows.yaml
+#- patches/webhook_in_outputs.yaml
+#- patches/webhook_in_clusteroutputs.yaml
+#- patches/webhook_in_fluentds.yaml
+#- patches/webhook_in_loggings.yaml
+# +kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CAINJECTION] patches here are for enabling the CA injection for each CRD
+#- patches/cainjection_in_fluentbits.yaml
+#- patches/cainjection_in_flows.yaml
+#- patches/cainjection_in_clusterflows.yaml
+#- patches/cainjection_in_outputs.yaml
+#- patches/cainjection_in_clusteroutputs.yaml
+#- patches/cainjection_in_fluentds.yaml
+#- patches/cainjection_in_loggings.yaml
+# +kubebuilder:scaffold:crdkustomizecainjectionpatch
+
+# the following config is for teaching kustomize how to do kustomization for CRDs.
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml
new file mode 100644
index 000000000..6f83d9a94
--- /dev/null
+++ b/config/crd/kustomizeconfig.yaml
@@ -0,0 +1,17 @@
+# This file is for teaching kustomize how to substitute name and namespace reference in CRD
+nameReference:
+- kind: Service
+ version: v1
+ fieldSpecs:
+ - kind: CustomResourceDefinition
+ group: apiextensions.k8s.io
+ path: spec/conversion/webhookClientConfig/service/name
+
+namespace:
+- kind: CustomResourceDefinition
+ group: apiextensions.k8s.io
+ path: spec/conversion/webhookClientConfig/service/namespace
+ create: false
+
+varReference:
+- path: metadata/annotations
diff --git a/config/crd/patches/cainjection_in_clusterflows.yaml b/config/crd/patches/cainjection_in_clusterflows.yaml
new file mode 100644
index 000000000..47817e94e
--- /dev/null
+++ b/config/crd/patches/cainjection_in_clusterflows.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: clusterflows.logging.banzaicloud.io
diff --git a/config/crd/patches/cainjection_in_clusteroutputs.yaml b/config/crd/patches/cainjection_in_clusteroutputs.yaml
new file mode 100644
index 000000000..8129716bd
--- /dev/null
+++ b/config/crd/patches/cainjection_in_clusteroutputs.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: clusteroutputs.logging.banzaicloud.io
diff --git a/config/crd/patches/cainjection_in_flows.yaml b/config/crd/patches/cainjection_in_flows.yaml
new file mode 100644
index 000000000..ffa03f496
--- /dev/null
+++ b/config/crd/patches/cainjection_in_flows.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: flows.logging.banzaicloud.io
diff --git a/config/crd/patches/cainjection_in_fluentbits.yaml b/config/crd/patches/cainjection_in_fluentbits.yaml
new file mode 100644
index 000000000..5463045da
--- /dev/null
+++ b/config/crd/patches/cainjection_in_fluentbits.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: fluentbits.logging.banzaicloud.io
diff --git a/config/crd/patches/cainjection_in_fluentds.yaml b/config/crd/patches/cainjection_in_fluentds.yaml
new file mode 100644
index 000000000..ae1f9cd6a
--- /dev/null
+++ b/config/crd/patches/cainjection_in_fluentds.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: fluentds.logging.banzaicloud.io
diff --git a/config/crd/patches/cainjection_in_loggings.yaml b/config/crd/patches/cainjection_in_loggings.yaml
new file mode 100644
index 000000000..cd8e9829d
--- /dev/null
+++ b/config/crd/patches/cainjection_in_loggings.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: loggings.logging.banzaicloud.io
diff --git a/config/crd/patches/cainjection_in_outputs.yaml b/config/crd/patches/cainjection_in_outputs.yaml
new file mode 100644
index 000000000..2b1014cb4
--- /dev/null
+++ b/config/crd/patches/cainjection_in_outputs.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: outputs.logging.banzaicloud.io
diff --git a/config/crd/patches/webhook_in_clusterflows.yaml b/config/crd/patches/webhook_in_clusterflows.yaml
new file mode 100644
index 000000000..df62e5a47
--- /dev/null
+++ b/config/crd/patches/webhook_in_clusterflows.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterflows.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_clusteroutputs.yaml b/config/crd/patches/webhook_in_clusteroutputs.yaml
new file mode 100644
index 000000000..79ede2544
--- /dev/null
+++ b/config/crd/patches/webhook_in_clusteroutputs.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusteroutputs.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_flows.yaml b/config/crd/patches/webhook_in_flows.yaml
new file mode 100644
index 000000000..6ca375a4f
--- /dev/null
+++ b/config/crd/patches/webhook_in_flows.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: flows.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_fluentbits.yaml b/config/crd/patches/webhook_in_fluentbits.yaml
new file mode 100644
index 000000000..0e5cedd89
--- /dev/null
+++ b/config/crd/patches/webhook_in_fluentbits.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: fluentbits.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_fluentds.yaml b/config/crd/patches/webhook_in_fluentds.yaml
new file mode 100644
index 000000000..569f58e1a
--- /dev/null
+++ b/config/crd/patches/webhook_in_fluentds.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: fluentds.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_loggings.yaml b/config/crd/patches/webhook_in_loggings.yaml
new file mode 100644
index 000000000..32cb20c68
--- /dev/null
+++ b/config/crd/patches/webhook_in_loggings.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: loggings.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_outputs.yaml b/config/crd/patches/webhook_in_outputs.yaml
new file mode 100644
index 000000000..02c77319a
--- /dev/null
+++ b/config/crd/patches/webhook_in_outputs.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: outputs.logging.banzaicloud.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
new file mode 100644
index 000000000..072ac400d
--- /dev/null
+++ b/config/default/kustomization.yaml
@@ -0,0 +1,43 @@
+# Adds namespace to all resources.
+namespace: logging-operator-ws-system
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match with the prefix (text before '-') of the namespace
+# field above.
+namePrefix: logging-operator-ws-
+
+# Labels to add to all resources and selectors.
+#commonLabels:
+# someName: someValue
+
+bases:
+- ../crd
+- ../rbac
+- ../manager
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
+#- ../webhook
+# [CERTMANAGER] To enable cert-manager, uncomment next line. 'WEBHOOK' components are required.
+#- ../certmanager
+
+patches:
+- manager_image_patch.yaml
+ # Protect the /metrics endpoint by putting it behind auth.
+ # Only one of manager_auth_proxy_patch.yaml and
+ # manager_prometheus_metrics_patch.yaml should be enabled.
+- manager_auth_proxy_patch.yaml
+ # If you want your controller-manager to expose the /metrics
+ # endpoint w/o any authn/z, uncomment the following line and
+ # comment manager_auth_proxy_patch.yaml.
+ # Only one of manager_auth_proxy_patch.yaml and
+ # manager_prometheus_metrics_patch.yaml should be enabled.
+#- manager_prometheus_metrics_patch.yaml
+
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
+#- manager_webhook_patch.yaml
+
+# [CAINJECTION] Uncomment next line to enable the CA injection in the admission webhooks.
+# Uncomment 'CAINJECTION' in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
+# 'CERTMANAGER' needs to be enabled to use ca injection
+#- webhookcainjection_patch.yaml
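As a rough usage sketch (assuming kustomize and kubectl are available, and the image in manager_image_patch.yaml has been replaced with a real controller image), the assembled manifests can be rendered and applied with something like:

  kustomize build config/default | kubectl apply -f -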
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
new file mode 100644
index 000000000..d3994fb91
--- /dev/null
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -0,0 +1,24 @@
+# This patch injects a sidecar container which is an HTTP proxy for the controller manager;
+# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: kube-rbac-proxy
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
+ args:
+ - "--secure-listen-address=0.0.0.0:8443"
+ - "--upstream=http://127.0.0.1:8080/"
+ - "--logtostderr=true"
+ - "--v=10"
+ ports:
+ - containerPort: 8443
+ name: https
+ - name: manager
+ args:
+ - "--metrics-addr=127.0.0.1:8080"
diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml
new file mode 100644
index 000000000..eb909570e
--- /dev/null
+++ b/config/default/manager_image_patch.yaml
@@ -0,0 +1,12 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ # Change the value of image field below to your controller image URL
+ - image: IMAGE_URL
+ name: manager
diff --git a/config/default/manager_prometheus_metrics_patch.yaml b/config/default/manager_prometheus_metrics_patch.yaml
new file mode 100644
index 000000000..0b96c6813
--- /dev/null
+++ b/config/default/manager_prometheus_metrics_patch.yaml
@@ -0,0 +1,19 @@
+# This patch enables Prometheus scraping for the manager pod.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ metadata:
+ annotations:
+ prometheus.io/scrape: 'true'
+ spec:
+ containers:
+ # Expose the prometheus metrics on default port
+ - name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml
new file mode 100644
index 000000000..f2f7157b4
--- /dev/null
+++ b/config/default/manager_webhook_patch.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: manager
+ ports:
+ - containerPort: 443
+ name: webhook-server
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: webhook-server-cert
diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml
new file mode 100644
index 000000000..f6d71cb76
--- /dev/null
+++ b/config/default/webhookcainjection_patch.yaml
@@ -0,0 +1,15 @@
+# This patch adds annotations to the admission webhook configs, and
+# the variables $(NAMESPACE) and $(CERTIFICATENAME) will be substituted by kustomize.
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: mutating-webhook-configuration
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+---
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: validating-webhook-configuration
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
new file mode 100644
index 000000000..5c5f0b84c
--- /dev/null
+++ b/config/manager/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- manager.yaml
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
new file mode 100644
index 000000000..b6c85a52d
--- /dev/null
+++ b/config/manager/manager.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ control-plane: controller-manager
+ name: system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+ labels:
+ control-plane: controller-manager
+spec:
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ control-plane: controller-manager
+ spec:
+ containers:
+ - command:
+ - /manager
+ args:
+ - --enable-leader-election
+ image: controller:latest
+ name: manager
+ resources:
+ limits:
+ cpu: 100m
+ memory: 30Mi
+ requests:
+ cpu: 100m
+ memory: 20Mi
+ terminationGracePeriodSeconds: 10
diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml
new file mode 100644
index 000000000..618f5e417
--- /dev/null
+++ b/config/rbac/auth_proxy_role.yaml
@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: proxy-role
+rules:
+- apiGroups: ["authentication.k8s.io"]
+ resources:
+ - tokenreviews
+ verbs: ["create"]
+- apiGroups: ["authorization.k8s.io"]
+ resources:
+ - subjectaccessreviews
+ verbs: ["create"]
diff --git a/deploy/clusterrole_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml
similarity index 64%
rename from deploy/clusterrole_binding.yaml
rename to config/rbac/auth_proxy_role_binding.yaml
index d4cca39a2..48ed1e4b8 100644
--- a/deploy/clusterrole_binding.yaml
+++ b/config/rbac/auth_proxy_role_binding.yaml
@@ -1,12 +1,12 @@
-kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
metadata:
- name: logging-operator
-subjects:
-- kind: ServiceAccount
- name: logging-operator
- namespace: default
+ name: proxy-rolebinding
roleRef:
- kind: ClusterRole
- name: logging-operator
apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: proxy-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: system
diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml
new file mode 100644
index 000000000..d61e5469f
--- /dev/null
+++ b/config/rbac/auth_proxy_service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "8443"
+ prometheus.io/scheme: https
+ prometheus.io/scrape: "true"
+ labels:
+ control-plane: controller-manager
+ name: controller-manager-metrics-service
+ namespace: system
+spec:
+ ports:
+ - name: https
+ port: 8443
+ targetPort: https
+ selector:
+ control-plane: controller-manager
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
new file mode 100644
index 000000000..817f1fe61
--- /dev/null
+++ b/config/rbac/kustomization.yaml
@@ -0,0 +1,11 @@
+resources:
+- role.yaml
+- role_binding.yaml
+- leader_election_role.yaml
+- leader_election_role_binding.yaml
+# Comment the following 3 lines if you want to disable
+# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
+# which protects your /metrics endpoint.
+- auth_proxy_service.yaml
+- auth_proxy_role.yaml
+- auth_proxy_role_binding.yaml
diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml
new file mode 100644
index 000000000..85093a8c2
--- /dev/null
+++ b/config/rbac/leader_election_role.yaml
@@ -0,0 +1,26 @@
+# permissions to do leader election.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: leader-election-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - update
+ - patch
diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml
new file mode 100644
index 000000000..eed16906f
--- /dev/null
+++ b/config/rbac/leader_election_role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: leader-election-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: system
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
new file mode 100644
index 000000000..292fdb260
--- /dev/null
+++ b/config/rbac/role.yaml
@@ -0,0 +1,28 @@
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: manager-role
+rules:
+- apiGroups:
+ - logging.banzaicloud.io
+ resources:
+ - loggings
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - logging.banzaicloud.io
+ resources:
+ - loggings/status
+ verbs:
+ - get
+ - patch
+ - update
diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml
new file mode 100644
index 000000000..8f2658702
--- /dev/null
+++ b/config/rbac/role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: manager-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: system
diff --git a/config/samples/logging_v1alpha2_cluster_output_custom.yaml b/config/samples/logging_v1alpha2_cluster_output_custom.yaml
new file mode 100644
index 000000000..a9c895622
--- /dev/null
+++ b/config/samples/logging_v1alpha2_cluster_output_custom.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: control
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+ name: output-custom-cluster
+ namespace: control
+spec:
+ loggingRef: customref
+ nullout: {}
\ No newline at end of file
diff --git a/config/samples/logging_v1alpha2_flow.yaml b/config/samples/logging_v1alpha2_flow.yaml
new file mode 100644
index 000000000..8cd48e4ad
--- /dev/null
+++ b/config/samples/logging_v1alpha2_flow.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: custom
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: flow-sample
+spec:
+ selectors:
+ sampleKey: sampleValue
+ filters:
+ - stdout: {}
+ outputRefs:
+ - "output-sample"
diff --git a/config/samples/logging_v1alpha2_flow_custom.yaml b/config/samples/logging_v1alpha2_flow_custom.yaml
new file mode 100644
index 000000000..118a4789f
--- /dev/null
+++ b/config/samples/logging_v1alpha2_flow_custom.yaml
@@ -0,0 +1,14 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: flow-custom
+ namespace: custom
+spec:
+ loggingRef: customref
+ selectors:
+ sampleKey: customValue
+ filters:
+ - stdout: {}
+ outputRefs:
+ - "output-custom"
+ - "output-custom-cluster"
diff --git a/config/samples/logging_v1alpha2_logging_custom.yaml b/config/samples/logging_v1alpha2_logging_custom.yaml
new file mode 100644
index 000000000..f65a8765e
--- /dev/null
+++ b/config/samples/logging_v1alpha2_logging_custom.yaml
@@ -0,0 +1,12 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: customlogging
+spec:
+ loggingRef: customref
+ fluentd: {
+ disablePvc: true
+ }
+ watchNamespaces: ["custom"]
+ controlNamespace: control
+
diff --git a/config/samples/logging_v1alpha2_logging_default.yaml b/config/samples/logging_v1alpha2_logging_default.yaml
new file mode 100644
index 000000000..528978975
--- /dev/null
+++ b/config/samples/logging_v1alpha2_logging_default.yaml
@@ -0,0 +1,11 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: defaultlogging
+spec:
+ fluentd: {
+ disablePvc: true
+ }
+ fluentbit: {}
+ controlNamespace: default
+
diff --git a/config/samples/logging_v1alpha2_output.yaml b/config/samples/logging_v1alpha2_output.yaml
new file mode 100644
index 000000000..5d200a24d
--- /dev/null
+++ b/config/samples/logging_v1alpha2_output.yaml
@@ -0,0 +1,6 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: output-sample
+spec:
+ nullout: {}
\ No newline at end of file
diff --git a/config/samples/logging_v1alpha2_output_custom.yaml b/config/samples/logging_v1alpha2_output_custom.yaml
new file mode 100644
index 000000000..04d984017
--- /dev/null
+++ b/config/samples/logging_v1alpha2_output_custom.yaml
@@ -0,0 +1,8 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: output-custom
+ namespace: custom
+spec:
+ loggingRef: customref
+ nullout: {}
\ No newline at end of file
diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml
new file mode 100644
index 000000000..9cf26134e
--- /dev/null
+++ b/config/webhook/kustomization.yaml
@@ -0,0 +1,6 @@
+resources:
+- manifests.yaml
+- service.yaml
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml
new file mode 100644
index 000000000..25e21e3c9
--- /dev/null
+++ b/config/webhook/kustomizeconfig.yaml
@@ -0,0 +1,25 @@
+# the following config is for teaching kustomize where to look at when substituting vars.
+# It requires kustomize v2.1.0 or newer to work properly.
+nameReference:
+- kind: Service
+ version: v1
+ fieldSpecs:
+ - kind: MutatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/name
+ - kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/name
+
+namespace:
+- kind: MutatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/namespace
+ create: true
+- kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/namespace
+ create: true
+
+varReference:
+- path: metadata/annotations
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml
new file mode 100644
index 000000000..b4861025a
--- /dev/null
+++ b/config/webhook/service.yaml
@@ -0,0 +1,12 @@
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: webhook-service
+ namespace: system
+spec:
+ ports:
+ - port: 443
+ targetPort: 443
+ selector:
+ control-plane: controller-manager
diff --git a/controllers/logging_controller.go b/controllers/logging_controller.go
new file mode 100644
index 000000000..3ec2abfef
--- /dev/null
+++ b/controllers/logging_controller.go
@@ -0,0 +1,288 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers
+
+import (
+ "bytes"
+ "context"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/pkg/model/render"
+ "github.com/banzaicloud/logging-operator/pkg/resources"
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentbit"
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentd"
+ "github.com/banzaicloud/logging-operator/pkg/resources/model"
+ "github.com/go-logr/logr"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+
+ loggingv1alpha2 "github.com/banzaicloud/logging-operator/api/v1beta1"
+)
+
+// LoggingReconciler reconciles a Logging object
+type LoggingReconciler struct {
+ client.Client
+ Log logr.Logger
+}
+
+// +kubebuilder:rbac:groups=logging.banzaicloud.io,resources=loggings,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=logging.banzaicloud.io,resources=loggings/status,verbs=get;update;patch
+
+// Reconcile logging resources
+func (r *LoggingReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
+ _ = context.Background()
+ log := r.Log.WithValues("logging", req.NamespacedName)
+
+ logging := &loggingv1alpha2.Logging{}
+ err := r.Client.Get(context.TODO(), req.NamespacedName, logging)
+ if err != nil {
+ // Object not found, return. Created objects are automatically garbage collected.
+ // For additional cleanup logic use finalizers.
+ if apierrors.IsNotFound(err) {
+ return reconcile.Result{}, nil
+ }
+ return reconcile.Result{}, err
+ }
+
+ logging = logging.SetDefaults()
+
+ fluentdConfig, err := r.clusterConfiguration(logging)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ log.V(1).Info("flow configuration", "config", fluentdConfig)
+
+ reconcilers := make([]resources.ComponentReconciler, 0)
+
+ if logging.Spec.FluentdSpec != nil {
+ reconcilers = append(reconcilers, fluentd.New(r.Client, r.Log, logging, &fluentdConfig).Reconcile)
+ }
+
+ if logging.Spec.FluentbitSpec != nil {
+ reconcilers = append(reconcilers, fluentbit.New(r.Client, r.Log, logging).Reconcile)
+ }
+
+ for _, rec := range reconcilers {
+ result, err := rec()
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+ if result != nil {
+ // short circuit if requested explicitly
+ return *result, err
+ }
+ }
+
+ return ctrl.Result{}, nil
+}
+
+func (r *LoggingReconciler) clusterConfiguration(logging *loggingv1alpha2.Logging) (string, error) {
+ if logging.Spec.FlowConfigOverride != "" {
+ return logging.Spec.FlowConfigOverride, nil
+ }
+ loggingResources, err := r.GetResources(logging)
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to get logging resources", "logging", logging)
+ }
+ builder, err := loggingResources.CreateModel()
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to create model", "logging", logging)
+ }
+ fluentConfig, err := builder.Build()
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to build model", "logging", logging)
+ }
+ output := &bytes.Buffer{}
+ renderer := render.FluentRender{
+ Out: output,
+ Indent: 2,
+ }
+ err = renderer.Render(fluentConfig)
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to render fluentd config", "logging", logging)
+ }
+ return output.String(), nil
+}
+
+// SetupLoggingWithManager sets up the logging controller with the given manager
+func SetupLoggingWithManager(mgr ctrl.Manager, logger logr.Logger) *ctrl.Builder {
+ clusterOutputSource := &source.Kind{Type: &loggingv1alpha2.ClusterOutput{}}
+ clusterFlowSource := &source.Kind{Type: &loggingv1alpha2.ClusterFlow{}}
+ outputSource := &source.Kind{Type: &loggingv1alpha2.Output{}}
+ flowSource := &source.Kind{Type: &loggingv1alpha2.Flow{}}
+
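+	// requestMapper enqueues a reconcile request for every Logging resource whose
+	// loggingRef matches the changed (Cluster)Flow or (Cluster)Output.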
+ requestMapper := &handler.EnqueueRequestsFromMapFunc{
+ ToRequests: handler.ToRequestsFunc(func(mapObject handler.MapObject) []reconcile.Request {
+ object, err := meta.Accessor(mapObject.Object)
+ if err != nil {
+ return nil
+ }
+ // get all the logging resources from the cache
+ loggingList := &loggingv1alpha2.LoggingList{}
+ err = mgr.GetCache().List(context.TODO(), loggingList)
+ if err != nil {
+ logger.Error(err, "failed to list logging resources")
+ return nil
+ }
+ if o, ok := object.(*loggingv1alpha2.ClusterOutput); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ if o, ok := object.(*loggingv1alpha2.Output); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ if o, ok := object.(*loggingv1alpha2.Flow); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ if o, ok := object.(*loggingv1alpha2.ClusterFlow); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ return nil
+ }),
+ }
+
+ builder := ctrl.NewControllerManagedBy(mgr).
+ For(&loggingv1alpha2.Logging{}).
+ Owns(&corev1.Pod{}).
+ Watches(clusterOutputSource, requestMapper).
+ Watches(clusterFlowSource, requestMapper).
+ Watches(outputSource, requestMapper).
+ Watches(flowSource, requestMapper)
+
+ FluentdWatches(builder)
+ FluentbitWatches(builder)
+
+ return builder
+}
+
+func reconcileRequestsForLoggingRef(loggingList *loggingv1alpha2.LoggingList, loggingRef string) []reconcile.Request {
+ filtered := make([]reconcile.Request, 0)
+ for _, l := range loggingList.Items {
+ if l.Spec.LoggingRef == loggingRef {
+ filtered = append(filtered, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ // this happens to be empty as long as Logging is cluster scoped
+ Namespace: l.Namespace,
+ Name: l.Name,
+ },
+ })
+ }
+ }
+ return filtered
+}
+
+// FluentdWatches for fluentd statefulset
+func FluentdWatches(builder *ctrl.Builder) *ctrl.Builder {
+ return builder.
+ Owns(&corev1.ConfigMap{}).
+ Owns(&corev1.Service{}).
+ Owns(&appsv1.Deployment{}).
+ Owns(&rbacv1.ClusterRole{}).
+ Owns(&rbacv1.ClusterRoleBinding{}).
+ Owns(&corev1.ServiceAccount{})
+}
+
+// FluentbitWatches for fluent-bit daemonset
+func FluentbitWatches(builder *ctrl.Builder) *ctrl.Builder {
+ return builder.
+ Owns(&corev1.ConfigMap{}).
+ Owns(&appsv1.DaemonSet{}).
+ Owns(&rbacv1.ClusterRole{}).
+ Owns(&rbacv1.ClusterRoleBinding{}).
+ Owns(&corev1.ServiceAccount{})
+}
+
+// GetResources collects all the resources referenced by the logging resource
+func (r *LoggingReconciler) GetResources(logging *loggingv1alpha2.Logging) (*model.LoggingResources, error) {
+ loggingResources := model.NewLoggingResources(logging, r.Client, r.Log)
+ var err error
+
+ clusterFlows := &loggingv1alpha2.ClusterFlowList{}
+ err = r.List(context.TODO(), clusterFlows, client.InNamespace(logging.Spec.ControlNamespace))
+ if err != nil {
+ return nil, err
+ }
+ if len(clusterFlows.Items) > 0 {
+ for _, i := range clusterFlows.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.ClusterFlows = append(loggingResources.ClusterFlows, i)
+ }
+ }
+ }
+
+ clusterOutputs := &loggingv1alpha2.ClusterOutputList{}
+ err = r.List(context.TODO(), clusterOutputs, client.InNamespace(logging.Spec.ControlNamespace))
+ if err != nil {
+ return nil, err
+ }
+ if len(clusterOutputs.Items) > 0 {
+ for _, i := range clusterOutputs.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.ClusterOutputs = append(loggingResources.ClusterOutputs, i)
+ }
+ }
+ }
+
+ watchNamespaces := logging.Spec.WatchNamespaces
+
+ if len(watchNamespaces) == 0 {
+ nsList := &corev1.NamespaceList{}
+ err = r.List(context.TODO(), nsList)
+ if err != nil {
+ return nil, errors.WrapIf(err, "failed to list all namespaces")
+ }
+ for _, ns := range nsList.Items {
+ watchNamespaces = append(watchNamespaces, ns.Name)
+ }
+ }
+
+ for _, ns := range watchNamespaces {
+ flows := &loggingv1alpha2.FlowList{}
+ err = r.List(context.TODO(), flows, client.InNamespace(ns))
+ if err != nil {
+ return nil, err
+ }
+ if len(flows.Items) > 0 {
+ for _, i := range flows.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.Flows = append(loggingResources.Flows, i)
+ }
+ }
+ }
+ outputs := &loggingv1alpha2.OutputList{}
+ err = r.List(context.TODO(), outputs, client.InNamespace(ns))
+ if err != nil {
+ return nil, err
+ }
+ if len(outputs.Items) > 0 {
+ for _, i := range outputs.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.Outputs = append(loggingResources.Outputs, i)
+ }
+ }
+ }
+ }
+
+ return loggingResources, nil
+}
diff --git a/controllers/logging_controller_test.go b/controllers/logging_controller_test.go
new file mode 100644
index 000000000..2e4b2b877
--- /dev/null
+++ b/controllers/logging_controller_test.go
@@ -0,0 +1,552 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers_test
+
+import (
+ "context"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/banzaicloud/logging-operator/controllers"
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentd"
+ "github.com/onsi/gomega"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/kubernetes/scheme"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var (
+ err error
+ mgr ctrl.Manager
+ requests chan reconcile.Request
+ stopMgr chan struct{}
+ mgrStopped *sync.WaitGroup
+ reconcilerErrors chan error
+ g gomega.GomegaWithT
+)
+
+func TestFluentdResourcesCreatedAndRemoved(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+
+ cm := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.SecretConfigName), cm)()
+
+ g.Expect(cm.Data["fluent.conf"]).Should(gomega.And(
+ gomega.ContainSubstring("@include /fluentd/etc/input.conf"),
+ gomega.ContainSubstring("@include /fluentd/app-config/*"),
+ gomega.ContainSubstring("@include /fluentd/etc/devnull.conf"),
+ ))
+
+ deployment := &appsv1.StatefulSet{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.StatefulSetName), deployment)()
+}
+
+func TestSingleFlowWithoutOutputRefs(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestSingleFlowWithoutExistingLoggingRef(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ LoggingRef: "nonexistent",
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).ShouldNot(gomega.ContainSubstring("namespace " + testNamespace))
+}
+
+func TestSingleFlowWithOutputRefDefaultLoggingRef(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1beta1.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.OutputSpec{
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ }
+
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestSingleFlowWithClusterOutput(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1beta1.ClusterOutput{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-cluster-output",
+ Namespace: controlNamespace,
+ },
+ Spec: v1beta1.ClusterOutputSpec{
+ OutputSpec: v1beta1.OutputSpec{
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ },
+ }
+
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-cluster-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestClusterFlowWithNamespacedOutput(t *testing.T) {
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1beta1.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.OutputSpec{
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ }
+
+ flow := &v1beta1.ClusterFlow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: controlNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ err := wait.Poll(time.Second, time.Second*3, func() (bool, error) {
+ select {
+ case err := <-reconcilerErrors:
+ expected := "referenced output not found: test-output"
+ if !strings.Contains(err.Error(), expected) {
+ return false, errors.Errorf("expected `%s` but received `%s`", expected, err.Error())
+ } else {
+ return true, nil
+ }
+ case <-time.After(100 * time.Millisecond):
+ return false, nil
+ }
+ })
+
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+}
+
+func TestSingleFlowWithOutputRef(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ LoggingRef: "someloggingref",
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1beta1.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.OutputSpec{
+ LoggingRef: "someloggingref",
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ }
+
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ LoggingRef: "someloggingref",
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestSingleFlowDefaultLoggingRefInvalidOutputRef(t *testing.T) {
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output-nonexistent"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, flow)()
+
+ err := wait.Poll(time.Second, time.Second*3, func() (bool, error) {
+ select {
+ case err := <-reconcilerErrors:
+ expected := "referenced output not found: test-output-nonexistent"
+ if !strings.Contains(err.Error(), expected) {
+ return false, errors.Errorf("expected `%s` but received `%s`", expected, err.Error())
+ } else {
+ return true, nil
+ }
+ case <-time.After(100 * time.Millisecond):
+ return false, nil
+ }
+ })
+
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+}
+
+func TestSingleFlowWithSecretInOutput(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1beta1.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1beta1.LoggingSpec{
+ FluentdSpec: &v1beta1.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ WatchNamespaces: []string{testNamespace},
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1beta1.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.OutputSpec{
+ S3OutputConfig: &output.S3OutputConfig{
+ AwsAccessKey: &secret.Secret{
+ ValueFrom: &secret.ValueFrom{
+ SecretKeyRef: &secret.KubernetesSecret{
+ Name: "topsecret",
+ Key: "key",
+ },
+ },
+ },
+ SharedCredentials: &output.S3SharedCredentials{},
+ },
+ },
+ }
+ flow := &v1beta1.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1beta1.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{
+ "test-output",
+ },
+ },
+ }
+ topsecret := &corev1.Secret{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "topsecret",
+ Namespace: testNamespace,
+ },
+ StringData: map[string]string{
+ "key": "topsecretdata",
+ },
+ }
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, topsecret)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("topsecretdata"))
+}
+
+// TODO add following tests:
+// - resources from non watched namespaces are not incorporated
+// - namespaced flow cannot use an output not enabled for the given namespace
+
+func beforeEach(t *testing.T) func() {
+ mgr, err = ctrl.NewManager(cfg, ctrl.Options{
+ Scheme: scheme.Scheme,
+ })
+ g.Expect(err).NotTo(gomega.HaveOccurred())
+
+ flowReconciler := &controllers.LoggingReconciler{
+ Client: mgr.GetClient(),
+ Log: ctrl.Log.WithName("controllers").WithName("Flow"),
+ }
+
+ var wrappedReconciler reconcile.Reconciler
+ wrappedReconciler, requests, _, reconcilerErrors = duplicateRequest(t, flowReconciler)
+
+ err := controllers.SetupLoggingWithManager(mgr, ctrl.Log.WithName("manager").WithName("Setup")).Complete(wrappedReconciler)
+ g.Expect(err).NotTo(gomega.HaveOccurred())
+
+ stopMgr, mgrStopped = startTestManager(t, mgr)
+
+ return func() {
+ close(stopMgr)
+ mgrStopped.Wait()
+ }
+}
+
+func ensureCreated(t *testing.T, object runtime.Object) func() {
+ err := mgr.GetClient().Create(context.TODO(), object)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ return func() {
+ mgr.GetClient().Delete(context.TODO(), object)
+ }
+}
+
+func ensureCreatedEventually(t *testing.T, ns, name string, object runtime.Object) func() {
+ err := wait.Poll(time.Second, time.Second*3, func() (bool, error) {
+ err := mgr.GetClient().Get(context.TODO(), types.NamespacedName{
+ Name: name, Namespace: ns,
+ }, object)
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ return true, err
+ })
+ if err != nil {
+ t.Fatalf("%+v", errors.WithStack(err))
+ }
+ return func() {
+ mgr.GetClient().Delete(context.TODO(), object)
+ }
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
new file mode 100644
index 000000000..c01b78654
--- /dev/null
+++ b/controllers/suite_test.go
@@ -0,0 +1,142 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/pborman/uuid"
+ v12 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/envtest"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ // +kubebuilder:scaffold:imports
+)
+
+// These tests use the standard Go testing package together with Gomega matchers
+// (see controllers/logging_controller_test.go); no Ginkgo suite is set up here.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var testNamespace = "test-" + uuid.New()[:8]
+var controlNamespace = "control"
+
+func TestMain(m *testing.M) {
+ err := beforeSuite()
+ if err != nil {
+ fmt.Printf("%+v", err)
+ os.Exit(1)
+ }
+ code := m.Run()
+ err = afterSuite()
+ if err != nil {
+ fmt.Printf("%+v", err)
+ os.Exit(1)
+ }
+ os.Exit(code)
+}
+
+func beforeSuite() error {
+ logf.SetLogger(zap.LoggerTo(os.Stdout, true))
+
+ testEnv = &envtest.Environment{
+ CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
+ }
+
+ var err error
+
+ cfg, err = testEnv.Start()
+ if err != nil {
+ return err
+ }
+ if cfg == nil {
+ return fmt.Errorf("failed to start testenv, config is nil")
+ }
+
+ err = v1beta1.AddToScheme(scheme.Scheme)
+ if err != nil {
+ return err
+ }
+
+ k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+ if err != nil {
+ return err
+ }
+ if k8sClient == nil {
+ return fmt.Errorf("failed to create k8s config")
+ }
+
+ for _, ns := range []string{controlNamespace, testNamespace} {
+ err := k8sClient.Create(context.TODO(), &v12.Namespace{
+ ObjectMeta: v1.ObjectMeta{
+ Name: ns,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func afterSuite() error {
+ return testEnv.Stop()
+}
+
+// duplicateRequest returns a reconcile.Reconciler implementation that delegates to inner and
+// writes the request, the result and any error to the returned channels after Reconcile has finished.
+func duplicateRequest(t *testing.T, inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request, chan reconcile.Result, chan error) {
+ requests := make(chan reconcile.Request)
+ results := make(chan reconcile.Result)
+ errors := make(chan error)
+ fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
+ result, err := inner.Reconcile(req)
+ if err != nil {
+ t.Logf("reconcile failure err: %+v req: %+v, result: %+v", err, req, result)
+ errors <- err
+ }
+ requests <- req
+ results <- result
+ return result, err
+ })
+ return fn, requests, results, errors
+}
+
+// startTestManager starts the given manager in a background goroutine and returns a stop
+// channel plus a WaitGroup that completes once the manager has stopped.
+func startTestManager(t *testing.T, mgr manager.Manager) (chan struct{}, *sync.WaitGroup) {
+ stop := make(chan struct{})
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := mgr.Start(stop); err != nil {
+ t.Fatalf("%+v", err)
+ }
+ }()
+ return stop, wg
+}
diff --git a/deploy/clusterrole.yaml b/deploy/clusterrole.yaml
deleted file mode 100644
index 85a4d051b..000000000
--- a/deploy/clusterrole.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- creationTimestamp: null
- name: logging-operator
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - services
- - endpoints
- - persistentvolumeclaims
- - events
- - configmaps
- - secrets
- - serviceaccounts
- verbs:
- - '*'
-- apiGroups:
- - rbac.authorization.k8s.io
- resources:
- - clusterroles
- - clusterrolebindings
- verbs:
- - list
- - get
- - create
- - update
- - watch
-- apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
-- apiGroups:
- - apps
- resources:
- - deployments
- - daemonsets
- - replicasets
- - statefulsets
- verbs:
- - '*'
-- apiGroups:
- - monitoring.coreos.com
- resources:
- - servicemonitors
- verbs:
- - get
- - create
-- apiGroups:
- - logging.banzaicloud.com
- resources:
- - '*'
- - fluentbits
- - fluentds
- verbs:
- - '*'
diff --git a/deploy/crds/logging_v1alpha1_fluentbit_cr.yaml b/deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
deleted file mode 100644
index 575f8ed2a..000000000
--- a/deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentbit
-metadata:
- name: example-fluentbit
- labels:
- release: test
-spec:
- namespace: default
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/api/v1/metrics/prometheus"
- prometheus.io/port: "2020"
- tls:
- enabled: false
- image:
- tag: "1.1.3"
- repository: "fluent/fluent-bit"
- pullPolicy: "IfNotPresent"
- resources: {}
diff --git a/deploy/crds/logging_v1alpha1_fluentbit_crd.yaml b/deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
deleted file mode 100644
index e2c9264f3..000000000
--- a/deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: fluentbits.logging.banzaicloud.com
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentbit
- listKind: FluentbitList
- plural: fluentbits
- singular: fluentbit
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
diff --git a/deploy/crds/logging_v1alpha1_fluentd_cr.yaml b/deploy/crds/logging_v1alpha1_fluentd_cr.yaml
deleted file mode 100644
index aa149e59c..000000000
--- a/deploy/crds/logging_v1alpha1_fluentd_cr.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd
-metadata:
- name: example-fluentd
- labels:
- release: test
-spec:
- namespace: default
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/metrics"
- prometheus.io/port: "25000"
- tls:
- enabled: false
- image:
- tag: "v1.5.0"
- repository: "banzaicloud/fluentd"
- pullPolicy: "IfNotPresent"
- volumeModImage:
- tag: "latest"
- repository: "busybox"
- pullPolicy: "IfNotPresent"
- configReloaderImage:
- tag: "v0.2.2"
- repository: "jimmidyson/configmap-reload"
- pullPolicy: "IfNotPresent"
- resources: {}
- fluentdPvcSpec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 21Gi
\ No newline at end of file
diff --git a/deploy/crds/logging_v1alpha1_fluentd_crd.yaml b/deploy/crds/logging_v1alpha1_fluentd_crd.yaml
deleted file mode 100644
index 93148ffde..000000000
--- a/deploy/crds/logging_v1alpha1_fluentd_crd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: fluentds.logging.banzaicloud.com
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentd
- listKind: FluentdList
- plural: fluentds
- singular: fluentd
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
diff --git a/deploy/crds/logging_v1alpha1_plugin_cr.yaml b/deploy/crds/logging_v1alpha1_plugin_cr.yaml
deleted file mode 100644
index 026250e7a..000000000
--- a/deploy/crds/logging_v1alpha1_plugin_cr.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Plugin
-metadata:
- name: example-plugin
- labels:
- release: test
-spec:
- input:
- label:
- app: "*"
- output:
- - type: gcs
- name: outputGCS
- parameters:
- - name: project
- valueFrom:
- secretKeyRef:
- name: ""
- key: ""
- - name: client_email
- valueFrom:
- secretKeyRef:
- name: ""
- key: ""
- - name: private_key
- valueFrom:
- secretKeyRef:
- name: ""
- key: ""
- - name: bucket
- value: ""
\ No newline at end of file
diff --git a/deploy/crds/logging_v1alpha1_plugin_crd.yaml b/deploy/crds/logging_v1alpha1_plugin_crd.yaml
deleted file mode 100644
index b798db0d9..000000000
--- a/deploy/crds/logging_v1alpha1_plugin_crd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: plugins.logging.banzaicloud.com
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Plugin
- listKind: PluginList
- plural: plugins
- singular: plugin
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
diff --git a/deploy/operator.yaml b/deploy/operator.yaml
deleted file mode 100644
index 1ac252764..000000000
--- a/deploy/operator.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: logging-operator
-spec:
- replicas: 1
- selector:
- matchLabels:
- name: logging-operator
- template:
- metadata:
- labels:
- name: logging-operator
- spec:
- serviceAccountName: logging-operator
- containers:
- - name: logging-operator
- # Replace this with the built image name
- image: banzaicloud/logging-operator:0.2.2
- command:
- - logging-operator
- imagePullPolicy: IfNotPresent
- env:
- - name: WATCH_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: OPERATOR_NAME
- value: "logging-operator"
diff --git a/deploy/service_account.yaml b/deploy/service_account.yaml
deleted file mode 100644
index 1a684cff5..000000000
--- a/deploy/service_account.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: logging-operator
diff --git a/developer.md b/developer.md
deleted file mode 100644
index 0a57334e7..000000000
--- a/developer.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Developer's Guide
-
-## Setting up local development environment
-
-
-### Prerequisites
-
-These steps are required to build the logging-operator and run on your computer.
-
-### Install operator-sdk
-
-Please follow the official guide for the **operator-sdk**:
-https://github.com/operator-framework/operator-sdk#quick-start
-
-### Set-up the `kubernetes` context
-
-Set up the kubernetes environment where you want create resources
-
-#### Docker-for-mac
-
-```
-kubectl config use-context docker-for-desktop
-```
-
-#### Minikube
-
-```
-kubectl config use-context minikiube
-```
-
-### Install using operator-sdk local
-
-```
-operator-sdk up local
-```
-
-## Building docker image from the operator
-
-```
-$ docker build -t banzaicloud/logging-operator:local
-```
-
-### Using Helm to install logging-operator (with custom image)
-
-Add banzaicloud-stable repo (or download the chart)
-
-```
-helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
-helm repo update
-```
-
-Install the Helm deployment with custom (local) image
-
-```
-helm install banzaicloud-stable/logging-operator --set image.tag="local"
-```
-
-Verify installation
-
-```
-helm list
-```
-
-### Contribution
-
-1. When contributing please check the issues and pull-requests weather your problem has been already addressed.
-2. Open an issue and/or pull request describing your contribution
-3. Please follow the issue and pull-request templates instructions
diff --git a/docs/crds.md b/docs/crds.md
new file mode 100644
index 000000000..805c399ad
--- /dev/null
+++ b/docs/crds.md
@@ -0,0 +1,277 @@
+# Custom Resource Definitions
+
+This document contains detailed information about the CRDs the logging operator uses.
+
+Available CRDs:
+- [loggings.logging.banzaicloud.io](/config/crd/bases/logging.banzaicloud.io_loggings.yaml)
+- [outputs.logging.banzaicloud.io](/config/crd/bases/logging.banzaicloud.io_outputs.yaml)
+- [flows.logging.banzaicloud.io](/config/crd/bases/logging.banzaicloud.io_flows.yaml)
+- [clusteroutputs.logging.banzaicloud.io](/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml)
+- [clusterflows.logging.banzaicloud.io](/config/crd/bases/logging.banzaicloud.io_clusterflows.yaml)
+
+> You can find example yamls [here](/docs/examples)
+
+## loggings
+
+The `Logging` resource defines the logging infrastructure for your cluster. You can define **one** or **more** `logging` resources. This resource holds a `logging pipeline` together: it is responsible for deploying `fluentd` and `fluent-bit` on the cluster, and it declares the `controlNamespace` and, if applicable, the `watchNamespaces`.
+
+> Note: `logging` resources are identified by `loggingRef`. If you set up multiple logging pipelines, the flows and outputs have to reference the deployment they belong to through this field. This is typically needed when you want to run multiple fluentd instances with separate configurations.
+
+You can install the `logging` resource via the [Helm chart](/charts/logging-operator-logging), which supports built-in TLS generation.
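+
+For example, a second, isolated pipeline can be created by giving a `Logging` resource its own `loggingRef` and setting the same value on the `Flow` and `Output` resources that belong to it. A minimal sketch (the names, namespaces and the `customref` value are illustrative):
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: custom-logging
+spec:
+  loggingRef: customref
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: custom-flow
+  namespace: my-app
+spec:
+  loggingRef: customref
+  selectors:
+    app: my-app
+  outputRefs:
+    - custom-output
+```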
+
+### Namespace separation
+A `logging pipeline` consists of two types of resources.
+- `Namespaced` resources: `Flow`, `Output`
+- `Global` resources: `ClusterFlow`, `ClusterOutput`
+
+The `namespaced` resources are only effective in their **own** namespace. `Global` resources operate **cluster wide**.
+
+> You can only create `ClusterFlow` and `ClusterOutput` in the `controlNamespace`. It **MUST** be a **protected** namespace that only **administrators** have access to.
+
+Create a namespace for logging
+```bash
+kubectl create ns logging
+```
+
+**`logging` plain example**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging
+```
+
+**`logging` with filtered namespaces**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-namespaced
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging
+ watchNamespaces: ["prod", "test"]
+```
+
+### Logging parameters
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------------------------------------------------------------------|
+| loggingRef | string | "" | Reference name of the logging deployment |
+| flowConfigCheckDisabled | bool | False | Disable configuration check before deploy |
+| flowConfigOverride | string | "" | Use static configuration instead of generated config. |
+| fluentbit | [FluentbitSpec](#Fluent-bit-Spec) | {} | Fluent-bit configurations |
+| fluentd | [FluentdSpec](#Fluentd-Spec) | {} | Fluentd configurations |
+| watchNamespaces | []string | "" | Limit namespaces from where to read Flow and Output specs |
+| controlNamespace | string | "" | Control namespace that contains ClusterOutput and ClusterFlow resources |
+
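+**`logging` with a static configuration override** (a sketch; the configuration snippet itself is illustrative)
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: static-logging
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+  # flowConfigOverride is used verbatim instead of the generated configuration
+  flowConfigOverride: |
+    <match **>
+      @type stdout
+    </match>
+```
+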
+#### Fluentd Spec
+
+You can customize the `fluentd` statefulset with the following parameters.
+
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------------------------------------------------------------------|
+| annotations | map[string]string | {} | Extra annotations to Kubernetes resource|
+| tls | [TLS](#TLS-Spec) | {} | Configure TLS settings|
+| image | [ImageSpec](#Image-Spec) | {} | Fluentd image override |
+| fluentdPvcSpec | [PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#persistentvolumeclaimspec-v1-core) | {} | Fluentd PVC spec to mount a persistent volume for the buffer |
+| disablePvc | bool | false | Disable PVC binding |
+| volumeModImage | [ImageSpec](#Image-Spec) | {} | Volume modifier image override |
+| configReloaderImage | [ImageSpec](#Image-Spec) | {} | Config reloader image override |
+| resources | [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#resourcerequirements-v1-core) | {} | Resource requirements and limits |
+
+**`logging` with custom fluentd pvc**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd:
+ fluentdPvcSpec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 40Gi
+ storageClassName: fast
+ fluentbit: {}
+ controlNamespace: logging
+```
+
+#### Fluent-bit Spec
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------------------------------------------------------------------|
+| annotations | map[string]string | {} | Extra annotations to Kubernetes resource|
+| tls | [TLS](#TLS-Spec) | {} | Configure TLS settings|
+| image | [ImageSpec](#Image-Spec) | {} | Fluent-bit image override |
+| resources | [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#resourcerequirements-v1-core) | {} | Resource requirements and limits |
+| targetHost | string | *Fluentd host* | Hostname to forward the logs to |
+| targetPort | int | *Fluentd port* | Port to forward the logs to |
+| parser | string | cri | Change fluent-bit input parse configuration. [Available parsers](https://github.com/fluent/fluent-bit/blob/master/conf/parsers.conf) |
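+
+**`logging` with fluent-bit forwarding to a custom target** (a sketch; the host, port and parser values are illustrative)
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-forward
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit:
+    targetHost: fluentd.example.com
+    targetPort: 24240
+    parser: docker
+  controlNamespace: logging
+```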
+
+**`logging` with custom fluent-bit annotations**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit:
+ annotations:
+ my-annotations/enable: true
+ controlNamespace: logging
+```
+
+#### Image Spec
+
+Override default images
+
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------|
+| repository | string | "" | Image repository |
+| tag | string | "" | Image tag |
+| pullPolicy | string | "" | Always, IfNotPresent, Never |
+
+**`logging` with custom fluentd image**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd:
+ image:
+ repository: banzaicloud/fluentd
+ tag: v1.6.3-alpine
+ pullPolicy: IfNotPresent
+ fluentbit: {}
+ controlNamespace: logging
+```
+
+#### TLS Spec
+
+Define TLS certificate secret
+
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------|
+| enabled | string | "" | Enable TLS encryption |
+| secretName | string | "" | Kubernetes secret that contains: **tls.crt, tls.key, ca.crt** |
+| sharedKey | string | "" | Shared secret for fluentd authentication |
+
+
+**`logging` setup with TLS**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-tls
+ namespace: logging
+spec:
+ fluentd:
+ disablePvc: true
+ tls:
+ enabled: true
+ secretName: fluentd-tls
+ sharedKey: asdadas
+ fluentbit:
+ tls:
+ enabled: true
+ secretName: fluentbit-tls
+ sharedKey: asdadas
+ controlNamespace: logging
+
+```
+
+## outputs, clusteroutputs
+
+Outputs are the final stage for a `logging flow`. You can define multiple `outputs` and attach them to multiple `flows`.
+
+> Note: `Flow` can be connected to `Output` and `ClusterOutput` but `ClusterFlow` is only attachable to `ClusterOutput`.
+
+### Defining outputs
+
+The supported `Output` plugins are documented [here](./plugins/outputs)
+
+| Name | Type | Default | Description |
+|-------------------------|-------------------|---------|-------------|
+| **Output Definitions** | [Output](./plugins/outputs) | nil | Named output definitions |
+| loggingRef | string | "" | Reference to the `logging` resource the `Output` or `ClusterOutput` is connected to |
+
+
+**`output` s3 example**
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: s3-output-sample
+spec:
+ s3:
+ aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsAccessKeyId
+ namespace: default
+ aws_sec_key:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsSecretAccesKey
+ namespace: default
+ s3_bucket: example-logging-bucket
+ s3_region: eu-west-1
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
+```
+
+## flows, clusterflows
+
+A `Flow` defines a `logging flow`: the `filters` to apply and the `outputs` to send the logs to.
+
+> `Flow` resources are `namespaced`; the `selector` only selects `Pod` logs within the same namespace.
+> `ClusterFlow` selects logs from **ALL** namespaces.
+
+### Parameters
+| Name | Type | Default | Description |
+|-------------------------|-------------------|---------|-------------|
+| selectors | map[string]string | {} | Kubernetes label selectors for the log. |
+| filters | [][Filter](./plugins/filters) | [] | List of applied [filter](./plugins/filters). |
+| loggingRef | string | "" | Reference to the `logging` resource to connect the `Flow` (and `ClusterFlow`) to |
+| outputRefs | []string | [] | List of [Outputs](#Defining-outputs) or [ClusterOutputs](#Defining-outputs) names |
+
+*`flow` example with filters and output in the `default` namespace*
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ filters:
+  - parser:
+ key_name: log
+ remove_key_name_field: true
+ parsers:
+ - type: nginx
+ - tag_normaliser:
+ format: ${namespace_name}.${pod_name}.${container_name}
+ outputRefs:
+ - s3-output
+ selectors:
+ app: nginx
+```
\ No newline at end of file
diff --git a/docs/developers.md b/docs/developers.md
new file mode 100644
index 000000000..c6f351b6a
--- /dev/null
+++ b/docs/developers.md
@@ -0,0 +1,154 @@
+# Developers documentation
+
+This documentation helps you set up a developer environment and write plugins for the operator.
+
+## Setting up Kind
+
+Install Kind on your computer
+```
+go get sigs.k8s.io/kind@v0.5.1
+```
+
+Create cluster
+```
+kind create cluster --name logging
+```
+
+Install the prerequisites (this is a Kubebuilder Makefile target that generates and installs the CRDs)
+```
+make install
+```
+
+Run the Operator
+```
+go run main.go
+```
+
+## Writing a plugin
+
+To add a `plugin` to the logging operator you need to define the plugin struct.
+
+> Note: Place your plugin in the corresponding directory `pkg/model/filter` or `pkg/model/output`
+
+```go
+type MyExampleOutput struct {
+ // Path that is required for the plugin
+ Path string `json:"path,omitempty"`
+}
+```
+
+The plugin uses the **JSON** tags to parse and validate the configuration. Without tags the configuration is not valid. The fluentd parameter name must match the JSON tag. Don't forget to use `omitempty` for non-required parameters.
+
+### Implement `ToDirective`
+
+To render the configuration you have to implement the `ToDirective` function.
+```go
+func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ ...
+}
+```
+For simple Plugins you can use the `NewFlatDirective` function.
+```go
+func (c *ExampleOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "example",
+ Directive: "output",
+ Tags: "**",
+ }, c, secretLoader)
+}
+```
+For more examples, please check the available plugins.
+
+### Reuse existing Plugin sections
+
+You can embed existing configuration sections in your plugins. For example, modern `Output` plugins have a `Buffer` section.
+
+```go
+// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+```
+
+If you use embedded sections, you must call their `ToDirective` method manually and append the result as a `SubDirective`:
+
+```go
+if c.Buffer != nil {
+ if buffer, err := c.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, buffer)
+ }
+}
+```
+
+### Special plugin tags
+To document the plugins, the logging-operator uses Go struct `tags` (similar to the JSON tags above). Tags named `plugin` carry special instructions.
+
+Special tag `default`
+The `default` tag assigns default values to parameters. These parameters are then explicitly set in the generated fluentd configuration.
+```go
+RetryForever bool `json:"retry_forever" plugin:"default:true"`
+```
+Special tag `required`
+The `required` tag ensures that the attribute can **not** be empty.
+```go
+RetryForever bool `json:"retry_forever" plugin:"required"`
+```
+
+## Generate documentation for Plugin
+
+The operator parses the `docstrings` to generate the documentation.
+
+```go
+...
+// AWS access key id
+AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"`
+...
+```
+
+This will generate the following Markdown:
+
+| Variable Name | Default | Applied function |
+|---|---|---|
+|AwsAccessKey| | AWS access key id|
+
+You can *hint* default values in the docstring via `(default: value)`. This is useful if you don't want to set the default explicitly with a `tag`. However, during rendering, defaults set in `tags` take priority over the docstring.
+```go
+...
+// The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})
+S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty"`
+...
+```
+
+### Special docstrings
+
+- `+docName:"Title for the plugin section"`
+- `+docLink:"Buffer,./buffer.md"`
+
+You can declare the document **title** and **description** above the `type _doc interface{}` type declaration.
+
+Example Document headings:
+```go
+// +docName:"Amazon S3 plugin for Fluentd"
+// **s3** output plugin buffers event logs in local file and upload it to S3 periodically. This plugin splits files exactly by using the time of event logs (not the time when the logs are received). For example, a log '2011-01-02 message B' is reached, and then another log '2011-01-03 message B' is reached in this order, the former one is stored in "20110102.gz" file, and latter one in "20110103.gz" file.
+type _doc interface{}
+```
+
+Example Plugin headings:
+```go
+// +kubebuilder:object:generate=true
+// +docName:"Shared Credentials"
+type S3SharedCredentials struct {
+...
+```
+
+Example linking embedded sections
+```go
+// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+```
+
+### Generate docs for your Plugin
+
+```
+make docs
+```
diff --git a/docs/example-es-nginx.md b/docs/example-es-nginx.md
new file mode 100644
index 000000000..507bf467e
--- /dev/null
+++ b/docs/example-es-nginx.md
@@ -0,0 +1,163 @@
+
+
+# Store Nginx Access Logs in ElasticSearch with Logging Operator
+
+
+
+### Add operator chart repository:
+```bash
+helm repo add es-operator https://raw.githubusercontent.com/upmc-enterprises/elasticsearch-operator/master/charts/
+helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
+helm repo update
+```
+
+### Install ElasticSearch with operator
+```bash
+helm install --name elasticsearch-operator es-operator/elasticsearch-operator --set rbac.enabled=True
+helm install --name elasticsearch es-operator/elasticsearch --set kibana.enabled=True --set cerebro.enabled=True
+```
+> [Elasticsearch Operator Documentation](https://github.com/upmc-enterprises/elasticsearch-operator)
+
+## Install with Helm
+### Logging Operator
+```bash
+helm install --name logging banzaicloud-stable/logging-operator
+```
+> You can install `logging` resource via [Helm chart](/charts/logging-operator-logging) with built-in TLS generation.
+
+### Nginx App + Logging Definition
+```bash
+helm install --name nginx-demo banzaicloud-stable/nginx-logging-es-demo
+```
+
+## Install from manifest
+
+#### Create `logging` resource
+```bash
+cat <<EOF | kubectl apply -f -
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+EOF
+```
+
+> Note: `ClusterOutput` and `ClusterFlow` resources will only be accepted in the `controlNamespace`
+
+
+#### Create an ElasticSearch output definition
+```bash
+cat <<EOF | kubectl apply -f -
+# ... Output resource with an `elasticsearch` section goes here
+# (see [Save all logs to ElasticSearch](./example-es.md) for a full example) ...
+EOF
+```
+
+> Note: For production setups we recommend using a longer `timekey` interval to avoid generating too many objects.
+
+#### Create `flow` resource
+```bash
+cat <<EOF | kubectl apply -f -
+# ... Flow resource selecting the nginx demo Pods and referencing the output above ...
+EOF
+```
+
+
+
+#### Forward Kibana Dashboard
+```bash
+kubectl port-forward svc/kibana-elasticsearch-cluster 5601:80
+```
+[Dashboard URL: http://localhost:5601](http://localhost:5601)
+
+
+
+
diff --git a/docs/example-es.md b/docs/example-es.md
new file mode 100644
index 000000000..c5c50c96f
--- /dev/null
+++ b/docs/example-es.md
@@ -0,0 +1,95 @@
+
+
+# Save all logs to ElasticSearch
+
+
+#### Add operator chart repository:
+```bash
+$ helm repo add es-operator https://raw.githubusercontent.com/upmc-enterprises/elasticsearch-operator/master/charts/
+$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
+$ helm repo update
+```
+
+## Install ElasticSearch with operator
+```bash
+$ helm install --name elasticsearch-operator es-operator/elasticsearch-operator --set rbac.enabled=True
+$ helm install --name elasticsearch es-operator/elasticsearch --set kibana.enabled=True --set cerebro.enabled=True
+```
+> [Elasticsearch Operator Documentation](https://github.com/upmc-enterprises/elasticsearch-operator)
+
+
+#### Forward cerebro & kibana dashboards
+```bash
+$ kubectl port-forward svc/cerebro-elasticsearch-cluster 9001:80
+$ kubectl port-forward svc/kibana-elasticsearch-cluster 5601:80
+```
+
+
+### Create default logging
+
+Create a namespace for logging
+```bash
+kubectl create ns logging
+```
+> You can install `logging` resource via [Helm chart](/charts/logging-operator-logging) with built-in TLS generation.
+
+Create `logging` resource
+```bash
+cat <<EOF | kubectl apply -f -
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+EOF
+```
+
+> Note: `ClusterOutput` and `ClusterFlow` resources will only be accepted in the `controlNamespace`
+
+
+Create an ElasticSearch output definition
+
+```bash
+cat <<EOF | kubectl apply -f -
+# ... ClusterOutput resource with an `elasticsearch` section goes here (see the sketch below) ...
+EOF
+```
+
+> Note: For production setups we recommend using a longer `timekey` interval to avoid generating too many objects.
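+
+For reference, a minimal ElasticSearch `ClusterOutput` for the cluster installed above might look like the following sketch (the host, resource names, and buffer values here are illustrative; adjust them to your environment):
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: es-output
+  namespace: logging
+spec:
+  elasticsearch:
+    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
+    port: 9200
+    scheme: https
+    ssl_verify: false
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+```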
+
+The following snippet will use [tag_normaliser](./plugins/filters/tagnormaliser.md) to re-tag logs and then push them to ElasticSearch.
+
+```bash
+cat <<EOF | kubectl apply -f -
+# ... ClusterFlow resource goes here (see the sketch below) ...
+EOF
+```
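+
+For reference, such a `ClusterFlow` might look like the following sketch (the output name `es-output` is illustrative and has to match the name of your ElasticSearch output):
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: all-log-to-es
+  namespace: logging
+spec:
+  filters:
+  - tag_normaliser: {}
+  selectors: {}
+  outputRefs:
+  - es-output
+```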
+
+# Store Nginx Access Logs in Grafana Loki with Logging Operator
+
+
+
+### Add operator chart repository:
+```bash
+helm repo add loki https://grafana.github.io/loki/charts
+helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
+helm repo update
+```
+
+### Install Loki
+```bash
+helm install --name loki loki/loki
+```
+> [Grafana Loki Documentation](https://github.com/grafana/loki/tree/master/production/helm)
+### Install Grafana
+```bash
+helm install --name grafana stable/grafana \
+ --set "datasources.datasources\\.yaml.apiVersion=1" \
+ --set "datasources.datasources\\.yaml.datasources[0].name=Loki" \
+ --set "datasources.datasources\\.yaml.datasources[0].type=loki" \
+ --set "datasources.datasources\\.yaml.datasources[0].url=http://loki:3100" \
+ --set "datasources.datasources\\.yaml.datasources[0].access=proxy"
+```
+
+
+## Install with Helm
+### Logging Operator
+```bash
+helm install --name logging banzaicloud-stable/logging-operator
+```
+> You can install `logging` resource via [Helm chart](/charts/logging-operator-logging) with built-in TLS generation.
+
+### Nginx App + Logging Definition
+```bash
+helm install --name nginx-demo banzaicloud-stable/nginx-logging-loki-demo
+```
+
+## Install from manifest
+
+#### Create `logging` resource
+```bash
+cat <<EOF | kubectl apply -f -
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+EOF
+```
+
+> Note: `ClusterOutput` and `ClusterFlow` resources will only be accepted in the `controlNamespace`
+
+
+#### Create a Loki output definition
+```bash
+cat <<EOF | kubectl apply -f -
+# ... Output resource with a `loki` section goes here (see the sketch below) ...
+EOF
+```
+
+> Note: For production setups we recommend using a longer `timekey` interval to avoid generating too many objects.
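+
+For reference, a minimal Loki `Output` might look like the following sketch (the field names and values here are illustrative; see [the loki output reference](./plugins/outputs/loki.md) for the exact parameters):
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: loki-output
+  namespace: default
+spec:
+  loki:
+    url: http://loki:3100
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+```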
+
+#### Create `flow` resource
+```bash
+cat <<EOF | kubectl apply -f -
+# ... Flow resource selecting the nginx demo Pods and referencing the Loki output above ...
+EOF
+```
+
+
diff --git a/docs/example-s3.md b/docs/example-s3.md
new file mode 100644
index 000000000..30893a11c
--- /dev/null
+++ b/docs/example-s3.md
@@ -0,0 +1,114 @@
+# Save all logs to S3
+
+Before you start [install logging-operator](/README.md#deploying-with-helm-chart)
+
+### Create default logging
+
+Create a namespace for logging
+```bash
+kubectl create ns logging
+```
+> You can install `logging` resource via [Helm chart](/charts/logging-operator-logging) with built-in TLS generation.
+
+Create `logging` resource
+```bash
+kubectl apply -f logging.yaml
+```
+*logging.yaml*
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-simple
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging-system
+```
+
+> Note: `ClusterOutput` and `ClusterFlow` resources will only be accepted in the `controlNamespace`
+
+### Create AWS secret
+
+If you have your `$AWS_ACCESS_KEY_ID` and `$AWS_SECRET_ACCESS_KEY` set you can use the following snippet.
+```bash
+kubectl create secret generic logging-s3 --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccesKey=$AWS_SECRET_ACCESS_KEY"
+```
+Or set up the secret manually.
+```bash
+kubectl apply -f secret.yaml
+```
+*secret.yaml*
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: logging-s3
+ namespace: logging-system
+type: Opaque
+data:
+  awsAccessKeyId: <base64-encoded-aws-access-key-id>
+  awsSecretAccesKey: <base64-encoded-aws-secret-access-key>
+```
+
+> You **MUST** install the `secret` and the `output` definition in the **SAME** namespace
+
+Create an S3 output definition
+
+```bash
+kubectl apply -f clusteroutput.yaml
+```
+*clusteroutput.yaml*
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+ name: s3-output
+ namespace: logging-system
+spec:
+ s3:
+ aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name: logging-s3
+ key: awsAccessKeyId
+ aws_sec_key:
+ valueFrom:
+ secretKeyRef:
+ name: logging-s3
+ key: awsSecretAccesKey
+ s3_bucket: logging-amazon-s3
+ s3_region: eu-central-1
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 10m
+ timekey_wait: 30s
+ timekey_use_utc: true
+```
+
+> Note: For production setups we recommend using a longer `timekey` interval to avoid generating too many objects.
+
+The following snippet will use [tag_normaliser](./plugins/filters/tagnormaliser.md) to re-tag logs and then push them to S3.
+
+```bash
+kubectl apply -f clusterflow.yaml
+```
+*clusterflow.yaml*
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+ name: all-log-to-s3
+ namespace: logging-system
+spec:
+ filters:
+ - tag_normaliser: {}
+ selectors: {}
+ outputRefs:
+ - s3-output
+```
+
+The logs will be available in the bucket on a `path` like:
+
+```/logs/default.default-logging-simple-fluentbit-lsdp5.fluent-bit/2019/09/11/201909111432_0.gz```
\ No newline at end of file
diff --git a/docs/examples/es.md b/docs/examples/es.md
deleted file mode 100644
index a1c95bbbf..000000000
--- a/docs/examples/es.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-## Example Logging-operator with Elasticsearch Operator
-
-
-
-#### Add operator chart repository:
-```bash
-$ helm repo add es-operator https://raw.githubusercontent.com/upmc-enterprises/elasticsearch-operator/master/charts/
-$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
-$ helm repo update
-```
-
-#### Install operators
-```bash
-$ helm install --name elasticsearch-operator es-operator/elasticsearch-operator --set rbac.enabled=True
-$ helm install --name elasticsearch es-operator/elasticsearch --set kibana.enabled=True --set cerebro.enabled=True
-$ helm install --name logging banzaicloud-stable/logging-operator
-$ helm install --name fluent banzaicloud-stable/logging-operator-fluent
-```
-> [Elasticsearch Operator Documentation](https://github.com/upmc-enterprises/elasticsearch-operator)
-
-#### Install Nginx Demo chart
-```bash
-$ helm install banzaicloud-stable/nginx-logging-es-demo
-```
-
-#### Forward cerebro & kibana dashboards
-```bash
-$ kubectl port-forward svc/cerebro-elasticsearch-cluster 9001:80
-$ kubectl port-forward svc/kibana-elasticsearch-cluster 5601:80
-```
-
-[![asciicast](https://asciinema.org/a/9EcfIzlUQJSjJdopEh5HCU7OT.svg)](https://asciinema.org/a/9EcfIzlUQJSjJdopEh5HCU7OT)
-
-## License
-
-Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/docs/examples/logging_flow_multiple_output.yaml b/docs/examples/logging_flow_multiple_output.yaml
new file mode 100644
index 000000000..07e35b48b
--- /dev/null
+++ b/docs/examples/logging_flow_multiple_output.yaml
@@ -0,0 +1,11 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ outputRefs:
+ - gcs-output-sample
+ - s3-output-example
+ selectors:
+ app: nginx
diff --git a/docs/examples/logging_flow_single_output.yaml b/docs/examples/logging_flow_single_output.yaml
new file mode 100644
index 000000000..90e3783d2
--- /dev/null
+++ b/docs/examples/logging_flow_single_output.yaml
@@ -0,0 +1,10 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ outputRefs:
+ - s3-output-sample
+ selectors:
+ app: nginx
diff --git a/docs/examples/logging_flow_with_filters.yaml b/docs/examples/logging_flow_with_filters.yaml
new file mode 100644
index 000000000..4b45316ff
--- /dev/null
+++ b/docs/examples/logging_flow_with_filters.yaml
@@ -0,0 +1,18 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ filters:
+ - parser:
+        key_name: log
+        remove_key_name_field: true
+        parsers:
+        - type: nginx
+    - tag_normaliser:
+        format: ${namespace_name}.${pod_name}.${container_name}
+ outputRefs:
+ - s3-output
+ selectors:
+ app: nginx
diff --git a/docs/examples/logging_logging_simple.yaml b/docs/examples/logging_logging_simple.yaml
new file mode 100644
index 000000000..6cd76bb43
--- /dev/null
+++ b/docs/examples/logging_logging_simple.yaml
@@ -0,0 +1,9 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging
\ No newline at end of file
diff --git a/docs/examples/logging_logging_tls.yaml b/docs/examples/logging_logging_tls.yaml
new file mode 100644
index 000000000..1f6df314d
--- /dev/null
+++ b/docs/examples/logging_logging_tls.yaml
@@ -0,0 +1,18 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+ name: default-logging-tls
+ namespace: logging
+spec:
+ fluentd:
+ disablePvc: true
+ tls:
+ enabled: true
+ secretName: fluentd-tls
+ sharedKey: asdadas
+ fluentbit:
+ tls:
+ enabled: true
+ secretName: fluentbit-tls
+ sharedKey: asdadas
+ controlNamespace: logging
\ No newline at end of file
diff --git a/docs/examples/logging_output_azurestorage.yaml b/docs/examples/logging_output_azurestorage.yaml
new file mode 100644
index 000000000..357f47a83
--- /dev/null
+++ b/docs/examples/logging_output_azurestorage.yaml
@@ -0,0 +1,22 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: azure-output-sample
+spec:
+ azurestorage:
+ azure_storage_account:
+ valueFrom:
+ secretKeyRef:
+ name: azurestorage-secret
+ key: azureStorageAccount
+ azure_storage_access_key:
+ valueFrom:
+ secretKeyRef:
+ name: azurestorage-secret
+ key: azureStorageAccessKey
+ azure_container: example-azure-container
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
\ No newline at end of file
diff --git a/docs/examples/logging_output_forward.yaml b/docs/examples/logging_output_forward.yaml
new file mode 100644
index 000000000..a71fbf876
--- /dev/null
+++ b/docs/examples/logging_output_forward.yaml
@@ -0,0 +1,13 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: forward-output-sample
+spec:
+ forward:
+ servers:
+ - host: fluent-public.input.io
+ port: "24240"
+ buffer:
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
diff --git a/docs/examples/logging_output_gcs.yaml b/docs/examples/logging_output_gcs.yaml
new file mode 100644
index 000000000..045995233
--- /dev/null
+++ b/docs/examples/logging_output_gcs.yaml
@@ -0,0 +1,18 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: gcs-output-sample
+spec:
+ gcs:
+ credentials_json:
+ valueFrom:
+ secretKeyRef:
+ name: gcs-secret
+ key: credentials.json
+ project: logging-example
+ bucket: banzai-log-test
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
\ No newline at end of file
diff --git a/docs/examples/logging_output_s3.yaml b/docs/examples/logging_output_s3.yaml
new file mode 100644
index 000000000..287543d45
--- /dev/null
+++ b/docs/examples/logging_output_s3.yaml
@@ -0,0 +1,25 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: s3-output-sample
+spec:
+ s3:
+ aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsAccessKeyId
+ aws_sec_key:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsSecretAccesKey
+ s3_bucket: example-logging-bucket
+ s3_region: eu-central-1
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
+ format:
+ type: json
diff --git a/docs/examples/logging_output_sumologic.yaml b/docs/examples/logging_output_sumologic.yaml
new file mode 100644
index 000000000..481b495de
--- /dev/null
+++ b/docs/examples/logging_output_sumologic.yaml
@@ -0,0 +1,14 @@
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+ name: sumologic-output-sample
+spec:
+ sumologic:
+ endpoint:
+ valueFrom:
+ secretKeyRef:
+ name: sumologic
+ key: endpoint
+ log_format: json
+ source_category: prod/someapp/logs
+ source_name: AppA
diff --git a/docs/examples/s3.md b/docs/examples/s3.md
deleted file mode 100644
index dc96e310c..000000000
--- a/docs/examples/s3.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
-
-#### Install S3 output Plugin chart with Aws Credential Access
-```bash
-$ helm install \
---set bucketName='' \
---set region='' \
---set endpoint='' \
---set awsCredentialsAccess.enabled=true \
---set awsCredentialsAccess.secret.awsAccessValue='' \
---set awsCredentialsAccess.secret.awsSecretValue='' \
-banzaicloud-stable/s3-output
-```
-
-> There is **no** need to encode base64 these values.
-
-#### Install Nginx Demo app
-```bash
-$ helm install banzaicloud-stable/nginx-logging-demo
-```
-
-
-### Create Secret
-
-Create a manifest file for the AWS access key:
-
-```
-apiVersion: v1
-kind: Secret
-metadata:
- name: loggings3
-type: Opaque
-data:
- awsAccessKeyId:
- awsSecretAccesKey:
-```
-
-Submit the secret with kubectl:
-
-```
-kubectl apply -f secret.yaml
-```
-
-### Create LoggingOperator resource
-
-Create a manifest that defines that you want to parse the nginx logs with the specified regular expressions on the standard output of pods with the `app: nginx` label, and store them in the given S3 bucket.
-
-```
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: "nginx-logging"
- labels:
- release: test
-spec:
- input:
- label:
- app: nginx
- filter:
- - type: parser
- name: parser-nginx
- parameters:
- - name: format
- value: '/^(?[^ ]*) (?[^ ]*) (?[^ ]*) \[(?[^\]]*)\] "(?\S+)(?: +(?[^\"]*?)(?: +\S*)?)?" (?[^ ]*) (?[^ ]*)(?: "(?[^\"]*)" "(?[^\"]*)"(?:\s+(?[^ ]+))?)?$/'
- - name: timeFormat
- value: "%d/%b/%Y:%H:%M:%S %z"
- output:
- - type: s3
- name: outputS3
- parameters:
- - name: aws_key_id
- valueFrom:
- secretKeyRef:
- name: loggings3
- key: awsAccessKeyId
- - name: aws_sec_key
- valueFrom:
- secretKeyRef:
- name: loggings3
- key: awsSecretAccesKey
- - name: s3_bucket
- value: logging-bucket
- - name: s3_region
- value: ap-northeast-1
- - name: s3_endpoint
- value: https://s3.amazonaws.com
-```
-
-
-
-## License
-
-Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
diff --git a/docs/examples/tls.md b/docs/examples/tls.md
deleted file mode 100644
index 7dab5fb99..000000000
--- a/docs/examples/tls.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-# TLS Configuration
-
-To configure TLS for Fluentd and Fluentbit the operator needs TLS certificates
-set via the Fluentd and Fluentbit Custom Resources respectively. This can be
-done in two ways:
-
-## Generic Opaque secret (default)
-
-Create a secret like this:
-
-```
-apiVersion: v1
-data:
- caCert: ...
- clientCert: ...
- clientKey: ...
- serverCert: ...
- serverKey: ...
-kind: Secret
-metadata:
- name: something-something-tls
-type: Opaque
-```
-
-Note that we are providing three certificates in the same secret, one for
-Fluentd (`serverCert`), one for Fluentbit (`clientCert`), and the CA
-certificate (`caCert`).
-
-Then in your custom resource configure like this:
-
-```
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd/Fluentbit
-metadata:
- name: my-fluent-thing
-spec:
- ...
- tls:
- enabled: true
- secretName: something-something-tls
- sharedKey: changeme
-```
-
-
-## `kubernetes.io/tls`
-
-The alternative is if your certificates are in secrets of type `kubernetes.io/tls`, e.g.
-
-```
-apiVersion: v1
-data:
- ca.crt: LS0tLS1...
- tls.crt: LS0tLS1...
- tls.key: LS0tLS1...
-kind: Secret
-metadata:
- name: something-something-tls
-type: kubernetes.io/tls
-```
-
-Then configure your custom resources like this:
-
-```
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd/Fluentbit
-metadata:
- name: my-fluent-thing
-spec:
- ...
- tls:
- enabled: true
- secretName: something-something-tls
- secretType: tls
- sharedKey: changeme
-```
-
-Note: in this case we can use the same secret for both Fluentbit and Fluentd,
-or create separate secrets for each.
-
-Note: the secret's data include the CA certificate, which is in-line with the
-structure created by [jetstack/cert-manager](https://github.com/jetstack/cert-manager/).
-
-## Usage with the helm chart
-
-For the generic Opaque secret just set `tls.enabled=True` and optionally provide the `tls.secretName` value to use your own certificates (instead of the automatically generated ones from the chart).
-
-For `kubernetes.io/tls` install `logging-operator-fluent` with a `values.yaml` like this:
-
-```
-tls:
- enabled: true
-
-fluentbit:
- tlsSecret: something-something-tls
-
-fluentd:
- tlsSecret: otherthing-otherthing-tls
-```
-
-For more information see the helm chart's [README.md](https://github.com/banzaicloud/logging-operator/blob/master/charts/logging-operator-fluent/README.md).
diff --git a/docs/img/es_cerb.png b/docs/img/es_cerb.png
new file mode 100644
index 000000000..928840f72
Binary files /dev/null and b/docs/img/es_cerb.png differ
diff --git a/docs/img/es_kibana.png b/docs/img/es_kibana.png
new file mode 100644
index 000000000..77d1e765d
Binary files /dev/null and b/docs/img/es_kibana.png differ
diff --git a/docs/img/helm_logo.png b/docs/img/helm_logo.png
deleted file mode 100644
index 3b26f4dd3..000000000
Binary files a/docs/img/helm_logo.png and /dev/null differ
diff --git a/docs/img/lll.png b/docs/img/les.png
similarity index 100%
rename from docs/img/lll.png
rename to docs/img/les.png
diff --git a/docs/img/ll_es.gif b/docs/img/ll_es.gif
deleted file mode 100644
index 9c942b18e..000000000
Binary files a/docs/img/ll_es.gif and /dev/null differ
diff --git a/docs/img/log_helm.gif b/docs/img/log_helm.gif
deleted file mode 100644
index b4334b274..000000000
Binary files a/docs/img/log_helm.gif and /dev/null differ
diff --git a/docs/img/log_man.png b/docs/img/log_man.png
deleted file mode 100644
index 5ede09772..000000000
Binary files a/docs/img/log_man.png and /dev/null differ
diff --git a/docs/img/logging-operator-v2-architecture.png b/docs/img/logging-operator-v2-architecture.png
new file mode 100644
index 000000000..043053a1f
Binary files /dev/null and b/docs/img/logging-operator-v2-architecture.png differ
diff --git a/docs/img/loggingo_flow.png b/docs/img/logging_operator_flow.png
similarity index 100%
rename from docs/img/loggingo_flow.png
rename to docs/img/logging_operator_flow.png
diff --git a/docs/img/logo.png b/docs/img/logo.png
deleted file mode 100644
index 1f0593080..000000000
Binary files a/docs/img/logo.png and /dev/null differ
diff --git a/docs/img/loki1.png b/docs/img/loki1.png
new file mode 100644
index 000000000..69bfa3c2a
Binary files /dev/null and b/docs/img/loki1.png differ
diff --git a/docs/img/nginx-elastic.png b/docs/img/nginx-elastic.png
new file mode 100644
index 000000000..6eddf58b2
Binary files /dev/null and b/docs/img/nginx-elastic.png differ
diff --git a/docs/img/nginx-loki.png b/docs/img/nginx-loki.png
new file mode 100644
index 000000000..2b23b99b0
Binary files /dev/null and b/docs/img/nginx-loki.png differ
diff --git a/docs/img/nle.png b/docs/img/nle.png
new file mode 100644
index 000000000..2a0558aa7
Binary files /dev/null and b/docs/img/nle.png differ
diff --git a/docs/img/nll.png b/docs/img/nll.png
new file mode 100644
index 000000000..5d25e8eac
Binary files /dev/null and b/docs/img/nll.png differ
diff --git a/docs/img/s3_logo.png b/docs/img/s3_logo.png
deleted file mode 100644
index 3d3d4fd10..000000000
Binary files a/docs/img/s3_logo.png and /dev/null differ
diff --git a/docs/model.md b/docs/model.md
new file mode 100644
index 000000000..9e0d11d08
--- /dev/null
+++ b/docs/model.md
@@ -0,0 +1,44 @@
+## Goal
+
+Define an opinionated logical fluentd configuration model for processing kubernetes log events using go structs and
+render the following two representations:
+ - working fluentd configuration
+ - a configuration format that can be used for visual representation
+
+## The model
+
+Flow (data pipeline)
+
+The term "data pipeline" is used in fluentd for labeled (https://docs.fluentd.org/quickstart/life-of-a-fluentd-event#labels)
+configuration sections that apply only to a subset of events. Labeled events are also skipped by the
+default (non-labeled) plugins. See:
+https://docs.fluentd.org/configuration/routing-examples#input-greater-than-filter-greater-than-output-with-label
+
+The flow is identified by a kubernetes namespace and/or a set of labels. The non-labeled non-namespaced events can be
+considered as the "global" flow that processes all events.
+
+### Components
+
+Each flow has the following fluentd components:
+ - inside the label section:
+ - zero or more sequential filters
+ - one or more outputs
+ - outside the label section (in the global section):
+ - a router
+
+1. Event reaches the single input source.
+2. The router (a smart match directive) for each specific flow gets the event, examines its
+   kubernetes namespace and label information, and re-emits it with a fluentd label when it finds a match.
+   The global router matches all events.
+3. The event arrives at the correct flow based on its label. Filters are then applied in sequential order, and finally
+the flow's outputs are the end of the event's journey.
+```
+ global router -> global flow [filter, ...] => output[, output, ...]
+ /
+input - router A -> flow A [filter, ...] => output[, output, ...]
+ \
+ router B -> flow B [filter, ...] => output[, output, ...]
+ .
+ .
+ .
+```
\ No newline at end of file
diff --git a/docs/plugins/alibaba.md b/docs/plugins/alibaba.md
deleted file mode 100644
index ad7188388..000000000
--- a/docs/plugins/alibaba.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Plugin alibaba
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| aliKeyId | - | |
-| aliKeySecret | - | |
-| bucket | - | |
-| aliBucketEndpoint | - | |
-| oss_object_key_format | %{time_slice}/%{host}-%{uuid}.%{file_ext} | |
-| buffer_path | /buffers/ali | |
-| buffer_chunk_limit | 1m | |
-| time_slice_format | %Y%m%d | |
-| time_slice_wait | 10m | |
-## Plugin template
-```
-
- @type oss
- oss_key_id {{ .aliKeyId }}
- oss_key_secret {{ .aliKeySecret }}
- oss_bucket {{ .bucket }}
- oss_endpoint {{ .aliBucketEndpoint }}
- oss_object_key_format {{ .oss_object_key_format }}
-
- buffer_path {{ .buffer_path }}
- buffer_chunk_limit {{ .buffer_chunk_limit }}
- time_slice_format {{ .time_slice_format }}
- time_slice_wait {{ .time_slice_wait }}
-
-```
\ No newline at end of file
diff --git a/docs/plugins/azure.md b/docs/plugins/azure.md
deleted file mode 100644
index fa11abac5..000000000
--- a/docs/plugins/azure.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Plugin azure
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| storageAccountName | - | |
-| storageAccountKey | - | |
-| bucket | - | |
-| azure_object_key_format | %{path}%{time_slice}_%{index}.%{file_extension} | |
-| path | logs/${tag}/%Y/%m/%d/ | |
-| time_slice_format | %Y%m%d-%H | |
-| bufferPath | /buffers/azure | |
-| timekey | 1h | |
-| timekey_wait | 10m | |
-| timekey_use_utc | true | |
-| format | json | |
-## Plugin template
-```
-
- @type azurestorage
-
- azure_storage_account {{ .storageAccountName }}
- azure_storage_access_key {{ .storageAccountKey }}
- azure_container {{ .bucket }}
- azure_storage_type blob
- store_as gzip
- auto_create_container true
- azure_object_key_format {{ .azure_object_key_format }}
- path {{ .path }}
- time_slice_format {{ .time_slice_format }}
- # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / object_key_format,
- # need to specify tag for ${tag} and time for %Y/%m/%d in argument.
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
-
-
-
- @type {{ .format }}
-
-
-```
\ No newline at end of file
diff --git a/docs/plugins/common/security.md b/docs/plugins/common/security.md
new file mode 100644
index 000000000..edd908a8f
--- /dev/null
+++ b/docs/plugins/common/security.md
@@ -0,0 +1,7 @@
+### Security
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| self_hostname | string | Yes | - | Hostname |
+| shared_key | string | Yes | - | Shared key for authentication. |
+| user_auth | bool | No | - | If true, use user based authentication. |
+| allow_anonymous_source | bool | No | - | Allow anonymous source. `<client>` sections are required if disabled. |
diff --git a/docs/plugins/common/transport.md b/docs/plugins/common/transport.md
new file mode 100644
index 000000000..131a80d27
--- /dev/null
+++ b/docs/plugins/common/transport.md
@@ -0,0 +1,15 @@
+### Transport
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| protocol | string | No | - | Protocol Default: :tcp |
+| version | string | No | - | Version Default: 'TLSv1_2' |
+| ciphers | string | No | - | Ciphers Default: "ALL:!aNULL:!eNULL:!SSLv2" |
+| insecure | bool | No | - | Use secure connection (when use tls). Default: false |
+| ca_path | string | No | - | Specify path to CA certificate file |
+| cert_path | string | No | - | Specify path to Certificate file |
+| private_key_path | string | No | - | Specify path to private Key file |
+| private_key_passphrase | string | No | - | public CA private key passphrase contained path |
+| client_cert_auth | bool | No | - | When this is set Fluentd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. |
+| ca_cert_path | string | No | - | Specify private CA contained path |
+| ca_private_key_path | string | No | - | private CA private key contained path |
+| ca_private_key_passphrase | string | No | - | private CA private key passphrase contained path |
diff --git a/docs/plugins/elasticsearch.md b/docs/plugins/elasticsearch.md
deleted file mode 100644
index 5fce40258..000000000
--- a/docs/plugins/elasticsearch.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Plugin elasticsearch
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| logLevel | info | |
-| host | - | |
-| port | - | |
-| scheme | scheme | |
-| sslVerify | true | |
-| sslVersion | TLSv1_2 | |
-| logstashFormat | true | |
-| logstashPrefix | logstash | |
-| user | | |
-| password | | |
-| log_es_400_reason | false | |
-| bufferPath | /buffers/elasticsearch | |
-| timekey | 1h | |
-| timekey_wait | 10m | |
-| timekey_use_utc | true | |
-| flush_thread_count | 2 | |
-| flush_interval | 5s | |
-| retry_forever | true | |
-| retry_max_interval | 30 | |
-| chunkLimit | 2M | |
-| queueLimit | 8 | |
-## Plugin template
-```
-
- @type elasticsearch
- @log_level {{ .logLevel }}
- include_tag_key true
- type_name fluentd
- host {{ .host }}
- port {{ .port }}
- scheme {{ .scheme }}
- {{- if .sslVerify }}
- ssl_verify {{ .sslVerify }}
- {{- end}}
- {{- if .sslVersion }}
- ssl_version {{ .sslVersion }}
- {{- end}}
- logstash_format {{ .logstashFormat }}
- logstash_prefix {{ .logstashPrefix }}
- reconnect_on_error true
- {{- if .user }}
- user {{ .user }}
- {{- end}}
- {{- if .password }}
- password {{ .password }}
- {{- end}}
- log_es_400_reason {{ .log_es_400_reason }}
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
- flush_mode interval
- retry_type exponential_backoff
- flush_thread_count {{ .flush_thread_count }}
- flush_interval {{ .flush_interval }}
- retry_forever {{ .retry_forever }}
- retry_max_interval {{ .retry_max_interval }}
- chunk_limit_size {{ .chunkLimit }}
- queue_limit_length {{ .queueLimit }}
- overflow_action block
-
-
-```
\ No newline at end of file
diff --git a/docs/plugins/filters/parser.md b/docs/plugins/filters/parser.md
new file mode 100644
index 000000000..cd7d873be
--- /dev/null
+++ b/docs/plugins/filters/parser.md
@@ -0,0 +1,24 @@
+### Parser
+#### https://docs.fluentd.org/filter/parser
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| key_name | string | Yes | - | Specify field name in the record to parse. |
+| reserve_time | bool | No | - | Keep original event time in parsed result. |
+| reserve_data | bool | No | - | Keep original key-value pair in parsed result. |
+| remove_key_name_field | bool | No | - | Remove key_name field when parsing is succeeded |
+| replace_invalid_sequence | bool | No | - | If true, invalid string is replaced with safe characters and re-parse it. |
+| inject_key_prefix | string | No | - | Store parsed values with specified key name prefix. |
+| hash_value_fiel | string | No | - | Store parsed values as a hash value in a field. |
+| emit_invalid_record_to_error | bool | No | - | Emit invalid record to @ERROR label. Invalid cases are: key not exist, format is not matched, unexpected error |
+| parsers | []ParseSection | No | - | [Parse Section](#Parse-Section) |
+### Parse Section
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| type | string | No | - | Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none |
+| expression | string | No | - | Regexp expression to evaluate |
+| time_key | string | No | - | Specify time field for event time. If the event doesn't have this field, current time is used. |
+| null_value_pattern | string | No | - | Specify null value pattern. |
+| null_empty_string | bool | No | - | If true, empty string field is replaced with nil |
+| estimate_current_event | bool | No | - | If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified. |
+| keep_time_key | bool | No | - | If true, keep time field in the record. |
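+
+For example, a `Flow` can parse nginx access logs with a parser filter entry like this (the output name and selector are illustrative):
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  filters:
+    - parser:
+        key_name: log
+        remove_key_name_field: true
+        parsers:
+        - type: nginx
+  outputRefs:
+    - s3-output
+  selectors:
+    app: nginx
+```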
diff --git a/docs/plugins/filters/stdout.md b/docs/plugins/filters/stdout.md
new file mode 100644
index 000000000..36005f79c
--- /dev/null
+++ b/docs/plugins/filters/stdout.md
@@ -0,0 +1,3 @@
+### StdOutFilterConfig
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
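+
+Since the filter has no parameters, enabling it in a `Flow` is a single entry (excerpt of a `Flow` spec):
+```yaml
+spec:
+  filters:
+  - stdout: {}
+```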
diff --git a/docs/plugins/filters/tagnormaliser.md b/docs/plugins/filters/tagnormaliser.md
new file mode 100644
index 000000000..2fe9e06a6
--- /dev/null
+++ b/docs/plugins/filters/tagnormaliser.md
@@ -0,0 +1,21 @@
+# Fluentd Plugin to re-tag based on log metadata
+## Overview
+More info at https://github.com/banzaicloud/fluent-plugin-tag-normaliser
+
+Available kubernetes metadata
+
+| Parameter | Description | Example |
+|-----------|-------------|---------|
+| ${pod_name} | Pod name | understood-butterfly-nginx-logging-demo-7dcdcfdcd7-h7p9n |
+| ${container_name} | Container name inside the Pod | nginx-logging-demo |
+| ${namespace_name} | Namespace name | default |
+| ${pod_id} | Kubernetes UUID for Pod | 1f50d309-45a6-11e9-b795-025000000001 |
+| ${labels} | Kubernetes Pod labels. This is a nested map. You can access nested attributes via `.` | {"app":"nginx-logging-demo", "pod-template-hash":"7dcdcfdcd7" } |
+| ${host} | Node hostname the Pod runs on | docker-desktop |
+| ${docker_id} | Docker UUID of the container | 3a38148aa37aa3... |
+
+## Configuration
+### Tag Normaliser parameters
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| format | string | No | ${namespace_name}.${pod_name}.${container_name} | Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser) |
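+
+For example, re-tagging logs in a `Flow` (excerpt; the format below is the default):
+```yaml
+spec:
+  filters:
+  - tag_normaliser:
+      format: ${namespace_name}.${pod_name}.${container_name}
+```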
diff --git a/docs/plugins/forward.md b/docs/plugins/forward.md
deleted file mode 100644
index 0590cb780..000000000
--- a/docs/plugins/forward.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Plugin forward
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| clientHostname | fluentd.client | |
-| tlsSharedKey | | |
-| tlsCACertFilel | fluentd/tls/caCert | |
-| tlsCertFile | fluentd/tls/clientCert | |
-| tlsKeyFile | /fluentd/tls/clientKey | |
-| name | target | |
-| host | - | |
-| port | - | |
-| bufferPath | /buffers/forward | |
-| timekey | 1h | |
-| timekey_wait | 10m | |
-| timekey_use_utc | true | |
-| flush_thread_count | 2 | |
-| flush_interval | 5s | |
-| retry_forever | true | |
-| retry_max_interval | 30 | |
-| chunkLimit | 2M | |
-| queueLimit | 8 | |
-## Plugin template
-```
-
- @type forward
-
- {{ if not (eq .tlsSharedKey "") -}}
- transport tls
- tls_version TLSv1_2
- tls_cert_path {{ .tlsCACertFile }}
- tls_client_cert_path {{ .tlsCertFile }}
- tls_client_private_key_path {{ .tlsKeyFile }}
-
- self_hostname {{ .clientHostname }}
- shared_key {{ .tlsSharedKey }}
-
- {{ end -}}
-
-
- name {{ .name }}
- host {{ .host }}
- port {{ .port }}
-
-
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
- flush_mode interval
- retry_type exponential_backoff
- flush_thread_count {{ .flush_thread_count }}
- flush_interval {{ .flush_interval }}
- retry_forever {{ .retry_forever }}
- retry_max_interval {{ .retry_max_interval }}
- chunk_limit_size {{ .chunkLimit }}
- queue_limit_length {{ .queueLimit }}
- overflow_action block
-
-
-```
\ No newline at end of file
diff --git a/docs/plugins/gcs.md b/docs/plugins/gcs.md
deleted file mode 100644
index a9f20f721..000000000
--- a/docs/plugins/gcs.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Plugin gcs
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| project | - | |
-| private_key | - | toJson |
-| client_email | - | |
-| bucket | - | |
-| object_key_format | %{path}%{time_slice}_%{index}.%{file_extension} | |
-| path | logs/${tag}/%Y/%m/%d/ | |
-| bufferPath | /buffers/gcs | |
-| timekey | 1h | |
-| timekey_wait | 10m | |
-| timekey_use_utc | true | |
-| format | json | |
-## Plugin template
-```
-
- @type gcs
-
- project {{ .project }}
- credentialsJson { "private_key": {{ toJson .private_key }}, "client_email": "{{ .client_email }}" }
- bucket {{ .bucket }}
- object_key_format {{ .object_key_format }}
- path {{ .path }}
-
- # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / object_key_format,
- # need to specify tag for ${tag} and time for %Y/%m/%d in argument.
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
-
-
-
- @type {{ .format }}
-
-
-```
\ No newline at end of file
diff --git a/docs/plugins/index.md b/docs/plugins/index.md
new file mode 100644
index 000000000..fa482e883
--- /dev/null
+++ b/docs/plugins/index.md
@@ -0,0 +1,31 @@
+## Table of Contents
+
+
+### filters
+
+- [parser](./filters/parser.md)
+- [stdout](./filters/stdout.md)
+- [tagnormaliser](./filters/tagnormaliser.md)
+
+
+### outputs
+
+- [azurestore](./outputs/azurestore.md)
+- [buffer](./outputs/buffer.md)
+- [elasticsearch](./outputs/elasticsearch.md)
+- [file](./outputs/file.md)
+- [format](./outputs/format.md)
+- [forward](./outputs/forward.md)
+- [gcs](./outputs/gcs.md)
+- [loki](./outputs/loki.md)
+- [oss](./outputs/oss.md)
+- [s3](./outputs/s3.md)
+- [sumologic](./outputs/sumologic.md)
+
+
+### common
+
+- [security](./common/security.md)
+- [transport](./common/transport.md)
+
+
diff --git a/docs/plugins/loki.md b/docs/plugins/loki.md
deleted file mode 100644
index 2da66abc4..000000000
--- a/docs/plugins/loki.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Plugin loki
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| url | | |
-| username | | |
-| password | | |
-| extraLabels | | |
-| flushInterval | 10s | |
-| chunkLimitSize | 1m | |
-| flushAtShutdown | true | |
-## Plugin template
-```
-
- @type kubernetes_loki
- url {{ .url }}
- username {{ .username }}
- password {{ .password }}
- extra_labels {{ .extraLabels }}
-
- flush_interval {{ .flushInterval }}
- chunk_limit_size {{ .chunkLimitSize }}
- flush_at_shutdown {{ .flushAtShutdown }}
-
-
-```
\ No newline at end of file
diff --git a/docs/plugins/outputs/azurestore.md b/docs/plugins/outputs/azurestore.md
new file mode 100644
index 000000000..f68395613
--- /dev/null
+++ b/docs/plugins/outputs/azurestore.md
@@ -0,0 +1,13 @@
+### AzureStorage
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| path | string | No | - | Path prefix of the files on Azure |
+| azure_storage_account | *secret.Secret | Yes | - | Your azure storage account [Secret](./secret.md) |
+| azure_storage_access_key | *secret.Secret | Yes | - | Your azure storage access key [Secret](./secret.md) |
+| azure_container | string | Yes | - | Your azure storage container |
+| azure_storage_type | string | No | blob | Azure storage type currently only "blob" supported |
+| azure_object_key_format | string | No | %{path}%{time_slice}_%{index}.%{file_extension} | Object key format |
+| store_as | string | No | gzip | Store as: gzip, json, text, lzo, lzma2 |
+| auto_create_container | bool | No | true | Automatically create container if not exists |
+| format | string | No | json | Compat format type: out_file, json, ltsv (default: out_file) |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
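+
+For example, an `Output` using Azure Storage (matching `docs/examples/logging_output_azurestorage.yaml`):
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: azure-output-sample
+spec:
+  azurestorage:
+    azure_storage_account:
+      valueFrom:
+        secretKeyRef:
+          name: azurestorage-secret
+          key: azureStorageAccount
+    azure_storage_access_key:
+      valueFrom:
+        secretKeyRef:
+          name: azurestorage-secret
+          key: azureStorageAccessKey
+    azure_container: example-azure-container
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true
+```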
diff --git a/docs/plugins/outputs/buffer.md b/docs/plugins/outputs/buffer.md
new file mode 100644
index 000000000..3141d57e3
--- /dev/null
+++ b/docs/plugins/outputs/buffer.md
@@ -0,0 +1,35 @@
+### Buffer
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| type | string | No | - | Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed. |
+| tags | string | No | tag,time | When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags. |
+| path | string | No | /buffers/default.*.buffer | The path where buffer chunks are stored. The '*' is replaced with random characters. This parameter is required. |
+| chunk_limit_size | string | No | - | The max size of each chunks: events will be written into chunks until the size of chunks become this size |
+| chunk_limit_records | int | No | - | The max number of events that each chunks can store in it |
+| total_limit_size | string | No | - | The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost) |
+| queue_limit_length | int | No | - | The queue length limitation of this buffer plugin instance |
+| chunk_full_threshold | string | No | - | The percentage of chunk size threshold for flushing. output plugin will flush the chunk when actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in default) |
+| queued_chunks_limit_size | int | No | - | Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations. |
+| compress | string | No | - | If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks. |
+| flush_at_shutdown | bool | No | - | The value to specify to flush/write all buffer chunks at shutdown, or not |
+| flush_mode | string | No | - | Default: default (equals to lazy if time is specified as chunk key, interval otherwise) lazy: flush/write chunks once per timekey interval: flush/write chunks per specified time via flush_interval immediate: flush/write chunks immediately after events are appended into chunks |
+| flush_interval | string | No | - | Default: 60s |
+| flush_thread_count | int | No | - | The number of threads of output plugins, which is used to write chunks in parallel |
+| flush_thread_interval | string | No | - | The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting) |
+| flush_thread_burst_interval | string | No | - | The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next |
+| delayed_commit_timeout | string | No | - | The timeout seconds until output plugin decides that async write operation fails |
+| overflow_action | string | No | - | How output plugin behaves when its buffer queue is full throw_exception: raise exception to show this error in log block: block processing of input plugin to emit events into that buffer drop_oldest_chunk: drop/purge oldest chunk to accept newly incoming chunk |
+| retry_timeout | string | No | - | The maximum seconds to retry to flush while failing, until plugin discards buffer chunks |
+| retry_forever | bool | No | true | If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever |
+| retry_max_times | int | No | - | The maximum number of times to retry to flush while failing |
+| retry_secondary_threshold | string | No | - | The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0) |
+| retry_type | string | No | - | exponential_backoff: wait seconds will become large exponentially per failures periodic: output plugin will retry periodically with fixed intervals (configured via retry_wait) |
+| retry_wait | string | No | - | Seconds to wait before next retry to flush, or constant factor of exponential backoff |
+| retry_exponential_backoff_base | string | No | - | The base number of exponential backoff for retries |
+| retry_max_interval | string | No | - | The maximum interval seconds for exponential backoff between retries while failing |
+| retry_randomize | bool | No | - | If true, output plugin will retry after randomized interval not to do burst retries |
+| disable_chunk_backup | bool | No | - | Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6. |
+| timekey | string | Yes | 10m | Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys) |
+| timekey_wait | string | No | - | Output plugin writes chunks after timekey_wait seconds later after timekey expiration |
+| timekey_use_utc | bool | No | - | Output plugin decides to use UTC or not to format placeholders using timekey |
+| timekey_zone | string | No | - | The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders |
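+
+For example, the buffer section of an S3 `Output` typically tunes the time-based chunking (excerpt; values are illustrative):
+```yaml
+spec:
+  s3:
+    buffer:
+      type: file
+      path: /buffers/s3
+      timekey: 10m
+      timekey_wait: 1m
+      timekey_use_utc: true
+```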
diff --git a/docs/plugins/outputs/elasticsearch.md b/docs/plugins/outputs/elasticsearch.md
new file mode 100644
index 000000000..999f9ba40
--- /dev/null
+++ b/docs/plugins/outputs/elasticsearch.md
@@ -0,0 +1,59 @@
+### Elasticsearch
+#### Send your logs to Elasticsearch
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| host | string | No | localhost | You can specify Elasticsearch host by this parameter. |
+| port | int | No | 9200 | You can specify Elasticsearch port by this parameter. |
+| hosts | string | No | - | You can specify multiple Elasticsearch hosts with separator ",". If you specify hosts option, host and port options are ignored. |
+| user | string | No | - | User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+} |
+| password | *secret.Secret | No | - | Password for HTTP Basic authentication. [Secret](./secret.md) |
+| path | string | No | - | Path for HTTP Basic authentication. |
+| scheme | string | No | http | Connection scheme |
+| ssl_verify | bool | Yes | true | Skip ssl verification (default: true) |
+| ssl_version | string | No | - | If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] |
+| logstash_format | bool | No | false | Enable Logstash log format. |
+| include_timestamp | bool | No | - | Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API. |
+| logstash_prefix | string | No | true | Set the Logstash prefix. |
+| logstash_prefix_separator | string | No | - | Set the Logstash prefix separator. |
+| logstash_dateformat | string | No | %Y.%m.%d | Set the Logstash date format. |
+| pipeline | string | No | - | This param is to set a pipeline id of your elasticsearch to be added into the request, you can configure ingest node. |
+| time_key_format | string | No | - | The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to. |
+| time_precision | string | No | - | Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event. |
+| time_key | string | No | - | By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you'd like to use a custom time, include an @timestamp with your record. |
+| utc_index | bool | No | true | By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows to use local time if you describe utc_index to false. |
+| target_index_key | string | No | - | Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot ('.') as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key |
+| target_type_key | string | No | true | Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name. |
+| template_name | string | No | - | The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated. |
+| template_file | string | No | - | The path to the file containing the template to install. |
+| templates | string | No | - | Specify index templates in form of hash. Can contain multiple templates. |
+| customize_template | string | No | - | Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration. |
+| rollover_index | bool | No | false | Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index |
+| index_date_pattern | string | No | now/d | Specify this to override the index date pattern for creating a rollover index. |
+| deflector_alias | string | No | - | Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API |
+| index_prefix | string | No | - | Specify the index prefix for the rollover index to be created. |
+| application_name | string | No | default | Specify the application name for the rollover index to be created. |
+| template_overwrite | bool | No | false | Always update the template, even if it already exists. |
+| max_retry_putting_template | string | No | 10 | You can specify the number of times to retry putting the template. |
+| fail_on_putting_template_retry_exceed | bool | No | true | Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. |
+| max_retry_get_es_version | string | No | 15 | You can specify the number of times to retry obtaining the Elasticsearch version. |
+| request_timeout | string | No | 5s | You can specify HTTP request timeout. |
+| reload_connections | bool | No | true | You can tune how the elasticsearch-transport host reloading feature works. |
+| reload_on_failure | bool | No | false | Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses. |
+| resurrect_after | string | No | 60s | You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected. |
+| include_tag_key | bool | No | false | This will add the Fluentd tag in the JSON record. |
+| tag_key | string | No | tag | This will add the Fluentd tag in the JSON record. |
+| id_key | string | No | - | https://github.com/uken/fluent-plugin-elasticsearch#id_key |
+| routing_key | string | No | - | Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event. |
+| remove_keys_on_update | string | No | - | Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert. |
+| remove_keys_on_update_key | string | No | - | This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works. |
+| retry_tag | string | No | - | This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided. |
+| write_operation | string | No | index | The write_operation can be any of: index, create, update, upsert |
+| reconnect_on_error | bool | No | false | Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommend setting this to true when using Elasticsearch Shield. |
+| with_transporter_log | bool | No | false | This is a debugging option that enables logging at the transporter layer. |
+| content_type | string | No | application/json | With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Type in payload. |
+| include_index_in_url | bool | No | - | With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce URL-based access control. |
+| time_parse_error_tag | string | No | - | With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag. |
+| http_backend | string | No | excon | With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. |
+| prefer_oj_serializer | bool | No | false | With default behavior, the Elasticsearch client uses Yajl as its JSON encoder/decoder. Oj is an alternative high performance JSON encoder/decoder. When this parameter is set to true, the Elasticsearch client uses Oj as its JSON encoder/decoder. |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
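+
+A minimal sketch (not part of the generated reference) of how a few of the fields above might be combined, assuming they are nested under the plugin's key in an `Output` spec; the prefix and secret names below are placeholders. The password references a Kubernetes secret as described in [Secret](./secret.md).
+```yaml
+elasticsearch:
+  scheme: https
+  ssl_verify: true
+  logstash_format: true
+  logstash_prefix: my-app          # placeholder prefix
+  password:
+    valueFrom:
+      secretKeyRef:
+        name: es-credentials       # placeholder secret name
+        key: password
+```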
diff --git a/docs/plugins/outputs/file.md b/docs/plugins/outputs/file.md
new file mode 100644
index 000000000..a739dc4c8
--- /dev/null
+++ b/docs/plugins/outputs/file.md
@@ -0,0 +1,4 @@
+### FileOutputConfig
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| path | string | Yes | - | |
diff --git a/docs/plugins/outputs/format.md b/docs/plugins/outputs/format.md
new file mode 100644
index 000000000..53bf6df60
--- /dev/null
+++ b/docs/plugins/outputs/format.md
@@ -0,0 +1,4 @@
+### Format
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| type | string | No | json | Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value |
diff --git a/docs/plugins/outputs/forward.md b/docs/plugins/outputs/forward.md
new file mode 100644
index 000000000..e966a41c2
--- /dev/null
+++ b/docs/plugins/outputs/forward.md
@@ -0,0 +1,47 @@
+### ForwardOutput
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| servers | []FluentdServer | Yes | - | Server definitions; at least one is required. [Server](#Fluentd-Server) |
+| require_ack_response | bool | No | - | Change the protocol to at-least-once. The plugin waits for an ack from the destination's in_forward plugin. |
+| ack_response_timeout | int | No | 190 | This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries. |
+| send_timeout | int | No | 60 | The timeout time when sending event logs. |
+| connect_timeout | int | No | - | The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised. |
+| recover_wait | int | No | 10 | The wait time before accepting a server fault recovery. |
+| heartbeat_type | string | No | - | The transport protocol to use for heartbeats. Set "none" to disable heartbeat. [transport, tcp, udp, none] |
+| heartbeat_interval | int | No | 1 | The interval of the heartbeat packet. |
+| phi_failure_detector | bool | No | true | Use the "Phi accrual failure detector" to detect server failure. |
+| phi_threshold | int | No | 16 | The threshold parameter used to detect server faults. `phi_threshold` is deeply related to `heartbeat_interval`. If you are using longer `heartbeat_interval`, please use the larger `phi_threshold`. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for `heartbeat_interval` 1s. |
+| hard_timeout | int | No | 60 | The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter. |
+| expire_dns_cache | int | No | - | Set the TTL in seconds to expire the DNS cache. Set 0 to disable the DNS cache. (default: 0) |
+| dns_round_robin | bool | No | - | Enable client-side DNS round robin. Uniformly and randomly pick an IP address to send data to when a hostname has several IP addresses. `heartbeat_type udp` is not available with `dns_round_robin true`. Use `heartbeat_type tcp` or `heartbeat_type none`. |
+| ignore_network_errors_at_startup | bool | No | - | Ignore DNS resolution and errors at startup time. |
+| tls_version | string | No | TLSv1_2 | The default version of TLS transport. [TLSv1_1, TLSv1_2] |
+| tls_ciphers | string | No | ALL:!aNULL:!eNULL:!SSLv2 | The cipher configuration of TLS transport. |
+| tls_insecure_mode | bool | No | false | Skip all verification of certificates or not. |
+| tls_allow_self_signed_cert | bool | No | false | Allow self signed certificates or not. |
+| tls_verify_hostname | bool | No | true | Verify hostname of servers and certificates or not in TLS transport. |
+| tls_cert_path | string | No | - | The additional CA certificate path for TLS. |
+| tls_client_cert_path | string | No | - | The client certificate path for TLS |
+| tls_client_private_key_path | string | No | - | The client private key path for TLS. |
+| tls_client_private_key_passphrase | string | No | - | The client private key passphrase for TLS. |
+| tls_cert_thumbprint | string | No | - | The certificate thumbprint for searching from the Windows system certstore. This parameter is for Windows only. |
+| tls_cert_logical_store_name | string | No | - | The certificate logical store name on Windows system certstore. This parameter is for Windows only. |
+| tls_cert_use_enterprise_store | bool | No | - | Enable using the certificate enterprise store on the Windows system certstore. This parameter is for Windows only. |
+| keepalive | bool | No | false | Enable keepalive connection. |
+| keepalive_timeout | int | No | 0 | Expiration time of keepalive. Default value is nil, which means to keep the connection alive as long as possible. |
+| security | *common.Security | No | - | [Security](/docs/plugins/common/security.md) |
+| verify_connection_at_startup | bool | No | false | Verify that a connection can be made with one of out_forward nodes at the time of startup. |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
+### Fluentd Server
+#### server
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| host | string | Yes | - | The IP address or host name of the server. |
+| name | string | No | - | The name of the server. Used for logging and certificate verification in TLS transport (when the host is an address). |
+| port | int | No | 24224 | The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port. |
+| shared_key | *secret.Secret | No | - | The shared key per server. |
+| username | *secret.Secret | No | - | The username for authentication. |
+| password | *secret.Secret | No | - | The password for authentication. |
+| standby | bool | No | - | Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then. |
+| weight | int | No | 60 | The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio. |
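+
+A minimal sketch (not part of the generated reference, assuming the fields sit under the plugin's key in an `Output` spec) showing how the server list and a few transport options above might be combined; the host and port below are placeholders:
+```yaml
+forward:
+  require_ack_response: true
+  keepalive: true
+  servers:
+    - host: fluentd.target.svc     # placeholder target host
+      port: 24240                  # placeholder port
+      weight: 60
+```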
diff --git a/docs/plugins/outputs/gcs.md b/docs/plugins/outputs/gcs.md
new file mode 100644
index 000000000..0aa346075
--- /dev/null
+++ b/docs/plugins/outputs/gcs.md
@@ -0,0 +1,27 @@
+### GCSOutput
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| project | string | Yes | - | Project identifier for GCS |
+| keyfile | string | No | - | Path of GCS service account credentials JSON file |
+| credentials_json | *secret.Secret | No | - | GCS service account credentials in JSON format [Secret](./secret.md) |
+| client_retries | int | No | - | Number of times to retry requests on server error |
+| client_timeout | int | No | - | Default timeout to use in requests |
+| bucket | string | Yes | - | Name of a GCS bucket |
+| object_key_format | string | No | %{path}%{time_slice}_%{index}.%{file_extension} | Format of GCS object keys |
+| path | string | No | - | Path prefix of the files on GCS |
+| store_as | string | No | gzip | Archive format on GCS: gzip, json, text |
+| transcoding | bool | No | - | Enable the decompressive form of transcoding |
+| auto_create_bucket | bool | No | true | Create the GCS bucket if it does not exist |
+| hex_random_length | int | No | 4 | Max length of the `%{hex_random}` placeholder (4-16) |
+| overwrite | bool | No | false | Overwrite already existing path |
+| acl | string | No | - | Permission for the object in GCS: auth_read, owner_full, owner_read, private, project_private, public_read |
+| storage_class | string | No | - | Storage class of the file: dra, nearline, coldline, multi_regional, regional, standard |
+| encryption_key | string | No | - | Customer-supplied, AES-256 encryption key |
+| object_metadata | []ObjectMetadata | No | - | User provided web-safe keys and arbitrary string values that will be returned with requests for the file as "x-goog-meta-" response headers. [Object Metadata](#ObjectMetadata) |
+| format | *Format | No | - | [Format](./format.md) |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
+### ObjectMetadata
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| key | string | Yes | - | Key |
+| value | string | Yes | - | Value |
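+
+A minimal sketch (not part of the generated reference, assuming the fields sit under the plugin's key in an `Output` spec) of a GCS output using the fields above; the project, bucket and secret names are placeholders:
+```yaml
+gcs:
+  project: my-gcp-project          # placeholder project id
+  bucket: my-log-bucket            # placeholder bucket name
+  path: logs/
+  credentials_json:
+    valueFrom:
+      secretKeyRef:
+        name: gcs-credentials      # placeholder secret, see secret.md
+        key: credentials.json
+  object_metadata:
+    - key: environment
+      value: production            # placeholder metadata value
+```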
diff --git a/docs/plugins/outputs/loki.md b/docs/plugins/outputs/loki.md
new file mode 100644
index 000000000..fb9421ecb
--- /dev/null
+++ b/docs/plugins/outputs/loki.md
@@ -0,0 +1,11 @@
+### Loki
+#### Fluentd output plugin to ship logs to a Loki server.
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| url | string | No | https://logs-us-west1.grafana.net | The url of the Loki server to send logs to. |
+| username | *secret.Secret | No | - | Specify a username if the Loki server requires authentication. [Secret](./secret.md) |
+| password | *secret.Secret | No | - | Specify password if the Loki server requires authentication. [Secret](./secret.md) |
+| tenant | string | No | - | Loki is a multi-tenant log storage platform and all requests sent must include a tenant. |
+| extra_labels | bool | No | nil | Set of labels to include with every Loki stream. |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
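+
+A minimal sketch (not part of the generated reference, assuming the fields sit under the plugin's key in an `Output` spec) of a Loki output; the URL and secret names are placeholders:
+```yaml
+loki:
+  url: http://loki:3100            # placeholder Loki endpoint
+  username:
+    valueFrom:
+      secretKeyRef:
+        name: loki-credentials     # placeholder secret, see secret.md
+        key: username
+  password:
+    valueFrom:
+      secretKeyRef:
+        name: loki-credentials
+        key: password
+```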
diff --git a/docs/plugins/outputs/oss.md b/docs/plugins/outputs/oss.md
new file mode 100644
index 000000000..36dbeb689
--- /dev/null
+++ b/docs/plugins/outputs/oss.md
@@ -0,0 +1,24 @@
+### Aliyun OSS
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| endpoint | string | Yes | - | OSS endpoint to connect to |
+| bucket | string | Yes | - | Your bucket name |
+| access_key_id | *secret.Secret | Yes | - | Your access key id [Secret](./secret.md) |
+| aaccess_key_secret | *secret.Secret | Yes | - | Your access secret key [Secret](./secret.md) |
+| path | string | No | fluent/logs | Path prefix of the files on OSS |
+| upload_crc_enable | bool | No | true | Upload crc enabled |
+| download_crc_enable | bool | No | true | Download crc enabled |
+| open_timeout | int | No | 10 | Timeout for open connections |
+| read_timeout | int | No | 120 | Timeout for read response |
+| oss_sdk_log_dir | string | No | /var/log/td-agent | OSS SDK log directory |
+| key_format | string | No | %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension} | The format of OSS object keys |
+| store_as | string | No | gzip | Archive format on OSS: gzip, json, text, lzo, lzma2 |
+| auto_create_bucket | bool | No | false | Create the OSS bucket if it does not exist |
+| overwrite | bool | No | false | Overwrite already existing path |
+| check_bucket | bool | No | true | Check bucket if exists or not |
+| check_object | bool | No | true | Check object before creation |
+| hex_random_length | int | No | 4 | The length of the `%{hex_random}` placeholder (4-16) |
+| index_format | string | No | %d | `sprintf` format for `%{index}` |
+| warn_for_delay | string | No | - | Given a threshold to treat events as delay, output warning logs if delayed events were put into OSS |
+| format | *Format | No | - | [Format](./format.md) |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
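+
+A minimal sketch (not part of the generated reference, assuming the fields sit under the plugin's key in an `Output` spec) of an OSS output; the endpoint, bucket and secret names are placeholders, and field names follow the table above:
+```yaml
+oss:
+  endpoint: oss-cn-hangzhou.aliyuncs.com   # placeholder endpoint
+  bucket: my-log-bucket                    # placeholder bucket name
+  access_key_id:
+    valueFrom:
+      secretKeyRef:
+        name: oss-credentials              # placeholder secret, see secret.md
+        key: accessKeyId
+  aaccess_key_secret:                      # field name as listed in the table above
+    valueFrom:
+      secretKeyRef:
+        name: oss-credentials
+        key: accessKeySecret
+```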
diff --git a/docs/plugins/outputs/s3.md b/docs/plugins/outputs/s3.md
new file mode 100644
index 000000000..06215dda3
--- /dev/null
+++ b/docs/plugins/outputs/s3.md
@@ -0,0 +1,76 @@
+# Amazon S3 plugin for Fluentd
+## Overview
+The **s3** output plugin buffers event logs in a local file and uploads them to S3 periodically. The plugin splits files exactly by the time of the event logs (not the time when the logs are received). For example, if a log '2011-01-02 message B' arrives and then another log '2011-01-03 message B' arrives in this order, the former is stored in the "20110102.gz" file and the latter in the "20110103.gz" file.
+
+## Configuration
+### Output Config
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| aws_key_id | *secret.Secret | No | - | AWS access key id [Secret](./secret.md) |
+| aws_sec_key | *secret.Secret | No | - | AWS secret key. [Secret](./secret.md) |
+| check_apikey_on_start | string | No | - | Check AWS key on start |
+| grant_read | string | No | - | Allows grantee to read the object data and its metadata |
+| overwrite | string | No | - | Overwrite already existing path |
+| path | string | No | - | Path prefix of the files on S3 |
+| grant_write_acp | string | No | - | Allows grantee to write the ACL for the applicable object |
+| check_bucket | string | No | - | Check bucket if exists or not |
+| sse_customer_key | string | No | - | Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data |
+| sse_customer_key_md5 | string | No | - | Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 |
+| compute_checksums | string | No | - | AWS SDK uses MD5 for API request/response by default |
+| warn_for_delay | string | No | - | Given a threshold to treat events as delay, output warning logs if delayed events were put into s3 |
+| use_bundled_cert | string | No | - | Use aws-sdk-ruby bundled cert |
+| s3_endpoint | string | No | - | Custom S3 endpoint (like minio) |
+| ssekms_key_id | string | No | - | Specifies the AWS KMS key ID to use for object encryption |
+| s3_metadata | string | No | - | Arbitrary S3 metadata headers to set for the object |
+| force_path_style | string | No | - | If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain |
+| auto_create_bucket | string | No | - | Create the S3 bucket if it does not exist |
+| index_format | string | No | - | `sprintf` format for `%{index}` |
+| signature_version | string | No | - | Signature version for API Request (s3,v4) |
+| enable_transfer_acceleration | string | No | - | If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket |
+| ssl_verify_peer | string | No | - | If false, the certificate of endpoint will not be verified |
+| proxy_uri | string | No | - | URI of proxy environment |
+| grant_read_acp | string | No | - | Allows grantee to read the object ACL |
+| check_object | string | No | - | Check object before creation |
+| sse_customer_algorithm | string | No | - | Specifies the algorithm to use when encrypting the object |
+| use_server_side_encryption | string | No | - | The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms) |
+| s3_region | string | No | - | S3 region name |
+| acl | string | No | - | Permission for the object in S3 |
+| grant_full_control | string | No | - | Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object |
+| hex_random_length | string | No | - | The length of `%{hex_random}` placeholder(4-16) |
+| s3_object_key_format | string | No | %{path}%{time_slice}_%{index}.%{file_extension} | The format of S3 object keys |
+| s3_bucket | string | Yes | - | S3 bucket name |
+| store_as | string | No | - | Archive format on S3 |
+| storage_class | string | No | - | The type of storage to use for the object (STANDARD, REDUCED_REDUNDANCY, STANDARD_IA) |
+| aws_iam_retries | string | No | - | The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role |
+| buffer | *Buffer | No | - | [Buffer](./buffer.md) |
+| format | *Format | No | - | [Format](./format.md) |
+| assume_role_credentials | *S3AssumeRoleCredentials | No | - | [Assume Role Credentials](#Assume-Role-Credentials) |
+| instance_profile_credentials | *S3InstanceProfileCredentials | No | - | [Instance Profile Credentials](#Instance-Profile-Credentials) |
+| shared_credentials | *S3SharedCredentials | No | - | [Shared Credentials](#Shared-Credentials) |
+### Assume Role Credentials
+#### assume_role_credentials
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| role_arn | string | Yes | - | The Amazon Resource Name (ARN) of the role to assume |
+| role_session_name | string | Yes | - | An identifier for the assumed role session |
+| policy | string | No | - | An IAM policy in JSON format |
+| duration_seconds | string | No | - | The duration, in seconds, of the role session (900-3600) |
+| external_id | string | No | - | A unique identifier that is used by third parties when assuming roles in their customers' accounts. |
+### Instance Profile Credentials
+#### instance_profile_credentials
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| ip_address | string | No | 169.254.169.254 | IP address |
+| port | string | No | 80 | Port number |
+| http_open_timeout | string | No | - | Number of seconds to wait for the connection to open |
+| http_read_timeout | string | No | - | Number of seconds to wait for one block to be read |
+| retries | string | No | - | Number of times to retry when retrieving credentials |
+### Shared Credentials
+#### shared_credentials
+
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| profile_name | string | No | - | Profile name. Defaults to 'default' or ENV['AWS_PROFILE'] |
+| path | string | No | $HOME/.aws/credentials | Path to the shared file. |
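+
+A minimal sketch (not part of the generated reference, assuming the fields sit under the plugin's key in an `Output` spec) of an S3 output using static credentials; the bucket, region and secret names are placeholders. The credential fields reference Kubernetes secrets as described in [Secret](./secret.md):
+```yaml
+s3:
+  s3_bucket: my-log-bucket         # placeholder bucket name
+  s3_region: eu-west-1             # placeholder region
+  path: logs/
+  aws_key_id:
+    valueFrom:
+      secretKeyRef:
+        name: s3-credentials       # placeholder secret name
+        key: awsAccessKeyId
+  aws_sec_key:
+    valueFrom:
+      secretKeyRef:
+        name: s3-credentials
+        key: awsSecretAccessKey
+```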
diff --git a/docs/plugins/outputs/secret.md b/docs/plugins/outputs/secret.md
new file mode 100644
index 000000000..19835a8fa
--- /dev/null
+++ b/docs/plugins/outputs/secret.md
@@ -0,0 +1,22 @@
+# Secret definition
+
+## Define secret
+
+Secrets can be used in logging-operator `Output` definitions.
+
+> Secrets *MUST* be in the *SAME* namespace as the `Output` or `ClusterOutput` custom resource
+
+**Example secret definition**
+```yaml
+aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name:
+ key:
+```
+
+For debugging purposes, you can define secret values directly. However, this is *NOT* recommended in production.
+```yaml
+aws_key_id:
+ value: "secretvalue"
+```
\ No newline at end of file
diff --git a/docs/plugins/outputs/sumologic.md b/docs/plugins/outputs/sumologic.md
new file mode 100644
index 000000000..21e061652
--- /dev/null
+++ b/docs/plugins/outputs/sumologic.md
@@ -0,0 +1,18 @@
+### SumologicOutput
+| Variable Name | Type | Required | Default | Description |
+|---|---|---|---|---|
+| data_type | string | No | logs | The type of data that will be sent to Sumo Logic, either logs or metrics |
+| endpoint | *secret.Secret | Yes | - | SumoLogic HTTP Collector URL |
+| verify_ssl | bool | No | true | Verify ssl certificate. |
+| metric_data_format | string | No | graphite | The format of metrics you will be sending, either graphite or carbon2 or prometheus |
+| log_format | string | No | json | Format to post logs into Sumo. |
+| log_key | string | No | message | Used to specify the key when merging json or sending logs in text format |
+| source_category | string | No | nil | Set _sourceCategory metadata field within SumoLogic |
+| source_name | string | Yes | - | Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil) |
+| source_name_key | string | No | source_name | Set as source::path_key's value so that the source_name can be extracted from Fluentd's buffer |
+| source_host | string | No | nil | Set _sourceHost metadata field within SumoLogic |
+| open_timeout | int | No | 60 | Set timeout seconds to wait until connection is opened. |
+| add_timestamp | bool | No | true | Add timestamp (or timestamp_key) field to logs before sending to sumologic |
+| timestamp_key | string | No | timestamp | Field name when add_timestamp is on |
+| proxy_uri | string | No | - | Add the uri of the proxy environment if present. |
+| disable_cookies | bool | No | false | Option to disable cookies on the HTTP Client. |
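+
+A minimal sketch (not part of the generated reference, assuming the fields sit under the plugin's key in an `Output` spec) of a Sumo Logic output; the source name, category and secret name are placeholders. The collector endpoint is a secret, as required by the table above:
+```yaml
+sumologic:
+  data_type: logs
+  log_format: json
+  source_name: my-cluster          # placeholder source name
+  source_category: prod/logs       # placeholder category
+  endpoint:
+    valueFrom:
+      secretKeyRef:
+        name: sumologic-collector  # placeholder secret, see secret.md
+        key: endpoint
+```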
diff --git a/docs/plugins/parser.md b/docs/plugins/parser.md
deleted file mode 100644
index a0ff417f0..000000000
--- a/docs/plugins/parser.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Plugin parser
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| format | - | |
-| timeFormat | - | |
-| keyName | log | |
-| reserveData | true | |
-| removeKeyNameField | true | |
-| replaceInvalidSequence | false | |
-## Plugin template
-```
-
- @type parser
- format {{ .format }}
- time_format {{ .timeFormat }}
- key_name {{ .keyName }}
- reserve_data {{ .reserveData }}
- remove_key_name_field {{ .removeKeyNameField }}
- replace_invalid_sequence {{ .replaceInvalidSequence }}
-
-
-```
\ No newline at end of file
diff --git a/docs/plugins/s3.md b/docs/plugins/s3.md
deleted file mode 100644
index e0a22f967..000000000
--- a/docs/plugins/s3.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Plugin s3
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-| role_arn | - | |
-| role_session_name | - | |
-| instance_profile_ip_address | - | |
-| instance_profile_port | - | |
-| aws_key_id | - | |
-| aws_sec_key | - | |
-| s3_bucket | - | |
-| s3_region | - | |
-| s3_endpoint | - | |
-| s3_object_key_format | %{path}%{time_slice}_%{index}.%{file_extension} | |
-| bufferPath | /buffers/s3 | |
-| bufferTimeKey | 3600 | |
-| bufferTimeWait | 10m | |
-| timekey_use_utc | true | |
-| format | json | |
-| path | logs/${tag}/%Y/%m/%d/ | |
-## Plugin template
-```
-
- @type s3
-
- {{- if .role_arn }}
-
- role_arn {{ .role_arn }}
- role_session_name {{ .role_session_name }}
-
- {{- end }}
-
- {{- if .instance_profile_ip_address }}
-
- ip_address {{ .instance_profile_ip_address }}
- port {{ .instance_profile_port }}
-
- {{- end }}
-
- {{- if .aws_key_id }}
- aws_key_id {{ .aws_key_id }}
- aws_sec_key {{ .aws_sec_key }}
- {{- end }}
- s3_bucket {{ .s3_bucket }}
- s3_region {{ .s3_region }}
- {{- if .s3_endpoint }}
- s3_endpoint {{ .s3_endpoint }}
- force_path_style true # This prevents AWS SDK from breaking endpoint URL
- {{- end }}
- store_as gzip_command
-
- path {{ .path }}
- s3_object_key_format {{ .s3_object_key_format }}
-
- # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / s3_object_key_format,
- # need to specify tag for ${tag} and time for %Y/%m/%d in argument.
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .bufferTimeKey }}
- timekey_wait {{ .bufferTimeWait }}
- timekey_use_utc {{ .timekey_use_utc }}
-
-
- @type {{ .format }}
-
-
-```
diff --git a/docs/plugins/stdout.md b/docs/plugins/stdout.md
deleted file mode 100644
index 9998f7f52..000000000
--- a/docs/plugins/stdout.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Plugin stdout
-## Variables
-| Variable name | Default | Applied function |
-|---|---|---|
-| pattern | - | |
-## Plugin template
-```
-
- @type stdout
-
-```
\ No newline at end of file
diff --git a/example/cluster_forward.yaml b/example/cluster_forward.yaml
deleted file mode 100644
index 80f236dc8..000000000
--- a/example/cluster_forward.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Plugin
-metadata:
- name: forward
-spec:
- input:
- label:
- app: "*"
- output:
- - type: forward
- name: forward
- parameters:
- - name: host
- value: "fluentd.target.svc"
- - name: port
- value: "24240"
- - name: name
- value: target
\ No newline at end of file
diff --git a/example/elasticsearch_output.yaml b/example/elasticsearch_output.yaml
deleted file mode 100644
index d518e0e54..000000000
--- a/example/elasticsearch_output.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: "istio-ingressgateway"
-spec:
- input:
- label:
- app: "istio-ingressgateway"
- output:
- - type: "elasticsearch"
- name: "istio-ingressgateway"
- parameters:
- - name: host
- value: "elasticsearch.monitoring.svc.cluster.local"
- - name: port
- value: "9200"
- - name: scheme
- value: "http"
- - name: logstashPrefix
- value: "istio-ingressgateway"
- - name: bufferPath
- value: "/buffers/istio-ingressgateway"
diff --git a/example/forward.md b/example/forward.md
deleted file mode 100644
index eef795854..000000000
--- a/example/forward.md
+++ /dev/null
@@ -1,56 +0,0 @@
-Create the namespaces
-```
-kubectl create ns source
-kubectl create ns target
-```
-
-To test local changes we can install the operator requirements through the chart but we will start the operator
-locally watching different namespaces.
-```
-helm upgrade --install logging-operator charts/logging-operator --set replicaCount=0
-```
-
-Setup the `target` namespace
-```
-kubens target
-
-# create the fluentd resource
-helm upgrade --install logging-operator-target charts/logging-operator-fluent \
- --set fluentbit.enabled=false
-
-# send everything to stdout for for this simple demonstration
-kubectl apply -f example/stdout.yaml
-
-# start the operator to reconcile the desired state on the target namespace
-# stop it once it created all resources successfully
-WATCH_NAMESPACE=target go run cmd/manager/main.go
-
-kubectl rollout status deployment fluentd
-```
-
-Setup the `source` namespace to collect logs and forward to `target`
-```
-kubens source
-
-# create the fluentd resource
-helm upgrade --install logging-operator-source charts/logging-operator-fluent
-
-# install the demo app that writes logs
-helm upgrade --install nginx-logging-demo charts/nginx-logging-demo \
- --set forwarding.enabled=true \
- --set forwarding.targetHost=fluentd.target.svc \
- --set forwarding.targetPort=24240
-
-# start the operator to reconcile the desired state on the source namespace
-# stop it once it created all resources successfully
-WATCH_NAMESPACE=source go run cmd/manager/main.go
-
-# both fluent-bit and fluentd should be successfully rolled out
-kubectl rollout status daemonset fluent-bit-daemon
-kubectl rollout status deployment fluentd
-```
-
-Watch the logs as they arrive to the target cluster
-```
-kubetail -n target
-```
\ No newline at end of file
diff --git a/example/forward_tls.md b/example/forward_tls.md
deleted file mode 100644
index 6edd53df5..000000000
--- a/example/forward_tls.md
+++ /dev/null
@@ -1,83 +0,0 @@
-This is an example to demonstrate fluentd event forwarding.
-For the sake of simplicity it is demonstrated between namespaces.
-
-### Create the TLS certificate
-
-In this example we will create a single TLS bundle with the following contents:
-- CA cert
-- Server cert + key for the `source` and the `target` fluentd instance
-- Client cert + key for the `source` fluentbit and the `source` fluentd forwarder
-
-Caveats:
- - certs on the source and target side must use the same CA
- - the fluentd forwarder will look for the client cert in the same bundle that is used by the fluentd server
-
-Enough said, let's create the namespaces and the cert
-```
-kubectl create ns target
-kubectl create ns source
-(cd example/tls-cluster-forward; ./gencert.sh)
-```
-
-To test local changes we can install the operator requirements through the chart but we will start the operator
-locally watching different namespaces.
-```
-helm upgrade --install logging-operator charts/logging-operator --set replicaCount=0
-```
-
-> use https://github.com/ahmetb/kubectx to switch namespaces with the `kubens` command
-> use https://github.com/johanhaleby/kubetail to tail logs with the `kubetail` command
-
-### Create and setup fluentd in the target namespace
-```
-kubens target
-
-# create the fluentd resource
-helm upgrade --install logging-operator-target charts/logging-operator-fluent \
- --set fluentbit.enabled=false \
- --set tls.enabled=true \
- --set tls.secretName=fluentd-tls \
- --set tls.sharedKey=example
-
-# send everything to stdout for checking the forwarded logs from the `source` cluster
-kubectl apply -f example/stdout.yaml
-
-# start the operator to reconcile the desired state on the target namespace
-# stop it once it created all resources successfully
-WATCH_NAMESPACE=target go run cmd/manager/main.go
-
-kubectl rollout status deployment fluentd
-```
-
-### Setup the `source` namespace to collect logs and forward to `target`
-```
-kubens source
-
-# create the fluentd resource
-helm upgrade --install logging-operator-source charts/logging-operator-fluent \
- --set tls.enabled=true \
- --set tls.secretName=fluentd-tls \
- --set tls.sharedKey=example
-
-# install the demo app that writes logs
-helm upgrade --install nginx-logging-demo charts/nginx-logging-demo \
- --set forwarding.enabled=true \
- --set forwarding.tlsSharedKey=example \
- --set forwarding.targetHost=fluentd.target.svc \
- --set forwarding.targetPort=24240
-
-# start the operator to reconcile the desired state on the source namespace
-# stop it once it created all resources successfully
-WATCH_NAMESPACE=source go run cmd/manager/main.go
-
-# both fluent-bit and fluentd should be successfully rolled out
-kubectl rollout status daemonset fluent-bit-daemon
-kubectl rollout status deployment fluentd
-
-```
-
-
-Watch the logs as they arrive to the target cluster
-```
-kubetail -n target
-```
\ No newline at end of file
diff --git a/example/loki_output.yaml b/example/loki_output.yaml
deleted file mode 100644
index 42e08b48d..000000000
--- a/example/loki_output.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: "loki-demo"
-spec:
- input:
- label:
- app: "nginx-logging-demo"
- output:
- - type: "loki"
- name: "loki-demo"
- parameters:
- - name: url
- value: "http://loki:3100"
- - name: username
- value: ""
- - name: password
- value: ""
- - name: extraLabels
- value: "{\"env\":\"dev\"}"
- - name: flushInterval
- value: "10s"
- - name: bufferChunkLimit
- value: "1m"
diff --git a/example/stdout.yaml b/example/stdout.yaml
deleted file mode 100644
index 2ac1d12e8..000000000
--- a/example/stdout.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Plugin
-metadata:
- name: stdout
-spec:
- input:
- label:
- app: "*"
- output:
- - type: stdout
- name: stdout
diff --git a/example/tls-cluster-forward/cfssl-ca.json b/example/tls-cluster-forward/cfssl-ca.json
deleted file mode 100644
index 0f3c44513..000000000
--- a/example/tls-cluster-forward/cfssl-ca.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
- "CN": "logging.banzaicloud.com",
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "C": "HU",
- "O": "banzaicloud"
- }
- ],
- "ca": {
- "expiry": "87600h"
- },
- "signing": {
- "profiles": {
- "server": {
- "usages": ["digital signature", "key encipherment", "client auth", "server auth"],
- "expiry": "87600h"
- },
- "client": {
- "usages": ["digital signature", "key encipherment", "client auth"],
- "expiry": "87600h"
- }
- }
- }
-}
diff --git a/example/tls-cluster-forward/cfssl-csr.json b/example/tls-cluster-forward/cfssl-csr.json
deleted file mode 100644
index 9e7774710..000000000
--- a/example/tls-cluster-forward/cfssl-csr.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "CN": "fluentd",
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "C": "HU",
- "O": "banzaicloud"
- }
- ],
- "Hosts": ["fluentd.source.svc", "fluentd.target.svc", "fluentd.default.svc"]
-}
diff --git a/example/tls-cluster-forward/gencert.sh b/example/tls-cluster-forward/gencert.sh
deleted file mode 100755
index fd7efe1e8..000000000
--- a/example/tls-cluster-forward/gencert.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash -x
-
-if ! which cfssl; then
- go get -tags nopkcs11 github.com/cloudflare/cfssl/cmd/cfssl
- go get -tags nopkcs11 github.com/cloudflare/cfssl/cmd/cfssljson
-fi
-
-cfssl gencert -initca cfssl-ca.json | cfssljson -bare ca
-cfssl gencert -ca ca.pem -ca-key ca-key.pem -config cfssl-ca.json -profile server cfssl-csr.json | cfssljson -bare server
-cfssl gencert -ca ca.pem -ca-key ca-key.pem -config cfssl-ca.json -profile client cfssl-csr.json | cfssljson -bare client
-
-FILE_ARGS=()
-
-for i in ca server client; do
- FILE_ARGS+=(--from-file "${i}Cert=${i}.pem" --from-file "${i}Key=${i}-key.pem")
-done
-
-kubectl create secret generic fluentd-tls -n target "${FILE_ARGS[@]}"
-kubectl create secret generic fluentd-tls -n source "${FILE_ARGS[@]}"
diff --git a/go.mod b/go.mod
new file mode 100644
index 000000000..781e338f1
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,39 @@
+module github.com/banzaicloud/logging-operator
+
+go 1.12
+
+require (
+ emperror.dev/errors v0.4.2
+ github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e
+ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883
+ github.com/banzaicloud/k8s-objectmatcher v1.0.0
+ github.com/client9/misspell v0.3.4 // indirect
+ github.com/go-logr/logr v0.1.0
+ github.com/golang/protobuf v1.3.2 // indirect
+ github.com/googleapis/gnostic v0.3.0 // indirect
+ github.com/goph/emperror v0.17.2
+ github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8 // indirect
+ github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0
+ github.com/onsi/ginkgo v1.8.0
+ github.com/onsi/gomega v1.5.0
+ github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c
+ github.com/sergi/go-diff v1.0.0 // indirect
+ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
+ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac // indirect
+ golang.org/x/net v0.0.0-20190628185345-da137c7871d7
+ golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 // indirect
+ k8s.io/api v0.0.0-20190528154508-67ef80593b24
+ k8s.io/apimachinery v0.0.0-20190528154326-e59c2fb0a8e5
+ k8s.io/client-go v11.0.1-0.20190516230509-ae8359b20417+incompatible
+ k8s.io/klog v0.3.3 // indirect
+ sigs.k8s.io/controller-runtime v0.2.0
+ sigs.k8s.io/controller-tools v0.2.1 // indirect
+)
+
+replace (
+ k8s.io/api => k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b
+ // required for test deps only
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8
+ k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
+ k8s.io/client-go => k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 000000000..6034fb3df
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,261 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+emperror.dev/errors v0.4.2 h1:snD5ODyv4c9DOBBZh645dy/TziVHZivuFtRRMZP8zK8=
+emperror.dev/errors v0.4.2/go.mod h1:cA5SMsyzo+KXq997DKGK+lTV1DGx5TXLQUNtYe9p2p0=
+github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e h1:eb0Pzkt15Bm7f2FFYv7sjY7NPFi3cPkS3tv1CcrFBWA=
+github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/banzaicloud/k8s-objectmatcher v1.0.0 h1:RM4L5Ugcxq5afc48m1+1WyXmTq6HCQUq6fSu8ecDS30=
+github.com/banzaicloud/k8s-objectmatcher v1.0.0/go.mod h1:voKp6FglIaG+qciEA1bcolOdCTEYZt1J85SiKvnqsms=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
+github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=
+github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo=
+github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE=
+github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
+github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
+github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/goph/emperror v0.17.1/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
+github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18=
+github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
+github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8 h1:ehVe1P3MbhHjeN/Rn66N2fGLrP85XXO1uxpLhv0jtX8=
+github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
+github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk=
+github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs=
+github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY=
+github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
+github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI=
+golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE=
+golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
+gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22 h1:0efs3hwEZhFKsCoP8l6dDB1AZWMgnEl3yWXWRZTOaEA=
+gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo=
+k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
+k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA=
+k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
+k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA=
+k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
+k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ=
+k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
+k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c=
+k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
+k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco=
+k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
+k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y=
+k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+sigs.k8s.io/controller-runtime v0.2.0 h1:5gL30PXOisGZl+Osi4CmLhvMUj77BO3wJeouKF2va50=
+sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4=
+sigs.k8s.io/controller-tools v0.2.1 h1:HoCik83vXOpPi7KSJWdPRmiGntyOzK0v0BTV4U+pl8o=
+sigs.k8s.io/controller-tools v0.2.1/go.mod h1:cenyhL7t2e7izk/Zy7ZxDqQ9YEj0niU5VDL1PWMgZ5s=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs=
+sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
new file mode 100644
index 000000000..bda8a7a97
--- /dev/null
+++ b/hack/boilerplate.go.txt
@@ -0,0 +1,13 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
\ No newline at end of file
diff --git a/hack/minio-mc.yaml b/hack/minio-mc.yaml
deleted file mode 100644
index fbae8fe86..000000000
--- a/hack/minio-mc.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: minio-mc-deployment
-spec:
- strategy:
- type: Recreate
- template:
- metadata:
- labels:
- app: minio-mc
- spec:
- containers:
- - name: minio-mc
- image: minio/mc
- command: ["tail", "-f", "/dev/null"]
diff --git a/hack/minio.yaml b/hack/minio.yaml
deleted file mode 100644
index 9a893d2e4..000000000
--- a/hack/minio.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: minio-deployment
-spec:
- strategy:
- type: Recreate
- template:
- metadata:
- labels:
- app: minio
- spec:
- containers:
- - name: minio
- image: minio/minio
- args:
- - server
- - /storage
- readinessProbe:
- httpGet:
- path: /minio/health/ready
- port: 9000
- initialDelaySeconds: 10
- periodSeconds: 5
- env:
- - name: MINIO_ACCESS_KEY
- value: "minio_access_key"
- - name: MINIO_SECRET_KEY
- value: "minio_secret_key"
- - name: MINIO_REGION
- value: 'test_region'
- ports:
- - containerPort: 9000
----
-kind: Service
-apiVersion: v1
-metadata:
- name: minio-service
-spec:
- selector:
- app: minio
- ports:
- - protocol: TCP
- port: 9000
- targetPort: 9000
diff --git a/hack/test-s3-output.yaml b/hack/test-s3-output.yaml
deleted file mode 100644
index 8ead4fc09..000000000
--- a/hack/test-s3-output.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: "logging-operator"
-spec:
- input:
- label:
- app: "*"
- output:
- - type: s3
- name: outputS3
- parameters:
- - name: aws_key_id
- value: minio_access_key
- - name: aws_sec_key
- value: minio_secret_key
- - name: s3_bucket
- value: logs
- - name: s3_region
- value: test_region
- - name: s3_endpoint
- value: "http://minio-service.default.svc.cluster.local:9000"
- - name: bufferTimeKey
- value: "10"
- - name: bufferTimeWait
- value: "0"
diff --git a/hack/test.sh b/hack/test.sh
deleted file mode 100755
index a403d9718..000000000
--- a/hack/test.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-set -eufo pipefail
-
-SCRIPT_PATH="$(dirname "$(readlink -f "$0")")"
-BUCKET='minio/logs'
-
-function main()
-{
- helm_deploy_logging_operator
- helm_deploy_logging_operator_fluent
-
- apply_s3_output
- mc_pod="$(get_mc_pod_name)"
- wait_for_log_files "${mc_pod}" 300
- print_logs "${mc_pod}"
-}
-
-function helm_deploy_logging_operator()
-{
- helm install \
- --wait \
- --name logging-operator \
- --set image.tag='local' \
- banzaicloud-stable/logging-operator
-}
-
-function helm_deploy_logging_operator_fluent()
-{
- helm install \
- --wait \
- --name logging-operator-fluent \
- banzaicloud-stable/logging-operator-fluent
-}
-
-
-function apply_s3_output()
-{
- kubectl apply -f "${SCRIPT_PATH}/test-s3-output.yaml"
-}
-
-function get_mc_pod_name()
-{
- kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}'
-}
-
-function wait_for_log_files()
-{
- local mc_pod="$1"
- local deadline="$(( $(date +%s) + $2 ))"
-
- echo 'Waiting for log files...'
- while [ $(date +%s) -lt ${deadline} ]; do
- if [ $(count_log_files "${mc_pod}") -gt 0 ]; then
- return
- fi
- sleep 5
- done
-
- echo 'Cannot find any log files within timeout'
- exit 1
-}
-
-function count_log_files()
-{
- local mc_pod="$1"
-
- get_log_files "${mc_pod}" | wc -l
-}
-
-function get_log_files()
-{
- local mc_pod="$1"
-
- kubectl exec "${mc_pod}" -- mc find "${BUCKET}" --name '*.gz'
-}
-
-function print_logs()
-{
- local mc_pod="$1"
-
- kubectl exec "${mc_pod}" -- mc find "${BUCKET}" --name '*.gz' -exec 'mc cat {}' | gzip -d
-}
-
-main "$@"
diff --git a/main.go b/main.go
new file mode 100644
index 000000000..285c89cfc
--- /dev/null
+++ b/main.go
@@ -0,0 +1,82 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "flag"
+ "os"
+
+ loggingv1alpha2 "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/banzaicloud/logging-operator/controllers"
+ "k8s.io/apimachinery/pkg/runtime"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+ // +kubebuilder:scaffold:imports
+)
+
+var (
+ scheme = runtime.NewScheme()
+ setupLog = ctrl.Log.WithName("setup")
+)
+
+func init() {
+	_ = clientgoscheme.AddToScheme(scheme)
+ _ = loggingv1alpha2.AddToScheme(scheme)
+ // +kubebuilder:scaffold:scheme
+}
+
+func main() {
+ var metricsAddr string
+ var enableLeaderElection bool
+ var verboseLogging bool
+
+ flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
+ flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
+ "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
+ flag.BoolVar(&verboseLogging, "verbose", false, "Enable verbose logging")
+ flag.Parse()
+
+ ctrl.SetLogger(zap.Logger(verboseLogging))
+
+ mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
+ Scheme: scheme,
+ MetricsBindAddress: metricsAddr,
+ LeaderElection: enableLeaderElection,
+ })
+
+ if err != nil {
+ setupLog.Error(err, "unable to start manager")
+ os.Exit(1)
+ }
+
+ loggingReconciler := &controllers.LoggingReconciler{
+ Client: mgr.GetClient(),
+ Log: ctrl.Log.WithName("controllers").WithName("Logging"),
+ }
+
+ if err := controllers.SetupLoggingWithManager(mgr, ctrl.Log.WithName("manager")).Complete(loggingReconciler); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "Logging")
+ os.Exit(1)
+ }
+ // +kubebuilder:scaffold:builder
+
+ setupLog.Info("starting manager")
+ if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+ setupLog.Error(err, "problem running manager")
+ os.Exit(1)
+ }
+}
diff --git a/pkg/apis/addtoscheme_logging_v1alpha1.go b/pkg/apis/addtoscheme_logging_v1alpha1.go
deleted file mode 100644
index 4b34138ab..000000000
--- a/pkg/apis/addtoscheme_logging_v1alpha1.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package apis
-
-import (
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
-)
-
-func init() {
- // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
- AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
-}
diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go
deleted file mode 100644
index cad99d308..000000000
--- a/pkg/apis/apis.go
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package apis
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// AddToSchemes may be used to add all resources defined in the project to a Scheme
-var AddToSchemes runtime.SchemeBuilder
-
-// AddToScheme adds all Resources to the Scheme
-func AddToScheme(s *runtime.Scheme) error {
- return AddToSchemes.AddToScheme(s)
-}
diff --git a/pkg/apis/logging/v1alpha1/common_types.go b/pkg/apis/logging/v1alpha1/common_types.go
deleted file mode 100644
index 22b365b5a..000000000
--- a/pkg/apis/logging/v1alpha1/common_types.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package v1alpha1
-
-// ImageSpec struct hold information about image specification
-type ImageSpec struct {
- Repository string `json:"repository"`
- Tag string `json:"tag"`
- PullPolicy string `json:"pullPolicy"`
-}
diff --git a/pkg/apis/logging/v1alpha1/doc.go b/pkg/apis/logging/v1alpha1/doc.go
deleted file mode 100644
index f63c9a86c..000000000
--- a/pkg/apis/logging/v1alpha1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Package v1alpha1 contains API Schema definitions for the logging v1alpha1 API group
-// +k8s:deepcopy-gen=package,register
-// +groupName=logging.banzaicloud.com
-package v1alpha1
diff --git a/pkg/apis/logging/v1alpha1/fluentbit_types.go b/pkg/apis/logging/v1alpha1/fluentbit_types.go
deleted file mode 100644
index 90229b6ad..000000000
--- a/pkg/apis/logging/v1alpha1/fluentbit_types.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package v1alpha1
-
-import (
- "strconv"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
-// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-
-// FluentbitSpec defines the desired state of Fluentbit
-// +k8s:openapi-gen=true
-type FluentbitSpec struct {
- // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
- // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
- // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
- Namespace string `json:"namespace"`
- Annotations map[string]string `json:"annotations"`
- Image ImageSpec `json:"image"`
- TLS FluentbitTLS `json:"tls"`
- Resources corev1.ResourceRequirements `json:"resources,omitempty"`
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-}
-
-// FluentbitTLS defines the TLS configs
-type FluentbitTLS struct {
- Enabled bool `json:"enabled"`
- SecretName string `json:"secretName"`
- SecretType string `json:"secretType,omitempty"`
- SharedKey string `json:"sharedKey"`
-}
-
-// FluentbitStatus defines the observed state of Fluentbit
-// +k8s:openapi-gen=true
-type FluentbitStatus struct {
- // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
- // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
- // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Fluentbit is the Schema for the fluentbits API
-// +k8s:openapi-gen=true
-type Fluentbit struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec FluentbitSpec `json:"spec,omitempty"`
- Status FluentbitStatus `json:"status,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// FluentbitList contains a list of Fluentbit
-type FluentbitList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []Fluentbit `json:"items"`
-}
-
-// GetPrometheusPortFromAnnotation gets the port value from annotation
-func (spec FluentbitSpec) GetPrometheusPortFromAnnotation() int32 {
- port, err := strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32)
- if err != nil {
- panic(err)
- }
- return int32(port)
-}
-
-func init() {
- SchemeBuilder.Register(&Fluentbit{}, &FluentbitList{})
-}
diff --git a/pkg/apis/logging/v1alpha1/fluentd_types.go b/pkg/apis/logging/v1alpha1/fluentd_types.go
deleted file mode 100644
index ad09f71c8..000000000
--- a/pkg/apis/logging/v1alpha1/fluentd_types.go
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package v1alpha1
-
-import (
- "strconv"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
-// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-
-// FluentdSpec defines the desired state of Fluentd
-// +k8s:openapi-gen=true
-type FluentdSpec struct {
- // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
- // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
- // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
- Namespace string `json:"namespace"`
- Annotations map[string]string `json:"annotations"`
- TLS FluentdTLS `json:"tls"`
- Image ImageSpec `json:"image"`
- FluentdPvcSpec corev1.PersistentVolumeClaimSpec `json:"fluentdPvcSpec"`
- VolumeModImage ImageSpec `json:"volumeModImage"`
- ConfigReloaderImage ImageSpec `json:"configReloaderImage"`
- Resources corev1.ResourceRequirements `json:"resources,omitempty"`
- ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-}
-
-// FluentdTLS defines the TLS configs
-type FluentdTLS struct {
- Enabled bool `json:"enabled"`
- SecretName string `json:"secretName"`
- SecretType string `json:"secretType,omitempty"`
- SharedKey string `json:"sharedKey"`
-}
-
-// FluentdStatus defines the observed state of Fluentd
-// +k8s:openapi-gen=true
-type FluentdStatus struct {
- // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
- // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
- // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Fluentd is the Schema for the fluentds API
-// +k8s:openapi-gen=true
-type Fluentd struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec FluentdSpec `json:"spec,omitempty"`
- Status FluentdStatus `json:"status,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// FluentdList contains a list of Fluentd
-type FluentdList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []Fluentd `json:"items"`
-}
-
-// GetPrometheusPortFromAnnotation gets the port value from annotation
-func (spec FluentdSpec) GetPrometheusPortFromAnnotation() int32 {
- port, err := strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32)
- if err != nil {
- panic(err)
- }
- return int32(port)
-}
-
-// GetServiceType gets the service type if set or ClusterIP as the default
-func (spec FluentdSpec) GetServiceType() corev1.ServiceType {
- if spec.ServiceType == "" {
- return corev1.ServiceTypeClusterIP
- }
- return spec.ServiceType
-}
-
-func init() {
- SchemeBuilder.Register(&Fluentd{}, &FluentdList{})
-}
diff --git a/pkg/apis/logging/v1alpha1/loggingplugin_types.go b/pkg/apis/logging/v1alpha1/loggingplugin_types.go
deleted file mode 100644
index fa82767f8..000000000
--- a/pkg/apis/logging/v1alpha1/loggingplugin_types.go
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package v1alpha1
-
-import (
- "context"
- "fmt"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
-// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-
-// PluginSpec defines the desired state of Plugin
-// +k8s:openapi-gen=true
-type PluginSpec struct {
- // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
- // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
- // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
- Input Input `json:"input,omitempty"`
- Filter []FPlugin `json:"filter,omitempty"`
- Output []FPlugin `json:"output,omitempty"`
-}
-
-// Input this determines the log origin
-type Input struct {
- Label map[string]string `json:"label"`
-}
-
-// FPlugin struct for fluentd plugins
-type FPlugin struct {
- Type string `json:"type"`
- Name string `json:"name"`
- Parameters []Parameter `json:"parameters,omitempty"`
-}
-
-// Parameter generic parameter type to handle values from different sources
-type Parameter struct {
- Name string `json:"name"`
- ValueFrom *ValueFrom `json:"valueFrom,omitempty"`
- Value string `json:"value"`
-}
-
-// GetValue for a Parameter
-func (p Parameter) GetValue(namespace string, client client.Client) (string, string) {
- if p.ValueFrom != nil {
- value, err := p.ValueFrom.GetValue(namespace, client)
- if err != nil {
- return "", ""
- }
- return p.Name, value
- }
- return p.Name, p.Value
-}
-
-// ValueFrom generic type to determine value origin
-type ValueFrom struct {
- SecretKeyRef KubernetesSecret `json:"secretKeyRef"`
-}
-
-// GetValue handles the different origin of ValueFrom
-func (vf *ValueFrom) GetValue(namespace string, client client.Client) (string, error) {
- return vf.SecretKeyRef.GetValue(namespace, client)
-}
-
-// KubernetesSecret is a ValueFrom type
-type KubernetesSecret struct {
- Name string `json:"name"`
- Key string `json:"key"`
- Namespace string `json:"namespace"`
-}
-
-// GetValue implement GetValue interface
-func (ks KubernetesSecret) GetValue(namespace string, client client.Client) (string, error) {
- secret := &corev1.Secret{}
- nSpace := namespace
- if ks.Namespace != "" {
- nSpace = ks.Namespace
- }
- err := client.Get(context.TODO(), types.NamespacedName{Name: ks.Name, Namespace: nSpace}, secret)
- if err != nil {
- return "", err
- }
- value, ok := secret.Data[ks.Key]
- if !ok {
- return "", fmt.Errorf("key %q not found in secret %q in namespace %q", ks.Key, secret.ObjectMeta.Name, secret.ObjectMeta.Namespace)
- }
- return string(value), nil
-}
-
-// PluginStatus defines the observed state of Plugin
-// +k8s:openapi-gen=true
-type PluginStatus struct {
- // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
- // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
- // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Plugin is the Schema for the Plugin API
-// +k8s:openapi-gen=true
-type Plugin struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec PluginSpec `json:"spec,omitempty"`
- Status PluginStatus `json:"status,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PluginList contains a list of Plugin
-type PluginList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []Plugin `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&Plugin{}, &PluginList{})
-}
diff --git a/pkg/apis/logging/v1alpha1/register.go b/pkg/apis/logging/v1alpha1/register.go
deleted file mode 100644
index 332e1f383..000000000
--- a/pkg/apis/logging/v1alpha1/register.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// NOTE: Boilerplate only. Ignore this file.
-
-// Package v1alpha1 contains API Schema definitions for the logging v1alpha1 API group
-// +k8s:deepcopy-gen=package,register
-// +groupName=logging.banzaicloud.com
-package v1alpha1
-
-import (
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
-)
-
-var (
- // SchemeGroupVersion is group version used to register these objects
- SchemeGroupVersion = schema.GroupVersion{Group: "logging.banzaicloud.com", Version: "v1alpha1"}
-
- // SchemeBuilder is used to add go types to the GroupVersionKind scheme
- SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
-)
diff --git a/pkg/apis/logging/v1alpha1/zz_generated.defaults.go b/pkg/apis/logging/v1alpha1/zz_generated.defaults.go
deleted file mode 100644
index dd621a3ac..000000000
--- a/pkg/apis/logging/v1alpha1/zz_generated.defaults.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// RegisterDefaults adds defaulters functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
- return nil
-}
diff --git a/pkg/apis/logging/v1alpha1/zz_generated.openapi.go b/pkg/apis/logging/v1alpha1/zz_generated.openapi.go
deleted file mode 100644
index 772edb8a0..000000000
--- a/pkg/apis/logging/v1alpha1/zz_generated.openapi.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// +build !ignore_autogenerated
-
-// Code generated by openapi-gen. DO NOT EDIT.
-
-// This file was autogenerated by openapi-gen. Do not edit it manually!
-
-package v1alpha1
-
-import (
- spec "github.com/go-openapi/spec"
- common "k8s.io/kube-openapi/pkg/common"
-)
-
-func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
- return map[string]common.OpenAPIDefinition{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.Fluentbit": schema_pkg_apis_logging_v1alpha1_Fluentbit(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitSpec": schema_pkg_apis_logging_v1alpha1_FluentbitSpec(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitStatus": schema_pkg_apis_logging_v1alpha1_FluentbitStatus(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.Fluentd": schema_pkg_apis_logging_v1alpha1_Fluentd(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdSpec": schema_pkg_apis_logging_v1alpha1_FluentdSpec(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdStatus": schema_pkg_apis_logging_v1alpha1_FluentdStatus(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.Plugin": schema_pkg_apis_logging_v1alpha1_Plugin(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.PluginSpec": schema_pkg_apis_logging_v1alpha1_PluginSpec(ref),
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.PluginStatus": schema_pkg_apis_logging_v1alpha1_PluginStatus(ref),
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_Fluentbit(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "Fluentbit is the Schema for the fluentbits API",
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
- },
- },
- "spec": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitSpec"),
- },
- },
- "status": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitStatus"),
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitSpec", "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_FluentbitSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "FluentbitSpec defines the desired state of Fluentbit",
- Properties: map[string]spec.Schema{
- "namespace": {
- SchemaProps: spec.SchemaProps{
- Description: "INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run \"operator-sdk generate k8s\" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html",
- Type: []string{"string"},
- Format: "",
- },
- },
- "annotations": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "image": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.ImageSpec"),
- },
- },
- "tls": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitTLS"),
- },
- },
- "resources": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
- },
- },
- },
- Required: []string{"namespace", "annotations", "image", "tls"},
- },
- },
- Dependencies: []string{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentbitTLS", "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.ImageSpec", "k8s.io/api/core/v1.ResourceRequirements"},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_FluentbitStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "FluentbitStatus defines the observed state of Fluentbit",
- Properties: map[string]spec.Schema{},
- },
- },
- Dependencies: []string{},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_Fluentd(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "Fluentd is the Schema for the fluentds API",
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
- },
- },
- "spec": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdSpec"),
- },
- },
- "status": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdStatus"),
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdSpec", "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_FluentdSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "FluentdSpec defines the desired state of Fluentd",
- Properties: map[string]spec.Schema{
- "namespace": {
- SchemaProps: spec.SchemaProps{
- Description: "INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run \"operator-sdk generate k8s\" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html",
- Type: []string{"string"},
- Format: "",
- },
- },
- "annotations": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "tls": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdTLS"),
- },
- },
- "image": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.ImageSpec"),
- },
- },
- "fluentdPvcSpec": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"),
- },
- },
- "volumeModImage": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.ImageSpec"),
- },
- },
- "configReloaderImage": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.ImageSpec"),
- },
- },
- "resources": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
- },
- },
- },
- Required: []string{"namespace", "annotations", "tls", "image", "fluentdPvcSpec", "volumeModImage", "configReloaderImage"},
- },
- },
- Dependencies: []string{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FluentdTLS", "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.ImageSpec", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "k8s.io/api/core/v1.ResourceRequirements"},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_FluentdStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "FluentdStatus defines the observed state of Fluentd",
- Properties: map[string]spec.Schema{},
- },
- },
- Dependencies: []string{},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_Plugin(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "Plugin is the Schema for the Plugin API",
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
- },
- },
- "spec": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.PluginSpec"),
- },
- },
- "status": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.PluginStatus"),
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.PluginSpec", "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.PluginStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_PluginSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "PluginSpec defines the desired state of Plugin",
- Properties: map[string]spec.Schema{
- "input": {
- SchemaProps: spec.SchemaProps{
- Description: "INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run \"operator-sdk generate k8s\" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html",
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.Input"),
- },
- },
- "filter": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FPlugin"),
- },
- },
- },
- },
- },
- "output": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FPlugin"),
- },
- },
- },
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.FPlugin", "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1.Input"},
- }
-}
-
-func schema_pkg_apis_logging_v1alpha1_PluginStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "PluginStatus defines the observed state of Plugin",
- Properties: map[string]spec.Schema{},
- },
- },
- Dependencies: []string{},
- }
-}
diff --git a/pkg/controller/add_fluentbit.go b/pkg/controller/add_fluentbit.go
deleted file mode 100644
index 39264377a..000000000
--- a/pkg/controller/add_fluentbit.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package controller
-
-import (
- "github.com/banzaicloud/logging-operator/pkg/controller/fluentbit"
-)
-
-func init() {
- // AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
- AddToManagerFuncs = append(AddToManagerFuncs, fluentbit.Add)
-}
diff --git a/pkg/controller/add_fluentd.go b/pkg/controller/add_fluentd.go
deleted file mode 100644
index 7956c4639..000000000
--- a/pkg/controller/add_fluentd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package controller
-
-import (
- "github.com/banzaicloud/logging-operator/pkg/controller/fluentd"
-)
-
-func init() {
- // AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
- AddToManagerFuncs = append(AddToManagerFuncs, fluentd.Add)
-}
diff --git a/pkg/controller/add_loggingplugin.go b/pkg/controller/add_loggingplugin.go
deleted file mode 100644
index 018764010..000000000
--- a/pkg/controller/add_loggingplugin.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package controller
-
-import (
- "github.com/banzaicloud/logging-operator/pkg/controller/plugin"
-)
-
-func init() {
- // AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
- AddToManagerFuncs = append(AddToManagerFuncs, plugin.Add)
-}
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
deleted file mode 100644
index d5730d072..000000000
--- a/pkg/controller/controller.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package controller
-
-import (
- "sigs.k8s.io/controller-runtime/pkg/manager"
-)
-
-// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
-var AddToManagerFuncs []func(manager.Manager) error
-
-// AddToManager adds all Controllers to the Manager
-func AddToManager(m manager.Manager) error {
- for _, f := range AddToManagerFuncs {
- if err := f(m); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/pkg/controller/fluentbit/fluentbit_controller.go b/pkg/controller/fluentbit/fluentbit_controller.go
deleted file mode 100644
index 5d0860259..000000000
--- a/pkg/controller/fluentbit/fluentbit_controller.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package fluentbit
-
-import (
- "context"
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
- "github.com/banzaicloud/logging-operator/pkg/resources"
- "github.com/banzaicloud/logging-operator/pkg/resources/fluentbit"
-
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- "sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-var log = logf.Log.WithName("controller_fluentbit")
-
-/**
-* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
-* business logic. Delete these comments after modifying this file.*
- */
-
-// Add creates a new Fluentbit Controller and adds it to the Manager. The Manager will set fields on the Controller
-// and Start it when the Manager is Started.
-func Add(mgr manager.Manager) error {
- return add(mgr, newReconciler(mgr))
-}
-
-// newReconciler returns a new reconcile.Reconciler
-func newReconciler(mgr manager.Manager) reconcile.Reconciler {
- return &ReconcileFluentbit{client: mgr.GetClient(), scheme: mgr.GetScheme()}
-}
-
-// add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
- // Create a new controller
- c, err := controller.New("fluentbit-controller", mgr, controller.Options{Reconciler: r})
- if err != nil {
- return err
- }
-
- // Watch for changes to primary resource Fluentbit
- err = c.Watch(&source.Kind{Type: &loggingv1alpha1.Fluentbit{}}, &handler.EnqueueRequestForObject{})
- if err != nil {
- return err
- }
-
- return nil
-}
-
-var _ reconcile.Reconciler = &ReconcileFluentbit{}
-
-// ReconcileFluentbit reconciles a Fluentbit object
-type ReconcileFluentbit struct {
- // This client, initialized using mgr.Client() above, is a split client
- // that reads objects from the cache and writes to the apiserver
- client client.Client
- scheme *runtime.Scheme
-}
-
-// Reconcile reads that state of the cluster for a Fluentbit object and makes changes based on the state read
-// and what is in the Fluentbit.Spec
-// TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
-// a Pod as an example
-// Note:
-// The Controller will requeue the Request to be processed again if the returned error is non-nil or
-// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (r *ReconcileFluentbit) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
- reqLogger.Info("Reconciling Fluentbit")
-
- // Fetch the Fluentbit instance
- instance := &loggingv1alpha1.Fluentbit{}
- err := r.client.Get(context.TODO(), request.NamespacedName, instance)
- if err != nil {
- if errors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile request.
- // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
- // Return and don't requeue
- return reconcile.Result{}, nil
- }
- // Error reading the object - requeue the request.
- return reconcile.Result{}, err
- }
-
- reconcilers := []resources.ComponentReconciler{
- fluentbit.New(r.client, instance),
- }
-
- for _, rec := range reconcilers {
- err = rec.Reconcile(reqLogger)
- if err != nil {
- return reconcile.Result{}, err
- }
- }
-
- return reconcile.Result{}, nil
-}
diff --git a/pkg/controller/fluentd/fluentd_controller.go b/pkg/controller/fluentd/fluentd_controller.go
deleted file mode 100644
index 80c689472..000000000
--- a/pkg/controller/fluentd/fluentd_controller.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package fluentd
-
-import (
- "context"
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
- "github.com/banzaicloud/logging-operator/pkg/resources"
- "github.com/banzaicloud/logging-operator/pkg/resources/fluentd"
-
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- "sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-var log = logf.Log.WithName("controller_fluentd")
-
-/**
-* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
-* business logic. Delete these comments after modifying this file.*
- */
-
-// Add creates a new Fluentd Controller and adds it to the Manager. The Manager will set fields on the Controller
-// and Start it when the Manager is Started.
-func Add(mgr manager.Manager) error {
- return add(mgr, newReconciler(mgr))
-}
-
-// newReconciler returns a new reconcile.Reconciler
-func newReconciler(mgr manager.Manager) reconcile.Reconciler {
- return &ReconcileFluentd{client: mgr.GetClient(), scheme: mgr.GetScheme()}
-}
-
-// add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
- // Create a new controller
- c, err := controller.New("fluentd-controller", mgr, controller.Options{Reconciler: r})
- if err != nil {
- return err
- }
-
- // Watch for changes to primary resource Fluentd
- err = c.Watch(&source.Kind{Type: &loggingv1alpha1.Fluentd{}}, &handler.EnqueueRequestForObject{})
- if err != nil {
- return err
- }
- return nil
-}
-
-var _ reconcile.Reconciler = &ReconcileFluentd{}
-
-// ReconcileFluentd reconciles a Fluentd object
-type ReconcileFluentd struct {
- // This client, initialized using mgr.Client() above, is a split client
- // that reads objects from the cache and writes to the apiserver
- client client.Client
- scheme *runtime.Scheme
-}
-
-// Reconcile reads that state of the cluster for a Fluentd object and makes changes based on the state read
-// and what is in the Fluentd.Spec
-// Note:
-// The Controller will requeue the Request to be processed again if the returned error is non-nil or
-// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (r *ReconcileFluentd) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
- reqLogger.Info("Reconciling Fluentd")
-
- // Fetch the Fluentd instance
- instance := &loggingv1alpha1.Fluentd{}
- err := r.client.Get(context.TODO(), request.NamespacedName, instance)
- if err != nil {
- if errors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile request.
- // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
- // Return and don't requeue
- return reconcile.Result{}, nil
- }
- // Error reading the object - requeue the request.
- return reconcile.Result{}, err
- }
- reconcilers := []resources.ComponentReconciler{
- fluentd.New(r.client, instance),
- }
-
- for _, rec := range reconcilers {
- err = rec.Reconcile(reqLogger)
- if err != nil {
- return reconcile.Result{}, err
- }
- }
-
- return reconcile.Result{}, nil
-}
diff --git a/pkg/controller/plugin/plugin_controller.go b/pkg/controller/plugin/plugin_controller.go
deleted file mode 100644
index 57f903163..000000000
--- a/pkg/controller/plugin/plugin_controller.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugin
-
-import (
- "context"
-
- "github.com/banzaicloud/logging-operator/pkg/resources"
- "github.com/banzaicloud/logging-operator/pkg/resources/plugins"
-
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
-
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- "sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-var log = logf.Log.WithName("controller_plugin")
-
-/**
-* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
-* business logic. Delete these comments after modifying this file.*
- */
-
-// Add creates a new Plugin Controller and adds it to the Manager. The Manager will set fields on the Controller
-// and Start it when the Manager is Started.
-func Add(mgr manager.Manager) error {
- return add(mgr, newReconciler(mgr))
-}
-
-// newReconciler returns a new reconcile.Reconciler
-func newReconciler(mgr manager.Manager) reconcile.Reconciler {
- return &ReconcilePlugin{client: mgr.GetClient(), scheme: mgr.GetScheme()}
-}
-
-// add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
- // Create a new controller
- c, err := controller.New("plugin-controller", mgr, controller.Options{Reconciler: r})
- if err != nil {
- return err
- }
-
- // Watch for changes to primary resource Plugin
- err = c.Watch(&source.Kind{Type: &loggingv1alpha1.Plugin{}}, &handler.EnqueueRequestForObject{})
- if err != nil {
- return err
- }
-
- return nil
-}
-
-var _ reconcile.Reconciler = &ReconcilePlugin{}
-
-// ReconcilePlugin reconciles a Plugin object
-type ReconcilePlugin struct {
- // This client, initialized using mgr.Client() above, is a split client
- // that reads objects from the cache and writes to the apiserver
- client client.Client
- scheme *runtime.Scheme
-}
-
-// Reconcile reads that state of the cluster for a Plugin object and makes changes based on the state read
-// and what is in the Plugin.Spec
-// Note:
-// The Controller will requeue the Request to be processed again if the returned error is non-nil or
-// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (r *ReconcilePlugin) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
- reqLogger.Info("Reconciling Plugin")
-
- // Fetch the Plugin instance
- instanceList := &loggingv1alpha1.PluginList{}
-
- err := r.client.List(context.TODO(), client.MatchingLabels(map[string]string{}), instanceList)
- if err != nil {
- return reconcile.Result{}, err
- }
-
- reconcilers := []resources.ComponentReconciler{
- plugins.New(r.client, instanceList, request.Namespace),
- }
-
- for _, rec := range reconcilers {
- err = rec.Reconcile(reqLogger)
- if err != nil {
- return reconcile.Result{}, err
- }
- }
-
- return reconcile.Result{}, nil
-}
diff --git a/pkg/k8sutil/resource.go b/pkg/k8sutil/resource.go
index 6da039323..591603c29 100644
--- a/pkg/k8sutil/resource.go
+++ b/pkg/k8sutil/resource.go
@@ -1,95 +1,135 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package k8sutil
import (
"context"
- "fmt"
"reflect"
+ "emperror.dev/errors"
+ "github.com/banzaicloud/k8s-objectmatcher/patch"
"github.com/go-logr/logr"
"github.com/goph/emperror"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
runtimeClient "sigs.k8s.io/controller-runtime/pkg/client"
)
-// Reconcile reconciles various kubernetes types
-func Reconcile(log logr.Logger, client runtimeClient.Client, desired runtime.Object) error {
- log = log.WithValues("type", reflect.TypeOf(desired))
+// GenericResourceReconciler generic resource reconciler
+type GenericResourceReconciler struct {
+ Log logr.Logger
+ Client runtimeClient.Client
+}
+
+// NewReconciler returns GenericResourceReconciler
+func NewReconciler(client runtimeClient.Client, log logr.Logger) *GenericResourceReconciler {
+ return &GenericResourceReconciler{
+ Log: log,
+ Client: client,
+ }
+}
+
+// CreateResource creates a resource if it doesn't exist
+func (r *GenericResourceReconciler) CreateResource(desired runtime.Object) error {
+ _, _, err := r.createIfNotExists(desired)
+ return err
+}
+
+// ReconcileResource reconciles various kubernetes types
+func (r *GenericResourceReconciler) ReconcileResource(desired runtime.Object) error {
+ log := r.Log.WithValues("type", reflect.TypeOf(desired))
+ created, current, err := r.createIfNotExists(desired)
+ if err == nil && created {
+ return nil
+ }
+ if err != nil {
+ return errors.Wrapf(err, "failed to create resource %+v", desired)
+ }
+ key, err := runtimeClient.ObjectKeyFromObject(current)
+ if err != nil {
+ return errors.Wrapf(err, "meta accessor failed %+v", current)
+ }
+ if err == nil {
+ patchResult, err := patch.DefaultPatchMaker.Calculate(current, desired)
+ if err != nil {
+ log.Error(err, "could not match objects",
+ "kind", desired.GetObjectKind().GroupVersionKind(), "name", key.Name)
+ } else if patchResult.IsEmpty() {
+ log.V(1).Info("resource is in sync",
+ "kind", desired.GetObjectKind().GroupVersionKind(), "name", key.Name)
+ return nil
+ } else {
+ log.V(1).Info("resource diffs",
+ "patch", string(patchResult.Patch),
+ "current", string(patchResult.Current),
+ "modified", string(patchResult.Modified),
+ "original", string(patchResult.Original))
+ }
+
+ if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(desired); err != nil {
+ log.Error(err, "Failed to set last applied annotation", "desired", desired)
+ }
+
+ metaAccessor := meta.NewAccessor()
+
+ currentResourceVersion, err := metaAccessor.ResourceVersion(current)
+ if err != nil {
+ return errors.Wrap(err, "failed to access resourceVersion from metadata")
+ }
+ metaAccessor.SetResourceVersion(desired, currentResourceVersion)
+
+ var name string
+ if name, err = metaAccessor.Name(current); err != nil {
+ return errors.Wrap(err, "failed to access Name from metadata")
+ }
+
+ log.V(1).Info("Updating resource",
+ "gvk", desired.GetObjectKind().GroupVersionKind(), "name", name)
+ if err := r.Client.Update(context.TODO(), desired); err != nil {
+ return emperror.WrapWith(err, "updating resource failed",
+ "resource", desired.GetObjectKind().GroupVersionKind(), "type", reflect.TypeOf(desired))
+ }
+ log.Info("resource updated", "resource", desired.GetObjectKind().GroupVersionKind())
+ }
+ return nil
+}
+
+func (r *GenericResourceReconciler) createIfNotExists(desired runtime.Object) (bool, runtime.Object, error) {
+ log := r.Log.WithValues("type", reflect.TypeOf(desired))
var current = desired.DeepCopyObject()
key, err := runtimeClient.ObjectKeyFromObject(current)
if err != nil {
- return emperror.With(err)
+ return false, nil, emperror.With(err)
}
- err = client.Get(context.TODO(), key, current)
+ err = r.Client.Get(context.TODO(), key, current)
if err != nil && !apierrors.IsNotFound(err) {
- return emperror.WrapWith(err, "getting resource failed", "resource", desired.GetObjectKind().GroupVersionKind(), "type", reflect.TypeOf(desired))
+ return false, nil, emperror.WrapWith(err, "getting resource failed",
+ "resource", desired.GetObjectKind().GroupVersionKind(), "type", reflect.TypeOf(desired))
}
if apierrors.IsNotFound(err) {
- if err := client.Create(context.TODO(), desired); err != nil {
- return emperror.WrapWith(err, "creating resource failed", "resource", desired.GetObjectKind().GroupVersionKind(), "type", reflect.TypeOf(desired))
+ if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(desired); err != nil {
+ log.Error(err, "Failed to set last applied annotation", "desired", desired)
}
- log.Info("resource created", "resource", desired.GetObjectKind().GroupVersionKind())
- }
- if err == nil {
- switch desired.(type) {
- default:
- return emperror.With(fmt.Errorf("unexpected resource type %s", reflect.TypeOf(desired)))
- case *corev1.ServiceAccount:
- sa := desired.(*corev1.ServiceAccount)
- sa.ResourceVersion = current.(*corev1.ServiceAccount).ResourceVersion
- desired = sa
- case *corev1.PersistentVolumeClaim:
- log.Info("Could not update PersistentVolumeClaim yet")
- desired = current
- case *rbacv1.ClusterRole:
- cr := desired.(*rbacv1.ClusterRole)
- cr.ResourceVersion = current.(*rbacv1.ClusterRole).ResourceVersion
- desired = cr
- case *rbacv1.ClusterRoleBinding:
- crb := desired.(*rbacv1.ClusterRoleBinding)
- crb.ResourceVersion = current.(*rbacv1.ClusterRoleBinding).ResourceVersion
- desired = crb
- case *corev1.ConfigMap:
- cm := desired.(*corev1.ConfigMap)
- cm.ResourceVersion = current.(*corev1.ConfigMap).ResourceVersion
- desired = cm
- case *corev1.Service:
- svc := desired.(*corev1.Service)
- svc.ResourceVersion = current.(*corev1.Service).ResourceVersion
- svc.Spec.ClusterIP = current.(*corev1.Service).Spec.ClusterIP
- desired = svc
- case *appsv1.Deployment:
- deploy := desired.(*appsv1.Deployment)
- deploy.ResourceVersion = current.(*appsv1.Deployment).ResourceVersion
- desired = deploy
- case *appsv1.DaemonSet:
- daemon := desired.(*appsv1.DaemonSet)
- daemon.ResourceVersion = current.(*appsv1.DaemonSet).ResourceVersion
- desired = daemon
+ if err := r.Client.Create(context.TODO(), desired); err != nil {
+ return false, nil, emperror.WrapWith(err, "creating resource failed",
+ "resource", desired.GetObjectKind().GroupVersionKind(), "type", reflect.TypeOf(desired))
}
- if err := client.Update(context.TODO(), desired); err != nil {
- return emperror.WrapWith(err, "updating resource failed", "resource", desired.GetObjectKind().GroupVersionKind(), "type", reflect.TypeOf(desired))
- }
- log.Info("resource updated", "resource", desired.GetObjectKind().GroupVersionKind())
+ log.Info("resource created", "resource", desired.GetObjectKind().GroupVersionKind())
+ return true, current, nil
}
- return nil
+ log.V(1).Info("resource already exists", "resource", desired.GetObjectKind().GroupVersionKind())
+ return false, current, nil
}
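
A minimal usage sketch (not part of the patch) of how a component reconciler could drive the new GenericResourceReconciler. The ConfigMap contents, the names and the client/logger wiring below are illustrative assumptions only.

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/go-logr/logr"

	"github.com/banzaicloud/logging-operator/pkg/k8sutil"
)

// reconcileConfigMap shows the intended call pattern: create the object when it is
// missing, otherwise patch it only when the diff against the last-applied
// annotation is non-empty. Names and data below are hypothetical.
func reconcileConfigMap(c client.Client, log logr.Logger) error {
	desired := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "fluentd-app-config", Namespace: "logging"},
		Data:       map[string]string{"fluent.conf": "# rendered configuration goes here"},
	}
	return k8sutil.NewReconciler(c, log).ReconcileResource(desired)
}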
diff --git a/pkg/model/common/security.go b/pkg/model/common/security.go
new file mode 100644
index 000000000..6ad4969a9
--- /dev/null
+++ b/pkg/model/common/security.go
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2019 Banzai Cloud
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+type Security struct {
+ // Hostname
+ SelfHostname string `json:"self_hostname"`
+ // Shared key for authentication.
+ SharedKey string `json:"shared_key"`
+ // If true, use user based authentication.
+ UserAuth bool `json:"user_auth,omitempty"`
+	// Allow anonymous source. <client> sections are required if disabled.
+ AllowAnonymousSource bool `json:"allow_anonymous_source,omitempty"`
+}
+
+func (s *Security) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Directive: "security",
+ }, s, secretLoader)
+}
diff --git a/pkg/model/common/transport.go b/pkg/model/common/transport.go
new file mode 100644
index 000000000..8321b6d89
--- /dev/null
+++ b/pkg/model/common/transport.go
@@ -0,0 +1,57 @@
+/*
+ * Copyright © 2019 Banzai Cloud
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+type Transport struct {
+	// Transport protocol. Default: tcp
+ Protocol string `json:"protocol,omitempty"`
+ // Version Default: 'TLSv1_2'
+ Version string `json:"version,omitempty"`
+ // Ciphers Default: "ALL:!aNULL:!eNULL:!SSLv2"
+ Ciphers string `json:"ciphers,omitempty"`
+	// Use insecure connection (when using TLS). Default: false
+ Insecure bool `json:"insecure,omitempty"`
+ // Specify path to CA certificate file
+ CaPath string `json:"ca_path,omitempty"`
+ // Specify path to Certificate file
+ CertPath string `json:"cert_path,omitempty"`
+ // Specify path to private Key file
+ PrivateKeyPath string `json:"private_key_path,omitempty"`
+	// Passphrase for the private key
+ PrivateKeyPassphrase string `json:"private_key_passphrase,omitempty"`
+ // When this is set Fluentd will check all incoming HTTPS requests
+ // for a client certificate signed by the trusted CA, requests that
+ // don't supply a valid client certificate will fail.
+ ClientCertAuth bool `json:"client_cert_auth,omitempty"`
+	// Path to the private CA certificate file
+ CaCertPath string `json:"ca_cert_path,omitempty"`
+	// Path to the private CA private key file
+ CaPrivateKeyPath string `json:"ca_private_key_path,omitempty"`
+	// Passphrase for the private CA private key
+ CaPrivateKeyPassphrase string `json:"ca_private_key_passphrase,omitempty"`
+}
+
+func (t *Transport) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Directive: "transport",
+ Tag: "tls",
+ }, t, secretLoader)
+}
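
A small sketch under assumptions: the certificate paths are hypothetical and the secret.SecretLoader must come from the caller. The returned directive is expected to render as a <transport tls> section of the fluentd configuration.

package example

import (
	"github.com/banzaicloud/logging-operator/pkg/model/common"
	"github.com/banzaicloud/logging-operator/pkg/model/secret"
	"github.com/banzaicloud/logging-operator/pkg/model/types"
)

// tlsTransportDirective builds a Transport model with hypothetical cert paths
// and converts it into a directive via the loader supplied by the caller.
func tlsTransportDirective(loader secret.SecretLoader) (types.Directive, error) {
	t := &common.Transport{
		Version:        "TLSv1_2",
		CertPath:       "/fluentd/tls/tls.crt",
		PrivateKeyPath: "/fluentd/tls/tls.key",
	}
	return t.ToDirective(loader)
}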
diff --git a/pkg/model/filter/parser.go b/pkg/model/filter/parser.go
new file mode 100644
index 000000000..b26973730
--- /dev/null
+++ b/pkg/model/filter/parser.go
@@ -0,0 +1,87 @@
+package filter
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+// +docName:"Parser"
+// https://docs.fluentd.org/filter/parser
+type ParserConfig struct {
+ // Specify field name in the record to parse.
+ KeyName string `json:"key_name"`
+ // Keep original event time in parsed result.
+ ReserveTime bool `json:"reserve_time,omitempty"`
+ // Keep original key-value pair in parsed result.
+ ReserveData bool `json:"reserve_data,omitempty"`
+ // Remove key_name field when parsing is succeeded
+ RemoveKeyNameField bool `json:"remove_key_name_field,omitempty"`
+ // If true, invalid string is replaced with safe characters and re-parse it.
+ ReplaceInvalidSequence bool `json:"replace_invalid_sequence,omitempty"`
+ // Store parsed values with specified key name prefix.
+ InjectKeyPrefix string `json:"inject_key_prefix,omitempty"`
+ // Store parsed values as a hash value in a field.
+	HashValueField string `json:"hash_value_field,omitempty"`
+ // Emit invalid record to @ERROR label. Invalid cases are: key not exist, format is not matched, unexpected error
+ EmitInvalidRecordToError bool `json:"emit_invalid_record_to_error,omitempty"`
+ // +docLink:"Parse Section,#Parse-Section"
+ Parsers []ParseSection `json:"parsers,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+// +docName:"Parse Section"
+type ParseSection struct {
+ // Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none
+ Type string `json:"type,omitempty"`
+ // Regexp expression to evaluate
+ Expression string `json:"expression,omitempty"`
+ // Specify time field for event time. If the event doesn't have this field, current time is used.
+ TimeKey string `json:"time_key,omitempty"`
+ // Specify null value pattern.
+ NullValuePattern string `json:"null_value_pattern,omitempty"`
+ // If true, empty string field is replaced with nil
+ NullEmptyString bool `json:"null_empty_string,omitempty"`
+ // If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.
+ EstimateCurrentEvent bool `json:"estimate_current_event,omitempty"`
+ // If true, keep time field in the record.
+ KeepTimeKey bool `json:"keep_time_key,omitempty"`
+}
+
+func (p *ParseSection) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ parseMeta := types.PluginMeta{
+ Directive: "parse",
+ Type: p.Type,
+ }
+ p.Type = ""
+ return types.NewFlatDirective(parseMeta, p, secretLoader)
+}
+
+func NewParserConfig() *ParserConfig {
+ return &ParserConfig{}
+}
+
+func (p *ParserConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ parser := &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Type: "parser",
+ Directive: "filter",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(p); err != nil {
+ return nil, err
+ } else {
+ parser.Params = params
+ }
+ if len(p.Parsers) > 0 {
+ for _, parseRule := range p.Parsers {
+ if meta, err := parseRule.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ parser.SubDirectives = append(parser.SubDirectives, meta)
+ }
+ }
+ }
+ return parser, nil
+}
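
A sketch of how ParserConfig composes nested parse sections; the field values are illustrative and the secret.SecretLoader is assumed to be supplied by the caller. The result should be a <filter **> directive of type "parser" with one nested <parse> sub-directive.

package example

import (
	"github.com/banzaicloud/logging-operator/pkg/model/filter"
	"github.com/banzaicloud/logging-operator/pkg/model/secret"
	"github.com/banzaicloud/logging-operator/pkg/model/types"
)

// nginxParser parses the "log" field with the nginx parser while keeping the
// original key-value pairs in the record.
func nginxParser(loader secret.SecretLoader) (types.Directive, error) {
	p := filter.NewParserConfig()
	p.KeyName = "log"
	p.ReserveData = true
	p.Parsers = []filter.ParseSection{
		{Type: "nginx"},
	}
	return p.ToDirective(loader)
}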
diff --git a/pkg/model/filter/stdout.go b/pkg/model/filter/stdout.go
new file mode 100644
index 000000000..1face2e85
--- /dev/null
+++ b/pkg/model/filter/stdout.go
@@ -0,0 +1,37 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filter
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+
+type StdOutFilterConfig struct {
+}
+
+func NewStdOutFilterConfig() *StdOutFilterConfig {
+ return &StdOutFilterConfig{}
+}
+
+func (c *StdOutFilterConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "stdout",
+ Directive: "filter",
+ Tag: "**",
+ }, c, secretLoader)
+}
diff --git a/pkg/model/filter/tagnormaliser.go b/pkg/model/filter/tagnormaliser.go
new file mode 100644
index 000000000..0e03ff929
--- /dev/null
+++ b/pkg/model/filter/tagnormaliser.go
@@ -0,0 +1,36 @@
+package filter
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +docName:"Fluentd Plugin to re-tag based on log metadata"
+// More info at https://github.com/banzaicloud/fluent-plugin-tag-normaliser
+//
+// Available kubernetes metadata
+//
+// | Parameter | Description | Example |
+// |-----------|-------------|---------|
+// | ${pod_name} | Pod name | understood-butterfly-nginx-logging-demo-7dcdcfdcd7-h7p9n |
+// | ${container_name} | Container name inside the Pod | nginx-logging-demo |
+// | ${namespace_name} | Namespace name | default |
+// | ${pod_id} | Kubernetes UUID for Pod | 1f50d309-45a6-11e9-b795-025000000001 |
+// | ${labels} | Kubernetes Pod labels. This is a nested map. You can access nested attributes via `.` | {"app":"nginx-logging-demo", "pod-template-hash":"7dcdcfdcd7" } |
+// | ${host} | Node hostname the Pod runs on | docker-desktop |
+// | ${docker_id} | Docker UUID of the container | 3a38148aa37aa3... |
+type _doc interface{}
+
+// +docName:"Tag Normaliser parameters"
+type TagNormaliser struct {
+	// Re-tag log messages. More info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ Format string `json:"format,omitempty" plugin:"default:${namespace_name}.${pod_name}.${container_name}"`
+}
+
+func (t *TagNormaliser) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "tag_normaliser",
+ Directive: "match",
+ Tag: "kubernetes.**",
+ }, t, secretLoader)
+}
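
A sketch only: the format string below is the plugin's documented default, and the result is expected to render as a <match kubernetes.**> block of type tag_normaliser.

package example

import (
	"github.com/banzaicloud/logging-operator/pkg/model/filter"
	"github.com/banzaicloud/logging-operator/pkg/model/secret"
	"github.com/banzaicloud/logging-operator/pkg/model/types"
)

// retagByWorkload re-tags kubernetes.* events to <namespace>.<pod>.<container>,
// matching the default declared in the struct tag above.
func retagByWorkload(loader secret.SecretLoader) (types.Directive, error) {
	t := &filter.TagNormaliser{
		Format: "${namespace_name}.${pod_name}.${container_name}",
	}
	return t.ToDirective(loader)
}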
diff --git a/pkg/model/filter/zz_generated.deepcopy.go b/pkg/model/filter/zz_generated.deepcopy.go
new file mode 100644
index 000000000..e1bd43e6b
--- /dev/null
+++ b/pkg/model/filter/zz_generated.deepcopy.go
@@ -0,0 +1,71 @@
+// +build !ignore_autogenerated
+
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package filter
+
+import ()
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParseSection) DeepCopyInto(out *ParseSection) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParseSection.
+func (in *ParseSection) DeepCopy() *ParseSection {
+ if in == nil {
+ return nil
+ }
+ out := new(ParseSection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserConfig) DeepCopyInto(out *ParserConfig) {
+ *out = *in
+ if in.Parsers != nil {
+ in, out := &in.Parsers, &out.Parsers
+ *out = make([]ParseSection, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserConfig.
+func (in *ParserConfig) DeepCopy() *ParserConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ParserConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StdOutFilterConfig) DeepCopyInto(out *StdOutFilterConfig) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StdOutFilterConfig.
+func (in *StdOutFilterConfig) DeepCopy() *StdOutFilterConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(StdOutFilterConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/model/input/forward.go b/pkg/model/input/forward.go
new file mode 100644
index 000000000..bf4a10ce6
--- /dev/null
+++ b/pkg/model/input/forward.go
@@ -0,0 +1,63 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package input
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/common"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+
+type ForwardInputConfig struct {
+ Port string `json:"port,omitempty" plugin:"default:24240"`
+ Bind string `json:"bind,omitempty" plugin:"default:0.0.0.0"`
+ Transport *common.Transport `json:"transport,omitempty"`
+ Security *common.Security `json:"security,omitempty"`
+}
+
+func NewForwardInputConfig() *ForwardInputConfig {
+ return &ForwardInputConfig{}
+}
+
+func (f *ForwardInputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ forward := &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Type: "forward",
+ Directive: "source",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(f); err != nil {
+ return nil, err
+ } else {
+ forward.Params = params
+ }
+ if f.Transport != nil {
+ if transport, err := f.Transport.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ forward.SubDirectives = append(forward.SubDirectives, transport)
+ }
+ }
+ if f.Security != nil {
+ if security, err := f.Security.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ forward.SubDirectives = append(forward.SubDirectives, security)
+ }
+ }
+ return forward, nil
+}
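
A sketch composing the new forward input with the common Transport and Security sections; the port, hostname and shared key values are illustrative only. It should render a <source> block of type "forward" with nested <transport tls> and <security> sub-directives.

package example

import (
	"github.com/banzaicloud/logging-operator/pkg/model/common"
	"github.com/banzaicloud/logging-operator/pkg/model/input"
	"github.com/banzaicloud/logging-operator/pkg/model/secret"
	"github.com/banzaicloud/logging-operator/pkg/model/types"
)

// secureForwardSource wires TLS and shared-key authentication into the forward source.
func secureForwardSource(loader secret.SecretLoader) (types.Directive, error) {
	f := input.NewForwardInputConfig()
	f.Port = "24240"
	f.Transport = &common.Transport{Version: "TLSv1_2"}
	f.Security = &common.Security{
		SelfHostname: "fluentd.logging.svc",
		SharedKey:    "example-shared-key",
	}
	return f.ToDirective(loader)
}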
diff --git a/pkg/model/input/tail.go b/pkg/model/input/tail.go
new file mode 100644
index 000000000..d5f293f8c
--- /dev/null
+++ b/pkg/model/input/tail.go
@@ -0,0 +1,37 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package input
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+
+type TailInputConfig struct {
+ Path string `json:"path"`
+}
+
+func NewTailInputConfig(path string) *TailInputConfig {
+ return &TailInputConfig{Path: path}
+}
+
+func (c *TailInputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "tail",
+ Directive: "source",
+ }, c, secretLoader)
+}
diff --git a/pkg/model/input/zz_generated.deepcopy.go b/pkg/model/input/zz_generated.deepcopy.go
new file mode 100644
index 000000000..f660c315d
--- /dev/null
+++ b/pkg/model/input/zz_generated.deepcopy.go
@@ -0,0 +1,63 @@
+// +build !ignore_autogenerated
+
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package input
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/common"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForwardInputConfig) DeepCopyInto(out *ForwardInputConfig) {
+ *out = *in
+ if in.Transport != nil {
+ in, out := &in.Transport, &out.Transport
+ *out = new(common.Transport)
+ **out = **in
+ }
+ if in.Security != nil {
+ in, out := &in.Security, &out.Security
+ *out = new(common.Security)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardInputConfig.
+func (in *ForwardInputConfig) DeepCopy() *ForwardInputConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ForwardInputConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TailInputConfig) DeepCopyInto(out *TailInputConfig) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailInputConfig.
+func (in *TailInputConfig) DeepCopy() *TailInputConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TailInputConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/model/output/azurestore.go b/pkg/model/output/azurestore.go
new file mode 100644
index 000000000..5c70e3377
--- /dev/null
+++ b/pkg/model/output/azurestore.go
@@ -0,0 +1,56 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+
+type AzureStorage struct {
+ // Path prefix of the files on Azure
+ Path string `json:"path,omitempty"`
+ // Your azure storage account
+ // +docLink:"Secret,./secret.md"
+ AzureStorageAccount *secret.Secret `json:"azure_storage_account"`
+ // Your azure storage access key
+ // +docLink:"Secret,./secret.md"
+ AzureStorageAccessKey *secret.Secret `json:"azure_storage_access_key"`
+ // Your azure storage container
+ AzureContainer string `json:"azure_container"`
+ // Azure storage type currently only "blob" supported (default: blob)
+ AzureStorageType string `json:"azure_storage_type,omitempty"`
+ // Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})
+ AzureObjectKeyFormat string `json:"azure_object_key_format,omitempty"`
+ // Store as: gzip, json, text, lzo, lzma2 (default: gzip)
+ StoreAs string `json:"store_as,omitempty"`
+	// Automatically create the container if it does not exist (default: true)
+ AutoCreateContainer bool `json:"auto_create_container,omitempty"`
+ // Compat format type: out_file, json, ltsv (default: out_file)
+ Format string `json:"format,omitempty" plugin:"default:json"`
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+}
+
+func (a *AzureStorage) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ azure := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+ Type: "azurestorage",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(a); err != nil {
+ return nil, err
+ } else {
+ azure.Params = params
+ }
+ if a.Buffer != nil {
+ if buffer, err := a.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ azure.SubDirectives = append(azure.SubDirectives, buffer)
+ }
+ }
+ return azure, nil
+}
diff --git a/pkg/model/output/buffer.go b/pkg/model/output/buffer.go
new file mode 100644
index 000000000..1300662f7
--- /dev/null
+++ b/pkg/model/output/buffer.go
@@ -0,0 +1,112 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+
+type Buffer struct {
+ // Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed.
+ Type string `json:"type,omitempty"`
+ // When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags. (default: tag,time)
+ Tags string `json:"tags,omitempty"`
+ // The path where buffer chunks are stored. The '*' is replaced with random characters. This parameter is required.
+ Path string `json:"path,omitempty" plugin:"default:/buffers/default.*.buffer"`
+ // The max size of each chunks: events will be written into chunks until the size of chunks become this size
+ ChunkLimitSize string `json:"chunk_limit_size,omitempty"`
+ // The max number of events that each chunks can store in it
+ ChunkLimitRecords int `json:"chunk_limit_records,omitempty"`
+ // The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost)
+ TotalLimitSize string `json:"total_limit_size,omitempty"`
+ //The queue length limitation of this buffer plugin instance
+ QueueLimitLength int `json:"queue_limit_length,omitempty"`
+ // The percentage of chunk size threshold for flushing. output plugin will flush the chunk when actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in default)
+ ChunkFullThreshold string `json:"chunk_full_threshold,omitempty"`
+ //Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations.
+ QueuedChunksLimitSize int `json:"queued_chunks_limit_size,omitempty"`
+ // If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks.
+ Compress string `json:"compress,omitempty"`
+ // The value to specify to flush/write all buffer chunks at shutdown, or not
+	// Whether to flush/write all buffer chunks at shutdown
+ // Default: default (equals to lazy if time is specified as chunk key, interval otherwise)
+ // lazy: flush/write chunks once per timekey
+ // interval: flush/write chunks per specified time via flush_interval
+ // immediate: flush/write chunks immediately after events are appended into chunks
+ FlushMode string `json:"flush_mode,omitempty"`
+ // Default: 60s
+ FlushInterval string `json:"flush_interval,omitempty"`
+ // The number of threads of output plugins, which is used to write chunks in parallel
+ FlushThreadCount int `json:"flush_thread_count,omitempty"`
+ // The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting)
+ FlushThreadInterval string `json:"flush_thread_interval,omitempty"`
+ // The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next
+ FlushThreadBurstInterval string `json:"flush_thread_burst_interval,omitempty"`
+ // The timeout seconds until output plugin decides that async write operation fails
+ DelayedCommitTimeout string `json:"delayed_commit_timeout,omitempty"`
+ // How output plugin behaves when its buffer queue is full
+ // throw_exception: raise exception to show this error in log
+ // block: block processing of input plugin to emit events into that buffer
+ // drop_oldest_chunk: drop/purge oldest chunk to accept newly incoming chunk
+ OverflowAction string `json:"overflow_action,omitempty"`
+ // The maximum seconds to retry to flush while failing, until plugin discards buffer chunks
+ RetryTimeout string `json:"retry_timeout,omitempty"`
+ // If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever
+ RetryForever bool `json:"retry_forever,omitempty" plugin:"default:true"`
+ // The maximum number of times to retry to flush while failing
+ RetryMaxTimes int `json:"retry_max_times,omitempty"`
+ // The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0)
+ RetrySecondaryThreshold string `json:"retry_secondary_threshold,omitempty"`
+ // exponential_backoff: wait seconds will become large exponentially per failures
+ // periodic: output plugin will retry periodically with fixed intervals (configured via retry_wait)
+ RetryType string `json:"retry_type,omitempty"`
+ // Seconds to wait before next retry to flush, or constant factor of exponential backoff
+ RetryWait string `json:"retry_wait,omitempty"`
+ // The base number of exponential backoff for retries
+ RetryExponentialBackoffBase string `json:"retry_exponential_backoff_base,omitempty"`
+ // The maximum interval seconds for exponential backoff between retries while failing
+ RetryMaxInterval string `json:"retry_max_interval,omitempty"`
+ // If true, output plugin will retry after randomized interval not to do burst retries
+ RetryRandomize bool `json:"retry_randomize,omitempty"`
+ // Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6.
+ DisableChunkBackup bool `json:"disable_chunk_backup,omitempty"`
+ // Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys)
+ Timekey string `json:"timekey" plugin:"default:10m"`
+	// Output plugin writes chunks timekey_wait seconds after timekey expiration
+ TimekeyWait string `json:"timekey_wait,omitempty"`
+ // Output plugin decides to use UTC or not to format placeholders using timekey
+ TimekeyUseUtc bool `json:"timekey_use_utc,omitempty"`
+ // The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders
+ TimekeyZone string `json:"timekey_zone,omitempty"`
+}
+
+func (b *Buffer) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ metadata := types.PluginMeta{
+ Type: "file",
+ Directive: "buffer",
+ }
+ // Set default values for tags
+ if b.Tags != "" {
+ metadata.Tag = b.Tags
+ } else {
+ metadata.Tag = "tag,time"
+ }
+
+ b.Tags = ""
+ return types.NewFlatDirective(metadata, b, secretLoader)
+}
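
A sketch of the buffer model: the Tags field becomes the chunk keys of the <buffer> section and falls back to "tag,time" when empty, so the example below should render roughly as a <buffer tag,time> block with the listed flush parameters. The values are illustrative.

package example

import (
	"github.com/banzaicloud/logging-operator/pkg/model/output"
	"github.com/banzaicloud/logging-operator/pkg/model/secret"
	"github.com/banzaicloud/logging-operator/pkg/model/types"
)

// fileBuffer flushes chunks on a fixed interval with the default 10m timekey.
func fileBuffer(loader secret.SecretLoader) (types.Directive, error) {
	b := &output.Buffer{
		FlushMode:     "interval",
		FlushInterval: "10s",
		Timekey:       "10m",
	}
	return b.ToDirective(loader)
}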
diff --git a/pkg/model/output/elasticsearch.go b/pkg/model/output/elasticsearch.go
new file mode 100644
index 000000000..4c8aea6a8
--- /dev/null
+++ b/pkg/model/output/elasticsearch.go
@@ -0,0 +1,152 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+// +docName:"Elasticsearch"
+// Send your logs to Elasticsearch
+type ElasticsearchOutput struct {
+ // You can specify Elasticsearch host by this parameter. (default:localhost)
+ Host string `json:"host,omitempty"`
+ // You can specify Elasticsearch port by this parameter.(default: 9200)
+ Port int `json:"port,omitempty"`
+ // You can specify multiple Elasticsearch hosts with separator ",". If you specify hosts option, host and port options are ignored.
+ Hosts string `json:"hosts,omitempty"`
+ // User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+}
+ User string `json:"user,omitempty"`
+ // Password for HTTP Basic authentication.
+ // +docLink:"Secret,./secret.md"
+ Password *secret.Secret `json:"password,omitempty"`
+ // Path for HTTP Basic authentication.
+ Path string `json:"path,omitempty"`
+ // Connection scheme (default: http)
+ Scheme string `json:"scheme,omitempty"`
+	// Verify SSL certificates (default: true)
+ SslVerify bool `json:"ssl_verify" plugin:"default:true"`
+ // If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]
+ SslVersion string `json:"ssl_version,omitempty"`
+ // Enable Logstash log format.(default: false)
+ LogstashFormat bool `json:"logstash_format,omitempty"`
+ // Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.
+ IncludeTimestamp bool `json:"include_timestamp,omitempty"`
+	// Set the Logstash prefix. (default: logstash)
+ LogstashPrefix string `json:"logstash_prefix,omitempty"`
+ // Set the Logstash prefix separator.(default: -)
+ LogstashPrefixSeparator string `json:"logstash_prefix_separator,omitempty"`
+ // Set the Logstash date format.(default: %Y.%m.%d)
+ LogstashDateformat string `json:"logstash_dateformat,omitempty"`
+ // This param is to set a pipeline id of your elasticsearch to be added into the request, you can configure ingest node.
+ Pipeline string `json:"pipeline,omitempty"`
+ // The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.
+ TimeKeyFormat string `json:"time_key_format,omitempty"`
+ // Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.
+ TimePrecision string `json:"time_precision,omitempty"`
+ // By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you'd like to use a custom time, include an @timestamp with your record.
+ TimeKey string `json:"time_key,omitempty"`
+	// By default, records are inserted into the logstash-YYMMDD index using UTC (Coordinated Universal Time). Set utc_index to false to use local time instead. (default: true)
+ UtcIndex bool `json:"utc_index,omitempty"`
+ // Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot ('.') as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ TargetIndexKey string `json:"target_index_key,omitempty"`
+ // Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.(default: true)
+ TargetTypeKey string `json:"target_type_key,omitempty"`
+ // The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.
+ TemplateName string `json:"template_name,omitempty"`
+
+ // The path to the file containing the template to install.
+ TemplateFile string `json:"template_file,omitempty"`
+
+ // Specify index templates in form of hash. Can contain multiple templates.
+ Templates string `json:"templates,omitempty"`
+ // Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration.
+ CustomizeTemplate string `json:"customize_template,omitempty"`
+ // Specify this as true when an index with rollover capability needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index
+ RolloverIndex bool `json:"rollover_index,omitempty"`
+ // Specify this to override the index date pattern for creating a rollover index.(default: now/d)
+ IndexDatePattern string `json:"index_date_pattern,omitempty"`
+ // Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API
+ DeflectorAlias string `json:"deflector_alias,omitempty"`
+ // Specify the index prefix for the rollover index to be created.
+ IndexPrefix string `json:"index_prefix,omitempty"`
+ // Specify the application name for the rollover index to be created.(default: default)
+ ApplicationName string `json:"application_name,omitempty"`
+ // Always update the template, even if it already exists.(default: false)
+ TemplateOverwrite bool `json:"template_overwrite,omitempty"`
+	// You can specify how many times to retry putting the template. (default: 10)
+ MaxRetryPuttingTemplate string `json:"max_retry_putting_template,omitempty"`
+	// Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)
+ FailOnPuttingTemplateRetryExceed bool `json:"fail_on_putting_template_retry_exceed,omitempty"`
+
+	// You can specify how many times to retry obtaining the Elasticsearch version. (default: 15)
+ MaxRetryGetEsVersion string `json:"max_retry_get_es_version,omitempty"`
+
+ // You can specify HTTP request timeout.(default: 5s)
+ RequestTimeout string `json:"request_timeout,omitempty"`
+ // You can tune how the elasticsearch-transport host reloading feature works.(default: true)
+ ReloadConnections bool `json:"reload_connections,omitempty"`
+ //Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses.(default: false)
+ ReloadOnFailure bool `json:"reload_on_failure,omitempty"`
+ // You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected.(default: 60s)
+ ResurrectAfter string `json:"resurrect_after,omitempty"`
+
+ // This will add the Fluentd tag in the JSON record.(default: false)
+ IncludeTagKey bool `json:"include_tag_key,omitempty"`
+ // This will add the Fluentd tag in the JSON record.(default: tag)
+ TagKey string `json:"tag_key,omitempty"`
+
+ // https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ IdKey string `json:"id_key,omitempty"`
+ // Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event.
+ RoutingKey string `json:"routing_key,omitempty"`
+ // Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.
+ RemoveKeysOnUpdate string `json:"remove_keys_on_update,omitempty"`
+ // This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.
+ RemoveKeysOnUpdateKey string `json:"remove_keys_on_update_key,omitempty"`
+ // This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.
+ RetryTag string `json:"retry_tag,omitempty"`
+ // The write_operation can be any of: (index,create,update,upsert)(default: index)
+ WriteOperation string `json:"write_operation,omitempty"`
+	// Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it reconnects only on "host unreachable exceptions". We recommend setting this to true when using Elasticsearch Shield. (default: false)
+ ReconnectOnError bool `json:"reconnect_on_error,omitempty"`
+ // This is debugging purpose option to enable to obtain transporter layer log. (default: false)
+ WithTransporterLog bool `json:"with_transporter_log,omitempty"`
+ // With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Type in payload. (default: application/json)
+ ContentType string `json:"content_type,omitempty"`
+ //With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.
+ IncludeIndexInUrl bool `json:"include_index_in_url,omitempty"`
+ // With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.
+ TimeParseErrorTag string `json:"time_parse_error_tag,omitempty"`
+ // With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. (default: excon)
+ HttpBackend string `json:"http_backend,omitempty"`
+
+	// By default, the Elasticsearch client uses Yajl as its JSON encoder/decoder. Oj is an alternative high performance JSON encoder/decoder. When this parameter is set to true, the Elasticsearch client uses Oj instead. (default: false)
+	PreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"`
+
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+}
+
+func (e *ElasticsearchOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ elasticsearch := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+ Type: "elasticsearch",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(e); err != nil {
+ return nil, err
+ } else {
+ elasticsearch.Params = params
+ }
+ if e.Buffer != nil {
+ if buffer, err := e.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ elasticsearch.SubDirectives = append(elasticsearch.SubDirectives, buffer)
+ }
+ }
+ return elasticsearch, nil
+}
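
A sketch wiring the Elasticsearch output together with a buffer; the host and other values are hypothetical. It should render a <match **> block of type "elasticsearch" with a nested <buffer> sub-directive.

package example

import (
	"github.com/banzaicloud/logging-operator/pkg/model/output"
	"github.com/banzaicloud/logging-operator/pkg/model/secret"
	"github.com/banzaicloud/logging-operator/pkg/model/types"
)

// elasticsearchMatch sends all events to a (hypothetical) in-cluster Elasticsearch
// service using the Logstash index format.
func elasticsearchMatch(loader secret.SecretLoader) (types.Directive, error) {
	e := &output.ElasticsearchOutput{
		Host:           "elasticsearch-master.logging.svc",
		Port:           9200,
		LogstashFormat: true,
		Buffer:         &output.Buffer{FlushInterval: "30s"},
	}
	return e.ToDirective(loader)
}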
diff --git a/pkg/model/output/file.go b/pkg/model/output/file.go
new file mode 100644
index 000000000..ece8a5db6
--- /dev/null
+++ b/pkg/model/output/file.go
@@ -0,0 +1,34 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+
+type FileOutputConfig struct {
+ Path string `json:"path"`
+}
+
+func (c *FileOutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "file",
+ Directive: "match",
+ Tag: "**",
+ }, c, secretLoader)
+}
diff --git a/pkg/model/output/format.go b/pkg/model/output/format.go
new file mode 100644
index 000000000..f57fc24d9
--- /dev/null
+++ b/pkg/model/output/format.go
@@ -0,0 +1,26 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+type Format struct {
+ // Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value (default: json)
+ // +kubebuilder:validation:Enum=out_file;json;ltsv;csv;msgpack;hash;single_value
+ Type string `json:"type,omitempty"`
+}
+
+func (f *Format) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ metadata := types.PluginMeta{
+ Directive: "format",
+ }
+ if f.Type != "" {
+ metadata.Type = f.Type
+ } else {
+ metadata.Type = "json"
+ }
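+	// Clear Type here so the flat directive mapper does not emit it again as a plain "type" parameter.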
+ f.Type = ""
+ return types.NewFlatDirective(metadata, f, secretLoader)
+}
diff --git a/pkg/model/output/forward.go b/pkg/model/output/forward.go
new file mode 100644
index 000000000..7d68ab2d1
--- /dev/null
+++ b/pkg/model/output/forward.go
@@ -0,0 +1,143 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/common"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+type ForwardOutput struct {
+	// Server definitions (at least one is required)
+ // +docLink:"Server,#Fluentd-Server"
+ FluentdServers []FluentdServer `json:"servers"`
+ // Change the protocol to at-least-once. The plugin waits the ack from destination's in_forward plugin.
+ RequireAckResponse bool `json:"require_ack_response,omitempty"`
+	// This option is used when require_ack_response is true. The default value is based on the popular tcp_syn_retries setting. (default: 190)
+ AckResponseTimeout int `json:"ack_response_timeout,omitempty"`
+ // The timeout time when sending event logs. (default: 60)
+ SendTimeout int `json:"send_timeout,omitempty"`
+ // The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised.
+ ConnectTimeout int `json:"connect_timeout,omitempty"`
+ // The wait time before accepting a server fault recovery. (default: 10)
+ RecoverWait int `json:"recover_wait,omitempty"`
+ // The transport protocol to use for heartbeats. Set "none" to disable heartbeat. [transport, tcp, udp, none]
+ HeartbeatType string `json:"heartbeat_type,omitempty"`
+	// The interval of the heartbeat packets. (default: 1)
+ HeartbeatInterval int `json:"heartbeat_interval,omitempty"`
+ // Use the "Phi accrual failure detector" to detect server failure. (default: true)
+ PhiFailureDetector bool `json:"phi_failure_detector,omitempty"`
+ // The threshold parameter used to detect server faults. (default: 16)
+ //`phi_threshold` is deeply related to `heartbeat_interval`. If you are using longer `heartbeat_interval`, please use the larger `phi_threshold`. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for `heartbeat_interval` 1s.
+ PhiThreshold int `json:"phi_threshold,omitempty"`
+ // The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter. (default: 60)
+ HardTimeout int `json:"hard_timeout,omitempty"`
+	// Set the TTL for expiring the DNS cache, in seconds. Set 0 to disable the DNS cache. (default: 0)
+ ExpireDnsCache int `json:"expire_dns_cache,omitempty"`
+	// Enable client-side DNS round robin. Uniformly and randomly pick an IP address to send data to when a hostname has several IP addresses.
+ // `heartbeat_type udp` is not available with `dns_round_robin true`. Use `heartbeat_type tcp` or `heartbeat_type none`.
+ DnsRoundRobin bool `json:"dns_round_robin,omitempty"`
+ // Ignore DNS resolution and errors at startup time.
+ IgnoreNetworkErrorsAtStartup bool `json:"ignore_network_errors_at_startup,omitempty"`
+ // The default version of TLS transport. [TLSv1_1, TLSv1_2] (default: TLSv1_2)
+ TlsVersion string `json:"tls_version,omitempty"`
+ // The cipher configuration of TLS transport. (default: ALL:!aNULL:!eNULL:!SSLv2)
+ TlsCiphers string `json:"tls_ciphers,omitempty"`
+ // Skip all verification of certificates or not. (default: false)
+ TlsInsecureMode bool `json:"tls_insecure_mode,omitempty"`
+ // Allow self signed certificates or not. (default: false)
+ TlsAllowSelfSignedCert bool `json:"tls_allow_self_signed_cert,omitempty"`
+ // Verify hostname of servers and certificates or not in TLS transport. (default: true)
+ TlsVerifyHostname bool `json:"tls_verify_hostname,omitempty"`
+ // The additional CA certificate path for TLS.
+ TlsCertPath string `json:"tls_cert_path,omitempty"`
+ // The client certificate path for TLS
+ TlsClientCertPath string `json:"tls_client_cert_path,omitempty"`
+ // The client private key path for TLS.
+ TlsClientPrivateKeyPath string `json:"tls_client_private_key_path,omitempty"`
+ // The client private key passphrase for TLS.
+ TlsClientPrivateKeyPassphrase string `json:"tls_client_private_key_passphrase,omitempty"`
+	// The certificate thumbprint for searching from the Windows system certstore. This parameter is for Windows only.
+ TlsCertThumbprint string `json:"tls_cert_thumbprint,omitempty"`
+ // The certificate logical store name on Windows system certstore. This parameter is for Windows only.
+ TlsCertLogicalStoreName string `json:"tls_cert_logical_store_name,omitempty"`
+ // Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only.
+ TlsCertUseEnterpriseStore bool `json:"tls_cert_use_enterprise_store,omitempty"`
+ // Enable keepalive connection. (default: false)
+ Keepalive bool `json:"keepalive,omitempty"`
+	// Expiration time of keepalive. The default value 0 keeps the connection alive as long as possible. (default: 0)
+ KeepaliveTimeout int `json:"keepalive_timeout,omitempty"`
+ // +docLink:"Security,/docs/plugins/common/security.md"
+ Security *common.Security `json:"security,omitempty"`
+ // Verify that a connection can be made with one of out_forward nodes at the time of startup. (default: false)
+ VerifyConnectionAtStartup bool `json:"verify_connection_at_startup,omitempty"`
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+}
+
+func (f *ForwardOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ forward := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+ Type: "forward",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(f); err != nil {
+ return nil, err
+ } else {
+ forward.Params = params
+ }
+ if f.Buffer != nil {
+ if buffer, err := f.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ forward.SubDirectives = append(forward.SubDirectives, buffer)
+ }
+ }
+ if f.Security != nil {
+		if security, err := f.Security.ToDirective(secretLoader); err != nil {
+			return nil, err
+		} else {
+			forward.SubDirectives = append(forward.SubDirectives, security)
+ }
+ }
+ if len(f.FluentdServers) > 0 {
+ for _, server := range f.FluentdServers {
+ if serv, err := server.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ forward.SubDirectives = append(forward.SubDirectives, serv)
+ }
+ }
+ }
+ return forward, nil
+}
+
+// +kubebuilder:object:generate=true
+// +docName:"Fluentd Server"
+// server
+type FluentdServer struct {
+ // The IP address or host name of the server.
+ Host string `json:"host"`
+	// The name of the server. Used for logging and certificate verification in TLS transport (when the host is an address).
+ Name string `json:"name,omitempty"`
+ // The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port. (default: 24224)
+ Port int `json:"port,omitempty"`
+ // The shared key per server.
+ SharedKey *secret.Secret `json:"shared_key,omitempty"`
+ // The username for authentication.
+ Username *secret.Secret `json:"username,omitempty"`
+ // The password for authentication.
+ Password *secret.Secret `json:"password,omitempty"`
+ // Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.
+ Standby bool `json:"standby,omitempty"`
+ // The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio. (default: 60).
+ Weight int `json:"weight,omitempty"`
+}
+
+func (f *FluentdServer) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Directive: "server",
+ }, f, secretLoader)
+}
diff --git a/pkg/model/output/gcs.go b/pkg/model/output/gcs.go
new file mode 100644
index 000000000..e0b0d7b92
--- /dev/null
+++ b/pkg/model/output/gcs.go
@@ -0,0 +1,104 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+type GCSOutput struct {
+ // Project identifier for GCS
+ Project string `json:"project"`
+ // Path of GCS service account credentials JSON file
+ Keyfile string `json:"keyfile,omitempty"`
+ // GCS service account credentials in JSON format
+ // +docLink:"Secret,./secret.md"
+ CredentialsJson *secret.Secret `json:"credentials_json,omitempty"`
+ // Number of times to retry requests on server error
+ ClientRetries int `json:"client_retries,omitempty"`
+ // Default timeout to use in requests
+ ClientTimeout int `json:"client_timeout,omitempty"`
+ // Name of a GCS bucket
+ Bucket string `json:"bucket"`
+ // Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})
+ ObjectKeyFormat string `json:"object_key_format,omitempty"`
+ // Path prefix of the files on GCS
+ Path string `json:"path,omitempty"`
+ // Archive format on GCS: gzip json text (default: gzip)
+ StoreAs string `json:"store_as,omitempty"`
+ // Enable the decompressive form of transcoding
+ Transcoding bool `json:"transcoding,omitempty"`
+	// Create the GCS bucket if it does not exist (default: true)
+ AutoCreateBucket bool `json:"auto_create_bucket,omitempty"`
+ // Max length of `%{hex_random}` placeholder(4-16) (default: 4)
+ HexRandomLength int `json:"hex_random_length,omitempty"`
+ // Overwrite already existing path (default: false)
+ Overwrite bool `json:"overwrite,omitempty"`
+ // Permission for the object in GCS: auth_read owner_full owner_read private project_private public_read
+	// +kubebuilder:validation:Enum=auth_read;owner_full;owner_read;private;project_private;public_read
+ Acl string `json:"acl,omitempty"`
+ // Storage class of the file: dra nearline coldline multi_regional regional standard
+	// +kubebuilder:validation:Enum=dra;nearline;coldline;multi_regional;regional;standard
+ StorageClass string `json:"storage_class,omitempty"`
+ // Customer-supplied, AES-256 encryption key
+ EncryptionKey string `json:"encryption_key,omitempty"`
+	// User provided web-safe keys and arbitrary string values that will be returned with requests for the file as "x-goog-meta-" response headers.
+ // +docLink:"Object Metadata,#ObjectMetadata"
+ ObjectMetadata []ObjectMetadata `json:"object_metadata,omitempty"`
+ // +docLink:"Format,./format.md"
+ Format *Format `json:"format,omitempty"`
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+}
+
+func (g *GCSOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ gcs := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+ Type: "gcs",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(g); err != nil {
+ return nil, err
+ } else {
+ gcs.Params = params
+ }
+ if g.Buffer != nil {
+ if buffer, err := g.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ gcs.SubDirectives = append(gcs.SubDirectives, buffer)
+ }
+ }
+ if g.Format != nil {
+ if format, err := g.Format.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ gcs.SubDirectives = append(gcs.SubDirectives, format)
+ }
+ }
+ if len(g.ObjectMetadata) > 0 {
+ for _, metadata := range g.ObjectMetadata {
+ if meta, err := metadata.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ gcs.SubDirectives = append(gcs.SubDirectives, meta)
+ }
+ }
+ }
+ return gcs, nil
+}
+
+type ObjectMetadata struct {
+ // Key
+ Key string `json:"key"`
+ // Value
+ Value string `json:"value"`
+}
+
+func (o *ObjectMetadata) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Directive: "object_metadata",
+ }, o, secretLoader)
+}
diff --git a/pkg/model/output/loki.go b/pkg/model/output/loki.go
new file mode 100644
index 000000000..e07e286a4
--- /dev/null
+++ b/pkg/model/output/loki.go
@@ -0,0 +1,49 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+// +docName:"Loki"
+// Fluentd output plugin to ship logs to a Loki server.
+type LokiOutput struct {
+	// The url of the Loki server to send logs to. (default: https://logs-us-west1.grafana.net)
+ Url string `json:"url,omitempty"`
+ // Specify a username if the Loki server requires authentication.
+ // +docLink:"Secret,./secret.md"
+ Username *secret.Secret `json:"username,omitempty"`
+ // Specify password if the Loki server requires authentication.
+ // +docLink:"Secret,./secret.md"
+ Password *secret.Secret `json:"password,omitempty"`
+ // Loki is a multi-tenant log storage platform and all requests sent must include a tenant.
+ Tenant string `json:"tenant,omitempty"`
+	// Set of labels to include with every Loki stream. (default: nil)
+ ExtraLabels bool `json:"extra_labels,omitempty"`
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+}
+
+func (l *LokiOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ loki := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+ Type: "kubernetes_loki",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(l); err != nil {
+ return nil, err
+ } else {
+ loki.Params = params
+ }
+ if l.Buffer != nil {
+ if buffer, err := l.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ loki.SubDirectives = append(loki.SubDirectives, buffer)
+ }
+ }
+ return loki, nil
+}
diff --git a/pkg/model/output/null.go b/pkg/model/output/null.go
new file mode 100644
index 000000000..2279f9a21
--- /dev/null
+++ b/pkg/model/output/null.go
@@ -0,0 +1,37 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+type NullOutputConfig struct {
+}
+
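+// NewNullOutputConfig returns an output config that discards all events using fluentd's null output plugin.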
+func NewNullOutputConfig() *NullOutputConfig {
+ return &NullOutputConfig{}
+}
+
+func (c *NullOutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "null",
+ Directive: "match",
+ Tag: "**",
+ }, c, secretLoader)
+}
diff --git a/pkg/model/output/oss.go b/pkg/model/output/oss.go
new file mode 100644
index 000000000..e9d8518a4
--- /dev/null
+++ b/pkg/model/output/oss.go
@@ -0,0 +1,85 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+// +docName:"Aliyun OSS"
+type OSSOutput struct {
+	// OSS endpoint to connect to
+ Endpoint string `json:"endpoint"`
+ // Your bucket name
+ Bucket string `json:"bucket"`
+ // Your access key id
+ // +docLink:"Secret,./secret.md"
+ AccessKeyId *secret.Secret `json:"access_key_id"`
+ // Your access secret key
+ // +docLink:"Secret,./secret.md"
+	AccessKeySecret *secret.Secret `json:"access_key_secret"`
+ // Path prefix of the files on OSS (default: fluent/logs)
+ Path string `json:"path,omitempty"`
+ // Upload crc enabled (default: true)
+ UploadCrcEnable bool `json:"upload_crc_enable,omitempty"`
+ // Download crc enabled (default: true)
+ DownloadCrcEnable bool `json:"download_crc_enable,omitempty"`
+ // Timeout for open connections (default: 10)
+ OpenTimeout int `json:"open_timeout,omitempty"`
+ // Timeout for read response (default: 120)
+ ReadTimeout int `json:"read_timeout,omitempty"`
+ // OSS SDK log directory (default: /var/log/td-agent)
+ OssSdkLogDir string `json:"oss_sdk_log_dir,omitempty"`
+ // The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})
+ KeyFormat string `json:"key_format,omitempty"`
+ // Archive format on OSS: gzip, json, text, lzo, lzma2 (default: gzip)
+ StoreAs string `json:"store_as,omitempty"`
+	// Create the OSS bucket if it does not exist (default: false)
+ AutoCreateBucket bool `json:"auto_create_bucket,omitempty"`
+ // Overwrite already existing path (default: false)
+ Overwrite bool `json:"overwrite,omitempty"`
+ // Check bucket if exists or not (default: true)
+ CheckBucket bool `json:"check_bucket,omitempty"`
+ // Check object before creation (default: true)
+ CheckObject bool `json:"check_object,omitempty"`
+ // The length of `%{hex_random}` placeholder(4-16) (default: 4)
+ HexRandomLength int `json:"hex_random_length,omitempty"`
+ // `sprintf` format for `%{index}` (default: %d)
+ IndexFormat string `json:"index_format,omitempty"`
+ // Given a threshold to treat events as delay, output warning logs if delayed events were put into OSS
+ WarnForDelay string `json:"warn_for_delay,omitempty"`
+ // +docLink:"Format,./format.md"
+ Format *Format `json:"format,omitempty"`
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+}
+
+func (o *OSSOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ oss := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+			Type:      "oss",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(o); err != nil {
+ return nil, err
+ } else {
+ oss.Params = params
+ }
+ if o.Buffer != nil {
+ if buffer, err := o.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ oss.SubDirectives = append(oss.SubDirectives, buffer)
+ }
+ }
+ if o.Format != nil {
+ if format, err := o.Format.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ oss.SubDirectives = append(oss.SubDirectives, format)
+ }
+ }
+ return oss, nil
+}
diff --git a/pkg/model/output/s3.go b/pkg/model/output/s3.go
new file mode 100644
index 000000000..03c1dab06
--- /dev/null
+++ b/pkg/model/output/s3.go
@@ -0,0 +1,233 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package output
+
+import (
+ "errors"
+
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +docName:"Amazon S3 plugin for Fluentd"
+// **s3** output plugin buffers event logs in a local file and uploads them to S3 periodically. This plugin splits files exactly by using the time of event logs (not the time when the logs are received). For example, if a log '2011-01-02 message B' arrives, and then another log '2011-01-03 message B' arrives in this order, the former is stored in the "20110102.gz" file and the latter in the "20110103.gz" file.
+type _doc interface{}
+
+// +kubebuilder:object:generate=true
+// +docName:"Output Config"
+type S3OutputConfig struct {
+ // AWS access key id
+ // +docLink:"Secret,./secret.md"
+ AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"`
+ // AWS secret key.
+ // +docLink:"Secret,./secret.md"
+ AwsSecretKey *secret.Secret `json:"aws_sec_key,omitempty"`
+ // Check AWS key on start
+ CheckApikeyOnStart string `json:"check_apikey_on_start,omitempty"`
+ // Allows grantee to read the object data and its metadata
+ GrantRead string `json:"grant_read,omitempty"`
+ // Overwrite already existing path
+ Overwrite string `json:"overwrite,omitempty"`
+ // Path prefix of the files on S3
+ Path string `json:"path,omitempty"`
+ // Allows grantee to write the ACL for the applicable object
+ GrantWriteAcp string `json:"grant_write_acp,omitempty"`
+ // Check bucket if exists or not
+ CheckBucket string `json:"check_bucket,omitempty"`
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data
+	SseCustomerKey string `json:"sse_customer_key,omitempty"`
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321
+ SseCustomerKeyMd5 string `json:"sse_customer_key_md5,omitempty"`
+ // AWS SDK uses MD5 for API request/response by default
+ ComputeChecksums string `json:"compute_checksums,omitempty"`
+ // Given a threshold to treat events as delay, output warning logs if delayed events were put into s3
+ WarnForDelay string `json:"warn_for_delay,omitempty"`
+ // Use aws-sdk-ruby bundled cert
+ UseBundledCert string `json:"use_bundled_cert,omitempty"`
+ // Custom S3 endpoint (like minio)
+ S3Endpoint string `json:"s3_endpoint,omitempty"`
+ // Specifies the AWS KMS key ID to use for object encryption
+ SsekmsKeyId string `json:"ssekms_key_id,omitempty"`
+ // Arbitrary S3 metadata headers to set for the object
+ S3Metadata string `json:"s3_metadata,omitempty"`
+ // If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain
+ ForcePathStyle string `json:"force_path_style,omitempty"`
+ // Create S3 bucket if it does not exists
+ AutoCreateBucket string `json:"auto_create_bucket,omitempty"`
+ // `sprintf` format for `%{index}`
+ IndexFormat string `json:"index_format,omitempty"`
+ // Signature version for API Request (s3,v4)
+ SignatureVersion string `json:"signature_version,omitempty"`
+ // If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket
+ EnableTransferAcceleration string `json:"enable_transfer_acceleration,omitempty"`
+ // If false, the certificate of endpoint will not be verified
+ SslVerifyPeer string `json:"ssl_verify_peer,omitempty"`
+ // URI of proxy environment
+ ProxyUri string `json:"proxy_uri,omitempty"`
+ // Allows grantee to read the object ACL
+ GrantReadAcp string `json:"grant_read_acp,omitempty"`
+ // Check object before creation
+ CheckObject string `json:"check_object,omitempty"`
+	// Specifies the algorithm to use when encrypting the object
+ SseCustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"`
+ // The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms)
+ UseServerSideEncryption string `json:"use_server_side_encryption,omitempty"`
+ // S3 region name
+ S3Region string `json:"s3_region,omitempty"`
+ // Permission for the object in S3
+ Acl string `json:"acl,omitempty"`
+ // Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object
+ GrantFullControl string `json:"grant_full_control,omitempty"`
+ // The length of `%{hex_random}` placeholder(4-16)
+ HexRandomLength string `json:"hex_random_length,omitempty"`
+ // The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})
+ S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty"`
+ // S3 bucket name
+ S3Bucket string `json:"s3_bucket"`
+ // Archive format on S3
+ StoreAs string `json:"store_as,omitempty"`
+	// The type of storage to use for the object (STANDARD, REDUCED_REDUNDANCY, STANDARD_IA)
+ StorageClass string `json:"storage_class,omitempty"`
+ // The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role
+ AwsIamRetries string `json:"aws_iam_retries,omitempty"`
+ // +docLink:"Buffer,./buffer.md"
+ Buffer *Buffer `json:"buffer,omitempty"`
+ // +docLink:"Format,./format.md"
+ Format *Format `json:"format,omitempty"`
+ // +docLink:"Assume Role Credentials,#Assume-Role-Credentials"
+ AssumeRoleCredentials *S3AssumeRoleCredentials `json:"assume_role_credentials,omitempty"`
+ // +docLink:"Instance Profile Credentials,#Instance-Profile-Credentials"
+ InstanceProfileCredentials *S3InstanceProfileCredentials `json:"instance_profile_credentials,omitempty"`
+ // +docLink:"Shared Credentials,#Shared-Credentials"
+ SharedCredentials *S3SharedCredentials `json:"shared_credentials,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+// +docName:"Assume Role Credentials"
+// assume_role_credentials
+type S3AssumeRoleCredentials struct {
+ // The Amazon Resource Name (ARN) of the role to assume
+ RoleArn string `json:"role_arn"`
+ // An identifier for the assumed role session
+ RoleSessionName string `json:"role_session_name"`
+ // An IAM policy in JSON format
+ Policy string `json:"policy,omitempty"`
+ // The duration, in seconds, of the role session (900-3600)
+ DurationSeconds string `json:"duration_seconds,omitempty"`
+ // A unique identifier that is used by third parties when assuming roles in their customers' accounts.
+ ExternalId string `json:"external_id,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+// +docName:"Instance Profile Credentials"
+// instance_profile_credentials
+type S3InstanceProfileCredentials struct {
+ // IP address (default:169.254.169.254)
+ IpAddress string `json:"ip_address,omitempty"`
+ // Port number (default:80)
+ Port string `json:"port,omitempty"`
+ // Number of seconds to wait for the connection to open
+ HttpOpenTimeout string `json:"http_open_timeout,omitempty"`
+ // Number of seconds to wait for one block to be read
+ HttpReadTimeout string `json:"http_read_timeout,omitempty"`
+ // Number of times to retry when retrieving credentials
+ Retries string `json:"retries,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+// +docName:"Shared Credentials"
+// shared_credentials
+type S3SharedCredentials struct {
+ // Profile name. Default to 'default' or ENV['AWS_PROFILE']
+ ProfileName string `json:"profile_name,omitempty"`
+ // Path to the shared file. (default: $HOME/.aws/credentials)
+ Path string `json:"path,omitempty"`
+}
+
+func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ s3 := &types.OutputPlugin{
+ PluginMeta: types.PluginMeta{
+ Type: "s3",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+ if params, err := types.NewStructToStringMapper(secretLoader).StringsMap(c); err != nil {
+ return nil, err
+ } else {
+ s3.Params = params
+ }
+ if c.Buffer != nil {
+ if buffer, err := c.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, buffer)
+ }
+ }
+ if c.Format != nil {
+ if format, err := c.Format.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, format)
+ }
+ }
+ if err := c.validateAndSetCredentials(s3, secretLoader); err != nil {
+ return nil, err
+ }
+ return s3, nil
+}
+
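+// validateAndSetCredentials renders the configured credential provider sub-directives and enforces that they are
+// mutually exclusive; static AWS keys are only required when no credential provider is configured.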
+func (c *S3OutputConfig) validateAndSetCredentials(s3 *types.OutputPlugin, secretLoader secret.SecretLoader) error {
+ if c.AssumeRoleCredentials != nil {
+ if directive, err := types.NewFlatDirective(types.PluginMeta{Directive: "assume_role_credentials"},
+ c.AssumeRoleCredentials, secretLoader); err != nil {
+ return err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, directive)
+ }
+ }
+ if c.InstanceProfileCredentials != nil {
+ if c.AssumeRoleCredentials != nil {
+ return errors.New("assume_role_credentials and instance_profile_credentials cannot be set simultaneously")
+ }
+ if directive, err := types.NewFlatDirective(types.PluginMeta{Directive: "instance_profile_credentials"},
+ c.InstanceProfileCredentials, secretLoader); err != nil {
+ return err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, directive)
+ }
+ }
+ if c.SharedCredentials != nil {
+ if c.AssumeRoleCredentials != nil {
+ return errors.New("assume_role_credentials and shared_credentials cannot be set simultaneously")
+ }
+ if c.InstanceProfileCredentials != nil {
+ return errors.New("instance_profile_credentials and shared_credentials cannot be set simultaneously")
+ }
+ if directive, err := types.NewFlatDirective(types.PluginMeta{Directive: "shared_credentials"},
+ c.SharedCredentials, secretLoader); err != nil {
+ return err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, directive)
+ }
+ }
+ if c.AssumeRoleCredentials == nil &&
+ c.InstanceProfileCredentials == nil &&
+ c.SharedCredentials == nil &&
+ (c.AwsAccessKey == nil || c.AwsSecretKey == nil) {
+ return errors.New("One of AssumeRoleCredentials or SharedCredentials or InstanceProfileCredentials must be configured")
+ }
+ return nil
+}
diff --git a/pkg/model/output/sumologic.go b/pkg/model/output/sumologic.go
new file mode 100644
index 000000000..9afd89bd5
--- /dev/null
+++ b/pkg/model/output/sumologic.go
@@ -0,0 +1,48 @@
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+// +kubebuilder:object:generate=true
+type SumologicOutput struct {
+ // The type of data that will be sent to Sumo Logic, either logs or metrics (default: logs)
+ DataType string `json:"data_type,omitempty"`
+ // SumoLogic HTTP Collector URL
+ Endpoint *secret.Secret `json:"endpoint"`
+ // Verify ssl certificate. (default: true)
+ VerifySsl bool `json:"verify_ssl,omitempty"`
+ // The format of metrics you will be sending, either graphite or carbon2 or prometheus (default: graphite)
+ MetricDataFormat string `json:"metric_data_format,omitempty"`
+ // Format to post logs into Sumo. (default: json)
+ LogFormat string `json:"log_format,omitempty"`
+ // Used to specify the key when merging json or sending logs in text format (default: message)
+ LogKey string `json:"log_key,omitempty"`
+ // Set _sourceCategory metadata field within SumoLogic (default: nil)
+ SourceCategory string `json:"source_category,omitempty"`
+ // Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil)
+ SourceName string `json:"source_name"`
+ // Set as source::path_key's value so that the source_name can be extracted from Fluentd's buffer (default: source_name)
+ SourceNameKey string `json:"source_name_key,omitempty"`
+ // Set _sourceHost metadata field within SumoLogic (default: nil)
+ SourceHost string `json:"source_host,omitempty"`
+ // Set timeout seconds to wait until connection is opened. (default: 60)
+ OpenTimeout int `json:"open_timeout,omitempty"`
+ // Add timestamp (or timestamp_key) field to logs before sending to sumologic (default: true)
+ AddTimestamp bool `json:"add_timestamp,omitempty"`
+ // Field name when add_timestamp is on (default: timestamp)
+ TimestampKey string `json:"timestamp_key,omitempty"`
+ // Add the uri of the proxy environment if present.
+ ProxyUri string `json:"proxy_uri,omitempty"`
+ // Option to disable cookies on the HTTP Client. (default: false)
+ DisableCookies bool `json:"disable_cookies,omitempty"`
+}
+
+func (s *SumologicOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "sumologic",
+ Directive: "match",
+ Tag: "**",
+ }, s, secretLoader)
+}
diff --git a/pkg/model/output/zz_generated.deepcopy.go b/pkg/model/output/zz_generated.deepcopy.go
new file mode 100644
index 000000000..3a30e18d6
--- /dev/null
+++ b/pkg/model/output/zz_generated.deepcopy.go
@@ -0,0 +1,416 @@
+// +build !ignore_autogenerated
+
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package output
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/common"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureStorage) DeepCopyInto(out *AzureStorage) {
+ *out = *in
+ if in.AzureStorageAccount != nil {
+ in, out := &in.AzureStorageAccount, &out.AzureStorageAccount
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AzureStorageAccessKey != nil {
+ in, out := &in.AzureStorageAccessKey, &out.AzureStorageAccessKey
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStorage.
+func (in *AzureStorage) DeepCopy() *AzureStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Buffer) DeepCopyInto(out *Buffer) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Buffer.
+func (in *Buffer) DeepCopy() *Buffer {
+ if in == nil {
+ return nil
+ }
+ out := new(Buffer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ElasticsearchOutput) DeepCopyInto(out *ElasticsearchOutput) {
+ *out = *in
+ if in.Password != nil {
+ in, out := &in.Password, &out.Password
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchOutput.
+func (in *ElasticsearchOutput) DeepCopy() *ElasticsearchOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(ElasticsearchOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileOutputConfig) DeepCopyInto(out *FileOutputConfig) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileOutputConfig.
+func (in *FileOutputConfig) DeepCopy() *FileOutputConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(FileOutputConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FluentdServer) DeepCopyInto(out *FluentdServer) {
+ *out = *in
+ if in.SharedKey != nil {
+ in, out := &in.SharedKey, &out.SharedKey
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Username != nil {
+ in, out := &in.Username, &out.Username
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Password != nil {
+ in, out := &in.Password, &out.Password
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdServer.
+func (in *FluentdServer) DeepCopy() *FluentdServer {
+ if in == nil {
+ return nil
+ }
+ out := new(FluentdServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Format) DeepCopyInto(out *Format) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Format.
+func (in *Format) DeepCopy() *Format {
+ if in == nil {
+ return nil
+ }
+ out := new(Format)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForwardOutput) DeepCopyInto(out *ForwardOutput) {
+ *out = *in
+ if in.FluentdServers != nil {
+ in, out := &in.FluentdServers, &out.FluentdServers
+ *out = make([]FluentdServer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Security != nil {
+ in, out := &in.Security, &out.Security
+ *out = new(common.Security)
+ **out = **in
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardOutput.
+func (in *ForwardOutput) DeepCopy() *ForwardOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(ForwardOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCSOutput) DeepCopyInto(out *GCSOutput) {
+ *out = *in
+ if in.CredentialsJson != nil {
+ in, out := &in.CredentialsJson, &out.CredentialsJson
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ObjectMetadata != nil {
+ in, out := &in.ObjectMetadata, &out.ObjectMetadata
+ *out = make([]ObjectMetadata, len(*in))
+ copy(*out, *in)
+ }
+ if in.Format != nil {
+ in, out := &in.Format, &out.Format
+ *out = new(Format)
+ **out = **in
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSOutput.
+func (in *GCSOutput) DeepCopy() *GCSOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(GCSOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiOutput) DeepCopyInto(out *LokiOutput) {
+ *out = *in
+ if in.Username != nil {
+ in, out := &in.Username, &out.Username
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Password != nil {
+ in, out := &in.Password, &out.Password
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiOutput.
+func (in *LokiOutput) DeepCopy() *LokiOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NullOutputConfig) DeepCopyInto(out *NullOutputConfig) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NullOutputConfig.
+func (in *NullOutputConfig) DeepCopy() *NullOutputConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NullOutputConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSSOutput) DeepCopyInto(out *OSSOutput) {
+ *out = *in
+ if in.AccessKeyId != nil {
+ in, out := &in.AccessKeyId, &out.AccessKeyId
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+	if in.AccessKeySecret != nil {
+		in, out := &in.AccessKeySecret, &out.AccessKeySecret
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Format != nil {
+ in, out := &in.Format, &out.Format
+ *out = new(Format)
+ **out = **in
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSOutput.
+func (in *OSSOutput) DeepCopy() *OSSOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(OSSOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3AssumeRoleCredentials) DeepCopyInto(out *S3AssumeRoleCredentials) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3AssumeRoleCredentials.
+func (in *S3AssumeRoleCredentials) DeepCopy() *S3AssumeRoleCredentials {
+ if in == nil {
+ return nil
+ }
+ out := new(S3AssumeRoleCredentials)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3InstanceProfileCredentials) DeepCopyInto(out *S3InstanceProfileCredentials) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceProfileCredentials.
+func (in *S3InstanceProfileCredentials) DeepCopy() *S3InstanceProfileCredentials {
+ if in == nil {
+ return nil
+ }
+ out := new(S3InstanceProfileCredentials)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3OutputConfig) DeepCopyInto(out *S3OutputConfig) {
+ *out = *in
+ if in.AwsAccessKey != nil {
+ in, out := &in.AwsAccessKey, &out.AwsAccessKey
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AwsSecretKey != nil {
+ in, out := &in.AwsSecretKey, &out.AwsSecretKey
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Buffer != nil {
+ in, out := &in.Buffer, &out.Buffer
+ *out = new(Buffer)
+ **out = **in
+ }
+ if in.Format != nil {
+ in, out := &in.Format, &out.Format
+ *out = new(Format)
+ **out = **in
+ }
+ if in.AssumeRoleCredentials != nil {
+ in, out := &in.AssumeRoleCredentials, &out.AssumeRoleCredentials
+ *out = new(S3AssumeRoleCredentials)
+ **out = **in
+ }
+ if in.InstanceProfileCredentials != nil {
+ in, out := &in.InstanceProfileCredentials, &out.InstanceProfileCredentials
+ *out = new(S3InstanceProfileCredentials)
+ **out = **in
+ }
+ if in.SharedCredentials != nil {
+ in, out := &in.SharedCredentials, &out.SharedCredentials
+ *out = new(S3SharedCredentials)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputConfig.
+func (in *S3OutputConfig) DeepCopy() *S3OutputConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(S3OutputConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3SharedCredentials) DeepCopyInto(out *S3SharedCredentials) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SharedCredentials.
+func (in *S3SharedCredentials) DeepCopy() *S3SharedCredentials {
+ if in == nil {
+ return nil
+ }
+ out := new(S3SharedCredentials)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SumologicOutput) DeepCopyInto(out *SumologicOutput) {
+ *out = *in
+ if in.Endpoint != nil {
+ in, out := &in.Endpoint, &out.Endpoint
+ *out = new(secret.Secret)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SumologicOutput.
+func (in *SumologicOutput) DeepCopy() *SumologicOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(SumologicOutput)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/model/render/fluent.go b/pkg/model/render/fluent.go
new file mode 100644
index 000000000..8be294809
--- /dev/null
+++ b/pkg/model/render/fluent.go
@@ -0,0 +1,89 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package render
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+ "github.com/banzaicloud/logging-operator/pkg/util"
+ "github.com/goph/emperror"
+)
+
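+// FluentRender renders a fluentd configuration to Out, indenting nested directives by Indent spaces per level.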
+type FluentRender struct {
+ Out io.Writer
+ Indent int
+}
+
+func (f *FluentRender) Render(config types.FluentConfig) error {
+ return f.RenderDirectives(config.GetDirectives(), 0)
+}
+
+func (f *FluentRender) RenderDirectives(directives []types.Directive, indent int) error {
+ for _, d := range directives {
+ meta := d.GetPluginMeta()
+ if meta.Directive == "" {
+			return fmt.Errorf("directive must have a name: %v", meta)
+ }
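+		// Render the opening tag, e.g. "<match **>", followed by the reserved @ parameters.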
+ f.indented(indent, "<%s%s>", meta.Directive, tag(meta.Tag))
+ if meta.Type != "" {
+ f.indented(indent+f.Indent, "@type %s", meta.Type)
+ }
+ if meta.Id != "" {
+ f.indented(indent+f.Indent, "@id %s", meta.Id)
+ }
+ if meta.Label != "" {
+ f.indented(indent+f.Indent, "@label %s", meta.Label)
+ }
+ if meta.LogLevel != "" {
+ f.indented(indent+f.Indent, "@log_level %s", meta.LogLevel)
+ }
+ if len(d.GetParams()) > 0 {
+ for _, k := range util.OrderedStringMap(d.GetParams()).Keys() {
+ f.indented(indent+f.Indent, "%s %s", k, d.GetParams()[k])
+ }
+ }
+ if len(d.GetSections()) > 0 {
+ err := f.RenderDirectives(d.GetSections(), indent+f.Indent)
+ if err != nil {
+ return emperror.Wrapf(err, "failed to render sections for %s", d.GetPluginMeta().Directive)
+ }
+ }
+		f.indented(indent, "</%s>", meta.Directive)
+ }
+ return nil
+}
+
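+// indented writes a formatted, possibly multi-line value, indenting every non-empty line and leaving empty lines unchanged.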
+func (f *FluentRender) indented(indent int, format string, values ...interface{}) {
+ indentString := strings.Repeat(" ", indent)
+ in := fmt.Sprintf(format, values...)
+ for _, line := range strings.Split(in, "\n") {
+ if line != "" {
+ fmt.Fprint(f.Out, indentString+line+"\n")
+ } else {
+ fmt.Fprintln(f.Out, "")
+ }
+ }
+
+}
+
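+// tag prefixes a non-empty directive tag with a space so it can be appended directly after the directive name.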
+func tag(tag string) string {
+ if tag != "" {
+ return " " + tag
+ }
+ return tag
+}
diff --git a/pkg/model/render/fluent_test.go b/pkg/model/render/fluent_test.go
new file mode 100644
index 000000000..4483e4766
--- /dev/null
+++ b/pkg/model/render/fluent_test.go
@@ -0,0 +1,571 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package render_test
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/MakeNowJust/heredoc"
+ "github.com/andreyvit/diff"
+ "github.com/banzaicloud/logging-operator/pkg/model/filter"
+ "github.com/banzaicloud/logging-operator/pkg/model/input"
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ "github.com/banzaicloud/logging-operator/pkg/model/render"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+ "github.com/banzaicloud/logging-operator/pkg/plugins"
+)
+
+func TestRenderDirective(t *testing.T) {
+
+ var tests = []struct {
+ name string
+ directive types.Directive
+ expected string
+ reproduce int
+ }{
+ {
+ name: "single level just name",
+ directive: &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "match",
+ },
+ },
+			expected: heredoc.Doc(`
+            <match>
+            </match>
+			`,
+ ),
+ },
+ {
+ name: "single level with tag and attributes",
+ directive: &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "match",
+ Tag: "tag",
+ },
+ Params: map[string]string{
+ "path": "file",
+ },
+ },
+			expected: heredoc.Doc(`
+            <match tag>
+              path file
+            </match>
+			`,
+ ),
+ },
+ {
+ name: "single level with just tag",
+ directive: &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "match",
+ Tag: "tag",
+ },
+ },
+			expected: heredoc.Doc(`
+            <match tag>
+            </match>
+			`,
+ ),
+ },
+ {
+ name: "single level with just attributes",
+ directive: &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "match",
+ },
+ Params: map[string]string{
+ "path": "file",
+ },
+ },
+			expected: heredoc.Doc(`
+            <match>
+              path file
+            </match>
+			`,
+ ),
+ },
+ {
+ name: "two levels",
+ directive: &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "match",
+ },
+ Params: map[string]string{
+ "path": "file",
+ },
+ SubDirectives: []types.Directive{
+ &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "router1",
+ },
+ Params: map[string]string{
+ "namespace": "asd",
+ "labels": "{\"a\":\"b\"}",
+ },
+ },
+ &types.GenericDirective{
+ PluginMeta: types.PluginMeta{
+ Directive: "router2",
+ },
+ Params: map[string]string{
+ "namespace": "asd2",
+ },
+ },
+ },
+ },
+			expected: heredoc.Doc(`
+            <match>
+              path file
+              <router1>
+                labels {"a":"b"}
+                namespace asd
+              </router1>
+              <router2>
+                namespace asd2
+              </router2>
+            </match>
+			`,
+ ),
+ },
+ {
+ name: "tail input",
+ directive: toDirective(t, input.NewTailInputConfig("/path/to/input")),
+			expected: heredoc.Doc(`
+            <source>
+              @type tail
+              path /path/to/input
+            </source>
+			`,
+ ),
+ },
+ {
+ name: "stdout filter",
+ directive: toDirective(t, filter.NewStdOutFilterConfig()),
+			expected: heredoc.Doc(`
+            <filter **>
+              @type stdout
+            </filter>
+			`,
+ ),
+ },
+ {
+			name:      "null output",
+			directive: toDirective(t, output.NewNullOutputConfig()),
+			expected: heredoc.Doc(`
+            <match **>
+              @type null
+            </match>
+			`,
+ ),
+ },
+ {
+ name: "empty flow",
+ directive: newFlowOrPanic("", nil),
+			expected: heredoc.Doc(`
+            <label @d41d8cd98f00b204e9800998ecf8427e>
+            </label>
+			`,
+ ),
+ },
+ {
+ name: "namespace flow",
+ directive: newFlowOrPanic("test", nil),
+			expected: heredoc.Doc(`
+            <label @098f6bcd4621d373cade4e832627b4f6>
+            </label>
+			`,
+ ),
+ },
+ {
+ name: "namespace and labels flow",
+ directive: newFlowOrPanic("test", map[string]string{
+ "key": "value",
+ "a": "b",
+ }),
+ expected: heredoc.Doc(`
+
+ `,
+ ),
+ // run multiple times to make sure the label is stable
+ reproduce: 10,
+ },
+ {
+ name: "global router",
+ directive: types.NewRouter().
+ AddRoute(
+ newFlowOrPanic("", nil),
+ ),
+			expected: heredoc.Doc(`
+            <match **>
+              @type label_router
+              <route>
+                @label @d41d8cd98f00b204e9800998ecf8427e
+              </route>
+            </match>
+			`,
+ },
+ {
+ name: "namespaced router",
+ directive: types.NewRouter().
+ AddRoute(
+ newFlowOrPanic("test", nil),
+ ),
+			expected: heredoc.Doc(`
+            <match **>
+              @type label_router
+              <route>
+                @label @098f6bcd4621d373cade4e832627b4f6
+                namespace test
+              </route>
+            </match>
+			`,
+ },
+ {
+ name: "namespaced router with labels",
+ directive: types.NewRouter().
+ AddRoute(
+ newFlowOrPanic("test", map[string]string{"a": "b", "c": "d"}),
+ ),
+			expected: heredoc.Doc(`
+            <match **>
+              @type label_router
+              <route>
+                @label @092f5fa58e4f619d739f5b65f2ed38bc
+                labels a:b,c:d
+                namespace test
+              </route>
+            </match>
+			`,
+ // run multiple times to make sure the label is stable
+ reproduce: 10,
+ },
+ }
+ for _, test := range tests {
+ for i := 0; i <= test.reproduce; i++ {
+ b := bytes.Buffer{}
+ renderer := render.FluentRender{
+ Out: &b,
+ Indent: 2,
+ }
+ _ = renderer.RenderDirectives([]types.Directive{
+ test.directive,
+ }, 0)
+ if a, e := diff.TrimLinesInString(b.String()), diff.TrimLinesInString(test.expected); a != e {
+ t.Errorf("[%s] Result does not match (-actual vs +expected):\n%v", test.name, diff.LineDiff(a, e))
+ }
+ }
+
+ }
+}
+
+func TestMultipleOutput(t *testing.T) {
+ system := types.NewSystem(toDirective(t, input.NewTailInputConfig("input.log")), types.NewRouter())
+
+ flowObj, err := types.NewFlow(
+ "ns-test",
+ map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ flowObj.
+ WithFilters(toDirective(t, filter.NewStdOutFilterConfig())).
+ WithOutputs(toDirective(t, output.NewNullOutputConfig())).
+ WithOutputs(toDirective(t, output.NewNullOutputConfig()))
+
+ err = system.RegisterFlow(flowObj)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fluentConfig, err := system.Build()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := &bytes.Buffer{}
+ renderer := render.FluentRender{
+ Out: b,
+ Indent: 2,
+ }
+ err = renderer.Render(fluentConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	expected := `
+<source>
+  @type tail
+  path input.log
+</source>
+<match **>
+  @type label_router
+  <route>
+    @label @901f778f9602a78e8fd702c1973d8d8d
+    labels key1:val1,key2:val2
+    namespace ns-test
+  </route>
+</match>
+<label @901f778f9602a78e8fd702c1973d8d8d>
+  <filter **>
+    @type stdout
+  </filter>
+  <match **>
+    @type copy
+    <store>
+      @type null
+    </store>
+    <store>
+      @type null
+    </store>
+  </match>
+</label>
+`
+
+ if a, e := diff.TrimLinesInString(b.String()), diff.TrimLinesInString(expected); a != e {
+ t.Errorf("Result does not match (-actual vs +expected):\n%v\nActual: %s", diff.LineDiff(a, e), b.String())
+ }
+}
+
+func TestRenderFullFluentConfig(t *testing.T) {
+ system := types.NewSystem(toDirective(t, input.NewTailInputConfig("input.log")), types.NewRouter())
+
+ flowObj, err := types.NewFlow(
+ "ns-test",
+ map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ flowObj.
+ WithFilters(toDirective(t, filter.NewStdOutFilterConfig())).
+ WithOutputs(toDirective(t, output.NewNullOutputConfig()))
+
+ err = system.RegisterFlow(flowObj)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fluentConfig, err := system.Build()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := &bytes.Buffer{}
+ renderer := render.FluentRender{
+ Out: b,
+ Indent: 2,
+ }
+ err = renderer.Render(fluentConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	expected := `
+<source>
+  @type tail
+  path input.log
+</source>
+<match **>
+  @type label_router
+  <route>
+    @label @901f778f9602a78e8fd702c1973d8d8d
+    labels key1:val1,key2:val2
+    namespace ns-test
+  </route>
+</match>
+<label @901f778f9602a78e8fd702c1973d8d8d>
+  <filter **>
+    @type stdout
+  </filter>
+  <match **>
+    @type null
+  </match>
+</label>
+`
+
+ if a, e := diff.TrimLinesInString(b.String()), diff.TrimLinesInString(expected); a != e {
+ t.Errorf("Result does not match (-actual vs +expected):\n%v\nActual: %s", diff.LineDiff(a, e), b.String())
+ }
+}
+
+func TestRenderS3(t *testing.T) {
+ table := []struct {
+ name string
+ s3Config output.S3OutputConfig
+ expected string
+ err string
+ }{
+ {
+ name: "assumerole",
+ s3Config: output.S3OutputConfig{
+ Path: "/var/buffer",
+ S3Bucket: "test_bucket",
+ Buffer: &output.Buffer{
+ RetryForever: true,
+ Path: "asd",
+ },
+ AssumeRoleCredentials: &output.S3AssumeRoleCredentials{
+ RoleArn: "asd",
+ RoleSessionName: "lkj",
+ },
+ },
+			expected: ` @type s3
+  path /var/buffer
+  s3_bucket test_bucket
+  <buffer tag,time>
+    @type file
+    path asd
+    retry_forever true
+    timekey 10m
+  </buffer>
+  <assume_role_credentials>
+    role_arn asd
+    role_session_name lkj
+  </assume_role_credentials>
+`,
+ },
+ {
+ name: "instanceprofile",
+ s3Config: output.S3OutputConfig{
+ Path: "/var/buffer",
+ S3Bucket: "test_bucket",
+ InstanceProfileCredentials: &output.S3InstanceProfileCredentials{},
+ },
+			expected: ` @type s3
+  path /var/buffer
+  s3_bucket test_bucket
+  <instance_profile_credentials>
+  </instance_profile_credentials>
+`,
+ },
+ {
+ name: "shared",
+ s3Config: output.S3OutputConfig{
+ Path: "/var/buffer",
+ S3Bucket: "test_bucket",
+ SharedCredentials: &output.S3SharedCredentials{
+ Path: "e",
+ ProfileName: "f",
+ },
+ },
+			expected: ` @type s3
+  path /var/buffer
+  s3_bucket test_bucket
+  <shared_credentials>
+    path e
+    profile_name f
+  </shared_credentials>
+`,
+ },
+ {
+ name: "missing auth",
+ s3Config: output.S3OutputConfig{
+ Path: "/var/buffer",
+ S3Bucket: "test_bucket",
+ },
+ err: "One of AssumeRoleCredentials or SharedCredentials or InstanceProfileCredentials must be configured",
+ },
+ }
+ for _, item := range table {
+ t.Logf("> %s\n", item.name)
+ err := ValidateRenderS3(t, item.s3Config, item.expected)
+ if item.err != "" {
+ if err == nil {
+ t.Errorf("expected error: %s", item.err)
+ continue
+ }
+ if err.Error() != item.err {
+ t.Errorf("expected error: %s got %s", item.err, err)
+ continue
+ }
+ continue
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func ValidateRenderS3(t *testing.T, s3Config output.S3OutputConfig, expected string) error {
+ system := types.NewSystem(toDirective(t, input.NewTailInputConfig("input.log")), types.NewRouter())
+
+ s3Plugin, err := s3Config.ToDirective(secret.NewSecretLoader(nil, ""))
+ if err != nil {
+ return err
+ }
+ flowObj, err := types.NewFlow(
+ "ns-test",
+ map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ })
+ if err != nil {
+ return err
+ }
+ flowObj.WithOutputs(s3Plugin)
+
+ err = system.RegisterFlow(flowObj)
+ if err != nil {
+ return err
+ }
+
+ fluentConfig, err := system.Build()
+ if err != nil {
+ return err
+ }
+
+ b := &bytes.Buffer{}
+ renderer := render.FluentRender{
+ Out: b,
+ Indent: 2,
+ }
+ err = renderer.Render(fluentConfig)
+ if err != nil {
+ return err
+ }
+
+ expected = fmt.Sprintf(`
+<source>
+  @type tail
+  path input.log
+</source>
+<match **>
+  @type label_router
+  <route>
+    @label @901f778f9602a78e8fd702c1973d8d8d
+    labels key1:val1,key2:val2
+    namespace ns-test
+  </route>
+</match>
+<label @901f778f9602a78e8fd702c1973d8d8d>
+  <match **>
+    %s
+  </match>
+</label>
+`, expected)
+ if a, e := diff.TrimLinesInString(b.String()), diff.TrimLinesInString(expected); a != e {
+ t.Errorf("Result does not match (-actual vs +expected):\n%v\nActual: %s", diff.LineDiff(a, e), b.String())
+ }
+ return nil
+}
+
+func newFlowOrPanic(namespace string, labels map[string]string) *types.Flow {
+ flowObj, err := types.NewFlow(namespace, labels)
+ if err != nil {
+ panic(err)
+ }
+ return flowObj
+}
+
+func toDirective(t *testing.T, converter plugins.DirectiveConverter) types.Directive {
+ directive, err := converter.ToDirective(secret.NewSecretLoader(nil, ""))
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ return directive
+}
diff --git a/pkg/model/render/interface.go b/pkg/model/render/interface.go
new file mode 100644
index 000000000..5ad5b9fb4
--- /dev/null
+++ b/pkg/model/render/interface.go
@@ -0,0 +1,23 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package render
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+type Renderer interface {
+ Render(config types.FluentConfig) error
+}
diff --git a/pkg/model/render/json.go b/pkg/model/render/json.go
new file mode 100644
index 000000000..12e2750fc
--- /dev/null
+++ b/pkg/model/render/json.go
@@ -0,0 +1,46 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package render
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+ "github.com/goph/emperror"
+)
+
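+// JsonRender marshals the configuration model to JSON, indented when indent > 0.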
+type JsonRender struct {
+ out io.Writer
+ indent int
+}
+
+func (t *JsonRender) Render(config types.FluentConfig) error {
+ var out []byte
+ var err error
+ if t.indent > 0 {
+ out, err = json.MarshalIndent(config, "", strings.Repeat(" ", t.indent))
+ } else {
+ out, err = json.Marshal(config)
+ }
+
+ if err != nil {
+ return emperror.Wrap(err, "failed to marshal model into json")
+ }
+ fmt.Fprintf(t.out, "%s", out)
+ return nil
+}
diff --git a/pkg/model/render/json_test.go b/pkg/model/render/json_test.go
new file mode 100644
index 000000000..01e27f62e
--- /dev/null
+++ b/pkg/model/render/json_test.go
@@ -0,0 +1,125 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package render
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/andreyvit/diff"
+ "github.com/banzaicloud/logging-operator/pkg/model/filter"
+ "github.com/banzaicloud/logging-operator/pkg/model/input"
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+func TestJsonRender(t *testing.T) {
+ input, err := input.NewTailInputConfig("input.log").ToDirective(secret.NewSecretLoader(nil, ""))
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+
+ system := types.NewSystem(input, types.NewRouter())
+
+ flow, err := types.NewFlow(
+ "ns-test",
+ map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ filter, err := filter.NewStdOutFilterConfig().ToDirective(secret.NewSecretLoader(nil, ""))
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+
+ nullOut, err := output.NewNullOutputConfig().ToDirective(secret.NewSecretLoader(nil, ""))
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+
+ flow.WithFilters(filter).
+ WithOutputs(nullOut)
+
+ err = system.RegisterFlow(flow)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ configuredSystem, err := system.Build()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := &bytes.Buffer{}
+ jsonRender := JsonRender{out: b, indent: 2}
+ err = jsonRender.Render(configuredSystem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := `{
+ "input": {
+ "type": "tail",
+ "directive": "source",
+ "params": {
+ "path": "input.log"
+ }
+ },
+ "router": {
+ "type": "label_router",
+ "directive": "match",
+ "tag": "**",
+ "routes": [
+ {
+ "directive": "route",
+ "label": "@901f778f9602a78e8fd702c1973d8d8d",
+ "labels": {
+ "key1": "val1",
+ "key2": "val2"
+ },
+ "namespace": "ns-test"
+ }
+ ]
+ },
+ "flows": [
+ {
+ "directive": "label",
+ "tag": "@901f778f9602a78e8fd702c1973d8d8d",
+ "filters": [
+ {
+ "type": "stdout",
+ "directive": "filter",
+ "tag": "**"
+ }
+ ],
+ "outputs": [
+ {
+ "type": "null",
+ "directive": "match",
+ "tag": "**"
+ }
+ ]
+ }
+ ]
+ }`
+ if a, e := diff.TrimLinesInString(b.String()), diff.TrimLinesInString(expected); a != e {
+ t.Errorf("Result not as expected:\n%v \nActual: %s", diff.LineDiff(a, e), b.String())
+ }
+}
diff --git a/pkg/model/secret/secret.go b/pkg/model/secret/secret.go
new file mode 100644
index 000000000..b8f69e8e4
--- /dev/null
+++ b/pkg/model/secret/secret.go
@@ -0,0 +1,91 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package secret
+
+import (
+ "context"
+
+ "emperror.dev/errors"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// +kubebuilder:object:generate=true
+
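+// Secret holds a configuration value given either inline or as a reference to a Kubernetes secret key.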
+type Secret struct {
+ Value string `json:"value,omitempty"`
+ ValueFrom *ValueFrom `json:"valueFrom,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+type ValueFrom struct {
+ SecretKeyRef *KubernetesSecret `json:"secretKeyRef,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+type KubernetesSecret struct {
+ // Name of the kubernetes secret
+ Name string `json:"name"`
+ // Secret key for the value
+ Key string `json:"key"`
+}
+
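+// SecretLoader resolves a Secret definition to its plain string value.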
+type SecretLoader interface {
+ Load(secret *Secret) (string, error)
+}
+
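+// secretLoader reads referenced secrets through the Kubernetes API.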
+type secretLoader struct {
+ // secretLoader is limited to a single namespace to avoid reading secrets from other namespaces
+ namespace string
+ client client.Reader
+}
+
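+// NewSecretLoader returns a SecretLoader bound to the given client and namespace.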
+func NewSecretLoader(client client.Reader, namespace string) *secretLoader {
+ return &secretLoader{
+ client: client,
+ namespace: namespace,
+ }
+}
+
+func (k *secretLoader) Load(secret *Secret) (string, error) {
+ if secret.Value != "" {
+ return secret.Value, nil
+ }
+
+ if secret.ValueFrom != nil && secret.ValueFrom.SecretKeyRef != nil {
+ k8sSecret := &corev1.Secret{}
+ err := k.client.Get(context.TODO(), types.NamespacedName{
+ Name: secret.ValueFrom.SecretKeyRef.Name,
+ Namespace: k.namespace}, k8sSecret)
+ if err != nil {
+ return "", errors.WrapIff(err, "failed to get kubernetes secret %s:%s",
+ k.namespace,
+ secret.ValueFrom.SecretKeyRef.Name)
+ }
+ value, ok := k8sSecret.Data[secret.ValueFrom.SecretKeyRef.Key]
+ if !ok {
+ return "", errors.Errorf("key %q not found in secret %q in namespace %q",
+ secret.ValueFrom.SecretKeyRef.Key,
+ secret.ValueFrom.SecretKeyRef.Name,
+ k.namespace)
+ }
+ return string(value), nil
+ }
+
+ return "", errors.New("No secret Value or ValueFrom defined for field")
+}
diff --git a/pkg/model/secret/zz_generated.deepcopy.go b/pkg/model/secret/zz_generated.deepcopy.go
new file mode 100644
index 000000000..98be8931f
--- /dev/null
+++ b/pkg/model/secret/zz_generated.deepcopy.go
@@ -0,0 +1,76 @@
+// +build !ignore_autogenerated
+
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package secret
+
+import ()
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesSecret) DeepCopyInto(out *KubernetesSecret) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecret.
+func (in *KubernetesSecret) DeepCopy() *KubernetesSecret {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesSecret)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Secret) DeepCopyInto(out *Secret) {
+ *out = *in
+ if in.ValueFrom != nil {
+ in, out := &in.ValueFrom, &out.ValueFrom
+ *out = new(ValueFrom)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
+func (in *Secret) DeepCopy() *Secret {
+ if in == nil {
+ return nil
+ }
+ out := new(Secret)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueFrom) DeepCopyInto(out *ValueFrom) {
+ *out = *in
+ if in.SecretKeyRef != nil {
+ in, out := &in.SecretKeyRef, &out.SecretKeyRef
+ *out = new(KubernetesSecret)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom.
+func (in *ValueFrom) DeepCopy() *ValueFrom {
+ if in == nil {
+ return nil
+ }
+ out := new(ValueFrom)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/model/types/builder.go b/pkg/model/types/builder.go
new file mode 100644
index 000000000..20fcb413c
--- /dev/null
+++ b/pkg/model/types/builder.go
@@ -0,0 +1,52 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "reflect"
+)
+
+type Builder struct {
+ input Input
+ flows []*Flow
+ router *Router
+}
+
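+// NewSystem returns a Builder that assembles the fluentd configuration from an input, a router and the registered flows.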
+func NewSystem(input Input, router *Router) *Builder {
+ return &Builder{
+ input: input,
+ router: router,
+ }
+}
+
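+// RegisterFlow adds the flow and a matching route to the router; a flow must be unique per namespace and label set.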
+func (s *Builder) RegisterFlow(f *Flow) error {
+ for _, e := range s.flows {
+ if e.Namespace == f.Namespace && reflect.DeepEqual(e.Labels, f.Labels) {
+ return errors.New("Flow already exists")
+ }
+ }
+ s.flows = append(s.flows, f)
+ s.router.AddRoute(f)
+ return nil
+}
+
+func (s *Builder) Build() (*System, error) {
+ return &System{
+ Input: s.input,
+ Router: s.router,
+ Flows: s.flows,
+ }, nil
+}
diff --git a/pkg/model/types/flow.go b/pkg/model/types/flow.go
new file mode 100644
index 000000000..6fddca6ca
--- /dev/null
+++ b/pkg/model/types/flow.go
@@ -0,0 +1,135 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+ "sort"
+)
+
+type FluentConfig interface {
+ GetDirectives() []Directive
+}
+
+type System struct {
+ Input Input `json:"input"`
+ Router *Router `json:"router"`
+ Flows []*Flow `json:"flows"`
+}
+
+func (s *System) GetDirectives() []Directive {
+ directives := []Directive{
+ s.Input,
+ s.Router,
+ }
+ for _, flow := range s.Flows {
+ directives = append(directives, flow)
+ }
+ return directives
+}
+
+type Flow struct {
+ PluginMeta
+
+ // Chain of Filters that will process the event. Can be zero or more.
+ Filters []Filter `json:"filters,omitempty"`
+ // List of Outputs that will emit the event, at least one output is required.
+ Outputs []Output `json:"outputs"`
+
+ // Optional set of kubernetes labels
+ Labels map[string]string `json:"-"`
+ // Optional namespace
+ Namespace string `json:"-"`
+
+ // Fluentd label
+ FlowLabel string `json:"-"`
+}
+
+func (f *Flow) GetPluginMeta() *PluginMeta {
+ return &f.PluginMeta
+}
+
+func (f *Flow) GetParams() map[string]string {
+ return nil
+}
+
+func (f *Flow) GetSections() []Directive {
+ sections := []Directive{}
+ for _, filter := range f.Filters {
+ sections = append(sections, filter)
+ }
+ if len(f.Outputs) > 1 {
+ // multiple outputs are wrapped into a single copy directive
+ sections = append(sections, NewCopyDirective(f.Outputs))
+ } else {
+ for _, output := range f.Outputs {
+ sections = append(sections, output)
+ }
+ }
+
+ return sections
+}
+
+func (f *Flow) WithFilters(filter ...Filter) *Flow {
+ f.Filters = append(f.Filters, filter...)
+ return f
+}
+
+func (f *Flow) WithOutputs(output ...Output) *Flow {
+ f.Outputs = append(f.Outputs, output...)
+ return f
+}
+
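+// NewFlow creates a flow whose fluentd label is derived from the namespace and labels.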
+func NewFlow(namespace string, labels map[string]string) (*Flow, error) {
+ flowLabel, err := calculateFlowLabel(namespace, labels)
+ if err != nil {
+ return nil, err
+ }
+ return &Flow{
+ PluginMeta: PluginMeta{
+ Directive: "label",
+ Tag: flowLabel,
+ },
+ FlowLabel: flowLabel,
+ Labels: labels,
+ Namespace: namespace,
+ }, nil
+}
+
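+// calculateFlowLabel derives a stable fluentd @label name by hashing the namespace and the sorted label pairs.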
+func calculateFlowLabel(namespace string, labels map[string]string) (string, error) {
+ b := md5.New()
+ if _, err := io.WriteString(b, namespace); err != nil {
+ return "", err
+ }
+
+ // Make sure the generated label is consistent
+ keys := []string{}
+ for k := range labels {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ if _, err := io.WriteString(b, k); err != nil {
+ return "", err
+ }
+ if _, err := io.WriteString(b, labels[k]); err != nil {
+ return "", err
+ }
+ }
+ return fmt.Sprintf("@%x", b.Sum(nil)), nil
+}
diff --git a/pkg/model/types/router.go b/pkg/model/types/router.go
new file mode 100644
index 000000000..399d61f47
--- /dev/null
+++ b/pkg/model/types/router.go
@@ -0,0 +1,92 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "strings"
+
+ "github.com/banzaicloud/logging-operator/pkg/util"
+)
+
+// Router implements the label_router output plugin: https://github.com/banzaicloud/fluent-plugin-label-router
+type Router struct {
+ PluginMeta
+ Routes []Directive `json:"routes"`
+}
+
+func (r *Router) GetPluginMeta() *PluginMeta {
+ return &r.PluginMeta
+}
+
+func (r *Router) GetParams() map[string]string {
+ return nil
+}
+
+func (r *Router) GetSections() []Directive {
+ return r.Routes
+}
+
+type FlowRoute struct {
+ PluginMeta
+ // Optional set of kubernetes labels
+ Labels map[string]string `json:"labels,omitempty"`
+ // Optional namespace
+ Namespace string `json:"namespace,omitempty"`
+}
+
+func (f *FlowRoute) GetPluginMeta() *PluginMeta {
+ return &f.PluginMeta
+}
+
+func (f *FlowRoute) GetParams() map[string]string {
+ params := map[string]string{}
+ if f.Namespace != "" {
+ params["namespace"] = f.Namespace
+ }
+ if len(f.Labels) > 0 {
+ var sb []string
+ for _, key := range util.OrderedStringMap(f.Labels).Keys() {
+ sb = append(sb, key+":"+f.Labels[key])
+ }
+ params["labels"] = strings.Join(sb, ",")
+ }
+ return params
+}
+
+func (f *FlowRoute) GetSections() []Directive {
+ return nil
+}
+
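+// AddRoute appends a route pointing to the flow's label, matching on its namespace and labels.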
+func (r *Router) AddRoute(flow *Flow) *Router {
+ r.Routes = append(r.Routes, &FlowRoute{
+ PluginMeta: PluginMeta{
+ Directive: "route",
+ Label: flow.FlowLabel,
+ },
+ Labels: flow.Labels,
+ Namespace: flow.Namespace,
+ })
+ return r
+}
+
+func NewRouter() *Router {
+ return &Router{
+ PluginMeta: PluginMeta{
+ Type: "label_router",
+ Directive: "match",
+ Tag: "**",
+ },
+ }
+}
diff --git a/pkg/model/types/stringmaps.go b/pkg/model/types/stringmaps.go
new file mode 100644
index 000000000..42579f433
--- /dev/null
+++ b/pkg/model/types/stringmaps.go
@@ -0,0 +1,235 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+)
+
+type Converter func(interface{}) (string, error)
+
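+// StructToStringMapper renders plugin configuration structs into fluentd
+// parameter maps. Behaviour is driven by struct tags, for example (an
+// illustrative struct, not one defined in this package):
+//
+//   type Example struct {
+//       Endpoint string `json:"endpoint" plugin:"required"`
+//       Timeout  string `json:"timeout,omitempty" plugin:"default:10s"`
+//   }
+//
+// Endpoint must be set, while Timeout falls back to "10s" when left empty.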
+type StructToStringMapper struct {
+ TagName string
+ PluginTagName string
+ ConversionHooks map[string]Converter
+ SecretLoader secret.SecretLoader
+}
+
+type NullSecretLoader struct {
+}
+
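+// NewStructToStringMapper creates a mapper using the default json/plugin tag names and the given secret loader.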
+func NewStructToStringMapper(secretLoader secret.SecretLoader) *StructToStringMapper {
+ return &StructToStringMapper{
+ TagName: "json",
+ PluginTagName: "plugin",
+ ConversionHooks: make(map[string]Converter),
+ SecretLoader: secretLoader,
+ }
+}
+
+func (s *StructToStringMapper) WithConverter(name string, c Converter) *StructToStringMapper {
+ s.ConversionHooks[name] = c
+ return s
+}
+
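+// StringsMap converts the tagged fields of the given struct into a flat map of parameter names and string values.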
+func (s *StructToStringMapper) StringsMap(in interface{}) (map[string]string, error) {
+ out := make(map[string]string)
+ err := s.fillMap(strctVal(in), out)
+ return out, err
+}
+
+func (s *StructToStringMapper) fillMap(value reflect.Value, out map[string]string) error {
+ if out == nil {
+ return nil
+ }
+
+ fields := s.structFields(value)
+
+ var multierror error
+ for _, field := range fields {
+ name := field.Name
+ val := value.FieldByName(name)
+ var finalVal string
+
+ tagName, tagOpts := parseTagWithName(field.Tag.Get(s.TagName))
+ if tagName != "" {
+ name = tagName
+ }
+
+ pluginTagOpts := parseTag(field.Tag.Get(s.PluginTagName))
+ required := pluginTagOpts.Has("required")
+
+ if tagOpts.Has("omitempty") {
+ if pluginTagOpts.Has("required") {
+ multierror = errors.Combine(multierror, errors.Errorf(
+ "tags for field %s are conflicting: required and omitempty cannot be set simultaneously", name))
+ continue
+ }
+ zero := reflect.Zero(val.Type()).Interface()
+ current := val.Interface()
+ if reflect.DeepEqual(current, zero) {
+ if ok, def := pluginTagOpts.ValueForPrefix("default:"); ok {
+ out[name] = def
+ }
+ continue
+ }
+ }
+
+ var v reflect.Value
+ if ok, converterName := pluginTagOpts.ValueForPrefix("converter:"); ok {
+ if hook, ok := s.ConversionHooks[converterName]; ok {
+ convertedValue, err := hook(val.Interface())
+ if err != nil {
+ multierror = errors.Combine(multierror, errors.WrapIff(err,
+ "failed to convert field `%s` with converter %s", name, converterName))
+ } else {
+ v = reflect.ValueOf(convertedValue)
+ }
+ } else {
+ multierror = errors.Combine(multierror, errors.Errorf(
+ "unable to convert field `%s` as the specified converter `%s` is not registered", name, converterName))
+ continue
+ }
+ } else {
+ v = reflect.ValueOf(val.Interface())
+ }
+
+ if s.SecretLoader != nil {
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ if secret, ok := val.Interface().(*secret.Secret); ok {
+ loadedSecret, err := s.SecretLoader.Load(secret)
+ if err != nil {
+ multierror = errors.Combine(multierror, errors.Errorf("failed to load secret for field %s", name))
+ } else {
+ out[name] = loadedSecret
+ }
+ continue
+ }
+ }
+ }
+
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ // for string, int and bool fields use the value if it is non-empty, otherwise fall back to the default
+ switch v.Kind() {
+ case reflect.String, reflect.Int, reflect.Bool:
+ stringVal := fmt.Sprintf("%v", v)
+ if stringVal != "" {
+ finalVal = stringVal
+ } else {
+ // check if default has been set and use it
+ if ok, def := pluginTagOpts.ValueForPrefix("default:"); ok {
+ finalVal = def
+ }
+ }
+ // a required field must not end up with an empty value
+ if finalVal == "" && required {
+ multierror = errors.Combine(multierror, errors.Errorf("field %s is required", name))
+ } else {
+ out[name] = finalVal
+ }
+ }
+ }
+ return multierror
+}
+
+func strctVal(s interface{}) reflect.Value {
+ v := reflect.ValueOf(s)
+
+ // if it is a pointer, get the underlying element
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ if v.Kind() != reflect.Struct {
+ panic("not struct")
+ }
+
+ return v
+}
+
+func (s *StructToStringMapper) structFields(value reflect.Value) []reflect.StructField {
+ t := value.Type()
+
+ var f []reflect.StructField
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ // we can't access the value of unexported fields
+ if field.PkgPath != "" {
+ continue
+ }
+
+ // don't check if it's omitted
+ if tag := field.Tag.Get(s.TagName); tag == "-" {
+ continue
+ }
+
+ f = append(f, field)
+ }
+
+ return f
+}
+
+// parseTagWithName splits a struct field's tag into its name and the list of
+// options that follow it. A tag has the form "name,option1,option2".
+// The name may be omitted.
+func parseTagWithName(tag string) (string, tagOptions) {
+ // tag is one of followings:
+ // ""
+ // "name"
+ // "name,opt"
+ // "name,opt,opt2"
+ // ",opt"
+
+ res := strings.Split(tag, ",")
+ return res[0], res[1:]
+}
+
+// tagOptions contains a slice of tag options
+type tagOptions []string
+
+// Has returns true if the given option is available in tagOptions
+func (t tagOptions) Has(opt string) bool {
+ for _, tagOpt := range t {
+ if tagOpt == opt {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ValueForPrefix reports whether an option with the given prefix exists and returns its value after the prefix
+func (t tagOptions) ValueForPrefix(opt string) (bool, string) {
+ for _, tagOpt := range t {
+ if strings.HasPrefix(tagOpt, opt) {
+ return true, strings.Replace(tagOpt, opt, "", 1)
+ }
+ }
+ return false, ""
+}
+
+// parseTag returns all the options in the tag
+func parseTag(tag string) tagOptions {
+ return tagOptions(strings.Split(tag, ","))
+}
diff --git a/pkg/model/types/stringmaps_test.go b/pkg/model/types/stringmaps_test.go
new file mode 100644
index 000000000..a0f5cb893
--- /dev/null
+++ b/pkg/model/types/stringmaps_test.go
@@ -0,0 +1,235 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "testing"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+)
+
+func TestRequired(t *testing.T) {
+ expectedError := "field field1 is required"
+ type Asd struct {
+ Field1 string `json:"field1" plugin:"required"`
+ }
+ _, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).StringsMap(Asd{})
+ if err == nil {
+ t.Fatalf("required error is expected")
+ } else {
+ if err.Error() != expectedError {
+ t.Fatalf("error message `%s` does not match expected `%s`", err.Error(), expectedError)
+ }
+ }
+}
+
+func TestRequiredMeansItCannotEvenBeEmpty(t *testing.T) {
+ expectedError := "field field1 is required"
+ type Asd struct {
+ Field1 string `json:"field1" plugin:"required"`
+ }
+ _, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).StringsMap(Asd{Field1: ""})
+ if err == nil {
+ t.Fatalf("required error is expected")
+ } else {
+ if err.Error() != expectedError {
+ t.Fatalf("error message `%s` does not match expected `%s`", err.Error(), expectedError)
+ }
+ }
+}
+
+func TestJsonTagsWithDefaultsAndOmitempty(t *testing.T) {
+ type Asd struct {
+ Field1 string `json:"field1"`
+ Field2 string `json:"field2,omitempty" plugin:"default:http://asdf and some space"`
+ Field3 string `json:"field3,omitempty"`
+ }
+ actual, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).StringsMap(Asd{Field1: "value"})
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expected := map[string]string{
+ "field1": "value",
+ "field2": "http://asdf and some space",
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("failed to match expected %+v with %+v", expected, actual)
+ }
+}
+
+func TestConflictingTags(t *testing.T) {
+ expectedError := "tags for field field2 are conflicting: required and omitempty cannot be set simultaneously"
+ type Asd struct {
+ Field2 string `json:"field2,omitempty" plugin:"required"`
+ }
+ _, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).StringsMap(Asd{})
+ if err == nil {
+ t.Fatalf("required error is expected")
+ } else {
+ if err.Error() != expectedError {
+ t.Fatalf("error message `%s` does not match expected `%s`", err.Error(), expectedError)
+ }
+ }
+}
+
+func TestIgnoreNestedStructs(t *testing.T) {
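+// Renderer writes a complete FluentConfig in a concrete format, such as fluentd configuration or JSON.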
+ type Nested struct {
+ Field string `json:"asd"`
+ }
+ type Asd struct {
+ Field2 string `json:"field2"`
+ Field3 *Nested `json:"nested"`
+ }
+ actual, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).StringsMap(Asd{Field2: "val"})
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expected := map[string]string{
+ "field2": "val",
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("failed to match expected %+v with %+v", expected, actual)
+ }
+}
+
+func TestEmptyStructStructs(t *testing.T) {
+ type Asd struct {
+ }
+ actual, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).StringsMap(Asd{})
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expected := map[string]string{}
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("failed to match expected %+v with %+v", expected, actual)
+ }
+}
+
+func TestConversion(t *testing.T) {
+ type Asd struct {
+ Field int `json:"field" plugin:"converter:magic"`
+ }
+
+ converter := func(f interface{}) (string, error) {
+ if converted, ok := f.(int); ok {
+ return strconv.Itoa(converted), nil
+ }
+ return "", errors.Errorf("unable to convert %+v to int", f)
+ }
+
+ testStruct := Asd{Field: 2}
+
+ actual, err := NewStructToStringMapper(secret.NewSecretLoader(nil, "")).
+ WithConverter("magic", converter).
+ StringsMap(testStruct)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expected := map[string]string{
+ "field": "2",
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("failed to match expected %+v with %+v", expected, actual)
+ }
+}
+
+type FakeLoader struct {
+}
+
+func (d *FakeLoader) Load(secret *secret.Secret) (string, error) {
+ if secret.Value != "" {
+ return secret.Value, nil
+ }
+ if secret.ValueFrom != nil && secret.ValueFrom.SecretKeyRef != nil {
+ return fmt.Sprintf("%s:%s",
+ secret.ValueFrom.SecretKeyRef.Name,
+ secret.ValueFrom.SecretKeyRef.Key), nil
+ }
+ return "", errors.New("no value found")
+}
+
+func TestSecretValue(t *testing.T) {
+ type Asd struct {
+ Field *secret.Secret `json:"field"`
+ }
+
+ testStruct := Asd{Field: &secret.Secret{Value: "asd"}}
+
+ actual, err := NewStructToStringMapper(&FakeLoader{}).
+ StringsMap(testStruct)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expected := map[string]string{
+ "field": "asd",
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("failed to match expected %+v with %+v", expected, actual)
+ }
+}
+
+func TestSecretValueFrom(t *testing.T) {
+ type Asd struct {
+ Field *secret.Secret `json:"field"`
+ }
+
+ testStruct := Asd{
+ Field: &secret.Secret{
+ ValueFrom: &secret.ValueFrom{
+ SecretKeyRef: &secret.KubernetesSecret{
+ Name: "a",
+ Key: "b",
+ },
+ },
+ },
+ }
+
+ actual, err := NewStructToStringMapper(&FakeLoader{}).
+ StringsMap(testStruct)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expected := map[string]string{
+ "field": "a:b",
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("failed to match expected %+v with %+v", expected, actual)
+ }
+}
+
+func TestSecretErrorWhenEmpty(t *testing.T) {
+ type Asd struct {
+ Field *secret.Secret `json:"field"`
+ }
+
+ testStruct := Asd{
+ Field: &secret.Secret{},
+ }
+
+ _, err := NewStructToStringMapper(&FakeLoader{}).
+ StringsMap(testStruct)
+ if err == nil {
+ t.Fatal("expected an error when secret contains no value or valuefrom")
+ }
+
+ expectedError := "failed to load secret for field field"
+ if err.Error() != expectedError {
+ t.Fatalf("Expected `%s` got `%s`", expectedError, err.Error())
+ }
+}
diff --git a/pkg/model/types/types.go b/pkg/model/types/types.go
new file mode 100644
index 000000000..13c1997b3
--- /dev/null
+++ b/pkg/model/types/types.go
@@ -0,0 +1,180 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+)
+
+type Directive interface {
+ GetPluginMeta() *PluginMeta
+ GetParams() map[string]string
+ GetSections() []Directive
+}
+
+type Filter interface {
+ Directive
+}
+
+type Input interface {
+ Directive
+}
+
+type Output interface {
+ Directive
+}
+
+func Value(value string) *PluginParam {
+ return &PluginParam{
+ Value: value,
+ }
+}
+
+type OutputPlugin struct {
+ PluginMeta
+ Output
+ Params Params
+ SubDirectives []Directive
+}
+
+func (s *OutputPlugin) GetPluginMeta() *PluginMeta {
+ return &s.PluginMeta
+}
+
+func (s *OutputPlugin) GetParams() map[string]string {
+ return s.Params
+}
+
+func (s *OutputPlugin) GetSections() []Directive {
+ return s.SubDirectives
+}
+
+type PluginMeta struct {
+ Type string `json:"type,omitempty"`
+ Id string `json:"id,omitempty"`
+ LogLevel string `json:"log_level,omitempty"`
+ Directive string `json:"directive"`
+ Label string `json:"label,omitempty"`
+ Tag string `json:"tag,omitempty"`
+}
+
+type GenericDirective struct {
+ PluginMeta
+ Params map[string]string `json:"params,omitempty"`
+ SubDirectives []Directive `json:"sections,omitempty"`
+}
+
+func (d *GenericDirective) GetPluginMeta() *PluginMeta {
+ return &d.PluginMeta
+}
+
+func (d *GenericDirective) GetParams() map[string]string {
+ return d.Params
+}
+
+func (d *GenericDirective) GetSections() []Directive {
+ return d.SubDirectives
+}
+
+type PluginParam struct {
+ Description string
+ Default string
+ Value string
+ Required bool
+}
+
+type PluginParams map[string]*PluginParam
+
+// Equals checks whether two PluginParams match exactly by their values
+func (p PluginParams) Equals(target PluginParams) error {
+ keySet := map[string]bool{}
+ // Iterate over the keys of p
+ for key, body := range p {
+ // Check that the key also exists in the target
+ matchBody, ok := target[key]
+ if !ok {
+ // There is no such key in the target PluginParams
+ return fmt.Errorf("missing key %q from target", key)
+ }
+ if body != nil {
+ if matchBody != nil {
+ // The values do not match
+ if body.Value != matchBody.Value {
+ return fmt.Errorf("the values at %q mismatch %q != %q", key, body.Value, matchBody.Value)
+ }
+ } else {
+ // There is no body at the target PluginParams for the matching key
+ return fmt.Errorf("missing body at %q from target", key)
+ }
+ }
+ // Add key to the keySet
+ keySet[key] = true
+ }
+ for key := range target {
+ if _, ok := keySet[key]; !ok {
+ // The target has keys that are not present in p
+ return fmt.Errorf("unexpected key %q at target", key)
+ }
+ }
+ return nil
+}
+
+type Params map[string]string
+
+func (p Params) Merge(input map[string]string) Params {
+ for k, v := range input {
+ p[k] = v
+ }
+ return p
+}
+
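+// NewFlatDirective builds a GenericDirective whose params are generated from the config struct's tagged fields.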
+func NewFlatDirective(meta PluginMeta, config interface{}, secretLoader secret.SecretLoader) (Directive, error) {
+ directive := &GenericDirective{
+ PluginMeta: meta,
+ }
+ if params, err := NewStructToStringMapper(secretLoader).StringsMap(config); err != nil {
+ return nil, errors.WrapIf(err, "failed to convert struct to map[string]string params")
+ } else {
+ directive.Params = params
+ }
+ return directive, nil
+}
+
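+// NewCopyDirective wraps multiple outputs into a single copy directive with one store section per output.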
+func NewCopyDirective(directives []Output) Directive {
+ directive := &GenericDirective{
+ PluginMeta: PluginMeta{
+ Directive: "match",
+ Type: "copy",
+ Tag: "**",
+ },
+ }
+ for _, d := range directives {
+ newCopySection := &GenericDirective{
+ PluginMeta: PluginMeta{
+ Type: d.GetPluginMeta().Type,
+ Id: d.GetPluginMeta().Id,
+ LogLevel: d.GetPluginMeta().LogLevel,
+ Directive: "store",
+ },
+ Params: d.GetParams(),
+ SubDirectives: d.GetSections(),
+ }
+ directive.SubDirectives = append(directive.SubDirectives, newCopySection)
+ }
+ return directive
+}
diff --git a/pkg/plugins/plugin.go b/pkg/plugins/plugin.go
new file mode 100644
index 000000000..f004c3492
--- /dev/null
+++ b/pkg/plugins/plugin.go
@@ -0,0 +1,68 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package plugins
+
+import (
+ "reflect"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+)
+
+type DirectiveConverter interface {
+ ToDirective(secret.SecretLoader) (types.Directive, error)
+}
+
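+// CreateOutput locates the single non-nil plugin config in the OutputSpec via reflection and converts it to a directive.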
+func CreateOutput(outputSpec v1beta1.OutputSpec, secretLoader secret.SecretLoader) (types.Directive, error) {
+ v := reflect.ValueOf(outputSpec)
+ var converters []DirectiveConverter
+ for i := 0; i < v.NumField(); i++ {
+ if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
+ if converter, ok := v.Field(i).Interface().(DirectiveConverter); ok {
+ converters = append(converters, converter)
+ }
+ }
+ }
+ switch len(converters) {
+ case 0:
+ return nil, errors.New("no plugin config available for output")
+ case 1:
+ return converters[0].ToDirective(secretLoader)
+ default:
+ return nil, errors.Errorf("more than one plugin config is not allowed for an output")
+ }
+}
+
+func CreateFilter(filter v1beta1.Filter, secretLoader secret.SecretLoader) (types.Directive, error) {
+ v := reflect.ValueOf(filter)
+ var converters []DirectiveConverter
+ for i := 0; i < v.NumField(); i++ {
+ if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
+ if converter, ok := v.Field(i).Interface().(DirectiveConverter); ok {
+ converters = append(converters, converter)
+ }
+ }
+ }
+ switch len(converters) {
+ case 0:
+ return nil, errors.New("no plugin config available for filter")
+ case 1:
+ return converters[0].ToDirective(secretLoader)
+ default:
+ return nil, errors.Errorf("more than one plugin config is not allowed for a filter")
+ }
+}
diff --git a/pkg/resources/fluentbit/config.go b/pkg/resources/fluentbit/config.go
index 4a7437781..f97b20ff2 100644
--- a/pkg/resources/fluentbit/config.go
+++ b/pkg/resources/fluentbit/config.go
@@ -1,18 +1,16 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentbit
@@ -22,14 +20,16 @@ var fluentBitConfigTemplate = `
Daemon Off
Log_Level info
Parsers_File parsers.conf
+{{- if .Monitor.Port }}
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Monitor.Port }}
+{{- end }}
[INPUT]
Name tail
Path /var/log/containers/*.log
- Parser docker
+ Parser {{ .Parser }}
Tag kubernetes.*
Refresh_Interval 5
Mem_Buf_Limit 5MB
@@ -49,14 +49,14 @@ var fluentBitConfigTemplate = `
[OUTPUT]
Name forward
Match *
- Host fluentd.{{ .Namespace }}.svc
- Port 24240
+ Host {{ .TargetHost }}
+ Port {{ .TargetPort }}
{{ if .TLS.Enabled }}
tls On
tls.verify Off
- tls.ca_file {{ .TLS.CACertFile }}
- tls.crt_file {{ .TLS.CertFile }}
- tls.key_file {{ .TLS.KeyFile }}
+ tls.ca_file /fluent-bit/tls/ca.crt
+ tls.crt_file /fluent-bit/tls/tls.crt
+ tls.key_file /fluent-bit/tls/tls.key
Shared_Key {{ .TLS.SharedKey }}
{{- end }}
Retry_Limit False
diff --git a/pkg/resources/fluentbit/configmap.go b/pkg/resources/fluentbit/configmap.go
deleted file mode 100644
index 91d7091f5..000000000
--- a/pkg/resources/fluentbit/configmap.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package fluentbit
-
-import (
- "bytes"
- "github.com/banzaicloud/logging-operator/pkg/resources/templates"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "text/template"
-)
-
-type fluentbitTLSConfig struct {
- Enabled bool
- SharedKey string
- CACertFile string
- CertFile string
- KeyFile string
-}
-
-type fluentBitConfig struct {
- Namespace string
- TLS fluentbitTLSConfig
- Monitor map[string]string
- Output map[string]string
-}
-
-func (r *Reconciler) configMap() runtime.Object {
- var monitorConfig map[string]string
- if _, ok := r.Fluentbit.Spec.Annotations["prometheus.io/port"]; ok {
- monitorConfig = map[string]string{
- "Port": r.Fluentbit.Spec.Annotations["prometheus.io/port"],
- }
- }
- tlsConfig := fluentbitTLSConfig{
- Enabled: r.Fluentbit.Spec.TLS.Enabled,
- SharedKey: r.Fluentbit.Spec.TLS.SharedKey,
- }
- if r.Fluentbit.Spec.TLS.SecretType == "tls" {
- tlsConfig.CertFile = "/fluent-bit/tls/tls.crt"
- tlsConfig.KeyFile = "/fluent-bit/tls/tls.key"
- tlsConfig.CACertFile = "/fluent-bit/tls/ca.crt"
- } else {
- tlsConfig.CertFile = "/fluent-bit/tls/clientCert"
- tlsConfig.KeyFile = "/fluent-bit/tls/clientKey"
- tlsConfig.CACertFile = "/fluent-bit/tls/caCert"
- }
- input := fluentBitConfig{
- Namespace: r.Fluentbit.Namespace,
- TLS: tlsConfig,
- Monitor: monitorConfig,
- }
- return &corev1.ConfigMap{
- ObjectMeta: templates.FluentbitObjectMeta(fluentbitConfigMapName, r.Fluentbit.Labels, r.Fluentbit),
- Data: map[string]string{
- "fluent-bit.conf": generateConfig(input),
- },
- }
-}
-
-func generateConfig(input fluentBitConfig) string {
- output := new(bytes.Buffer)
- tmpl, err := template.New("test").Parse(fluentBitConfigTemplate)
- if err != nil {
- return ""
- }
- err = tmpl.Execute(output, input)
- if err != nil {
- return ""
- }
- outputString := output.String()
- return outputString
-}
diff --git a/pkg/resources/fluentbit/configsecret.go b/pkg/resources/fluentbit/configsecret.go
new file mode 100644
index 000000000..18e1745bf
--- /dev/null
+++ b/pkg/resources/fluentbit/configsecret.go
@@ -0,0 +1,93 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentbit
+
+import (
+ "bytes"
+ "fmt"
+ "text/template"
+
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentd"
+ "github.com/banzaicloud/logging-operator/pkg/resources/templates"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+type fluentBitConfig struct {
+ Namespace string
+ TLS struct {
+ Enabled bool
+ SharedKey string
+ }
+ Monitor map[string]string
+ Output map[string]string
+ TargetHost string
+ TargetPort int32
+ Parser string
+}
+
+func (r *Reconciler) configSecret() runtime.Object {
+ var monitorConfig map[string]string
+ if _, ok := r.Logging.Spec.FluentbitSpec.Annotations["prometheus.io/port"]; ok {
+ monitorConfig = map[string]string{
+ "Port": r.Logging.Spec.FluentbitSpec.Annotations["prometheus.io/port"],
+ }
+ }
+ input := fluentBitConfig{
+ Namespace: r.Logging.Spec.ControlNamespace,
+ TLS: struct {
+ Enabled bool
+ SharedKey string
+ }{
+ Enabled: r.Logging.Spec.FluentbitSpec.TLS.Enabled,
+ SharedKey: r.Logging.Spec.FluentbitSpec.TLS.SharedKey,
+ },
+ Monitor: monitorConfig,
+ TargetHost: fmt.Sprintf("%s.%s.svc", r.Logging.QualifiedName(fluentd.ServiceName), r.Logging.Spec.ControlNamespace),
+ TargetPort: r.Logging.Spec.FluentdSpec.Port,
+ }
+ if r.Logging.Spec.FluentbitSpec.Parser != "" {
+ input.Parser = r.Logging.Spec.FluentbitSpec.Parser
+ } else {
+ input.Parser = "cri"
+ }
+ if r.Logging.Spec.FluentbitSpec.TargetHost != "" {
+ input.TargetHost = r.Logging.Spec.FluentbitSpec.TargetHost
+ }
+ if r.Logging.Spec.FluentbitSpec.TargetPort != 0 {
+ input.TargetPort = r.Logging.Spec.FluentbitSpec.TargetPort
+ }
+ return &corev1.Secret{
+ ObjectMeta: templates.FluentbitObjectMeta(
+ r.Logging.QualifiedName(fluentBitSecretConfigName), r.Logging.Labels, r.Logging),
+ Data: map[string][]byte{
+ "fluent-bit.conf": []byte(generateConfig(input)),
+ },
+ }
+}
+
+func generateConfig(input fluentBitConfig) string {
+ output := new(bytes.Buffer)
+ tmpl, err := template.New("test").Parse(fluentBitConfigTemplate)
+ if err != nil {
+ return ""
+ }
+ err = tmpl.Execute(output, input)
+ if err != nil {
+ return ""
+ }
+ outputString := output.String()
+ return outputString
+}
diff --git a/pkg/resources/fluentbit/daemonset.go b/pkg/resources/fluentbit/daemonset.go
index 27df0da61..1864c48d7 100644
--- a/pkg/resources/fluentbit/daemonset.go
+++ b/pkg/resources/fluentbit/daemonset.go
@@ -1,23 +1,20 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentbit
import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
"github.com/banzaicloud/logging-operator/pkg/resources/templates"
"github.com/banzaicloud/logging-operator/pkg/util"
appsv1 "k8s.io/api/apps/v1"
@@ -31,44 +28,47 @@ func (r *Reconciler) daemonSet() runtime.Object {
var containerPorts []corev1.ContainerPort
- if _, ok := r.Fluentbit.Spec.Annotations["prometheus.io/port"]; ok {
+ if _, ok := r.Logging.Spec.FluentbitSpec.Annotations["prometheus.io/port"]; ok {
containerPorts = append(containerPorts, corev1.ContainerPort{
Name: "monitor",
- ContainerPort: r.Fluentbit.Spec.GetPrometheusPortFromAnnotation(),
+ ContainerPort: r.Logging.Spec.FluentbitSpec.GetPrometheusPortFromAnnotation(),
Protocol: corev1.ProtocolTCP,
})
}
+ labels := util.MergeLabels(r.Logging.Labels, labelSelector)
+
return &appsv1.DaemonSet{
- ObjectMeta: templates.FluentbitObjectMeta(fluentbitDeaemonSetName, util.MergeLabels(r.Fluentbit.Labels, labelSelector), r.Fluentbit),
+ ObjectMeta: templates.FluentbitObjectMeta(
+ r.Logging.QualifiedName(fluentbitDaemonSetName), labels, r.Logging),
Spec: appsv1.DaemonSetSpec{
- Selector: &metav1.LabelSelector{MatchLabels: util.MergeLabels(r.Fluentbit.Labels, labelSelector)},
+ Selector: &metav1.LabelSelector{MatchLabels: util.MergeLabels(r.Logging.Labels, labelSelector)},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: util.MergeLabels(r.Fluentbit.Labels, labelSelector),
- Annotations: r.Fluentbit.Spec.Annotations,
+ Labels: labels,
+ Annotations: r.Logging.Spec.FluentbitSpec.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: serviceAccountName,
- Volumes: generateVolume(r.Fluentbit),
+ ServiceAccountName: r.Logging.QualifiedName(serviceAccountName),
+ Volumes: r.generateVolume(),
+ Tolerations: r.Logging.Spec.FluentbitSpec.Tolerations,
Containers: []corev1.Container{
{
Name: "fluent-bit",
- Image: r.Fluentbit.Spec.Image.Repository + ":" + r.Fluentbit.Spec.Image.Tag,
- ImagePullPolicy: corev1.PullPolicy(r.Fluentbit.Spec.Image.PullPolicy),
+ Image: r.Logging.Spec.FluentbitSpec.Image.Repository + ":" + r.Logging.Spec.FluentbitSpec.Image.Tag,
+ ImagePullPolicy: corev1.PullPolicy(r.Logging.Spec.FluentbitSpec.Image.PullPolicy),
Ports: containerPorts,
- Resources: r.Fluentbit.Spec.Resources,
- VolumeMounts: generateVolumeMounts(r.Fluentbit),
+ Resources: r.Logging.Spec.FluentbitSpec.Resources,
+ VolumeMounts: r.generateVolumeMounts(),
},
},
- Tolerations: r.Fluentbit.Spec.Tolerations,
},
},
},
}
}
-func generateVolumeMounts(fluentbit *loggingv1alpha1.Fluentbit) (v []corev1.VolumeMount) {
+func (r *Reconciler) generateVolumeMounts() (v []corev1.VolumeMount) {
v = []corev1.VolumeMount{
{
Name: "varlibcontainers",
@@ -90,11 +90,11 @@ func generateVolumeMounts(fluentbit *loggingv1alpha1.Fluentbit) (v []corev1.Volu
MountPath: "/var/log/",
},
}
- if fluentbit.Spec.TLS.Enabled {
+ if r.Logging.Spec.FluentbitSpec.TLS.Enabled {
tlsRelatedVolume := []corev1.VolumeMount{
{
- Name: "fluent-tls",
- MountPath: "/fluent-bit/tls",
+ Name: "fluent-bit-tls",
+ MountPath: "/fluent-bit/tls/",
},
}
v = append(v, tlsRelatedVolume...)
@@ -102,7 +102,7 @@ func generateVolumeMounts(fluentbit *loggingv1alpha1.Fluentbit) (v []corev1.Volu
return
}
-func generateVolume(fluentbit *loggingv1alpha1.Fluentbit) (v []corev1.Volume) {
+func (r *Reconciler) generateVolume() (v []corev1.Volume) {
v = []corev1.Volume{
{
Name: "varlibcontainers",
@@ -115,9 +115,13 @@ func generateVolume(fluentbit *loggingv1alpha1.Fluentbit) (v []corev1.Volume) {
{
Name: "config",
VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "fluent-bit-config",
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: r.Logging.QualifiedName(fluentBitSecretConfigName),
+ Items: []corev1.KeyToPath{
+ {
+ Key: "fluent-bit.conf",
+ Path: "fluent-bit.conf",
+ },
},
},
},
@@ -137,12 +141,12 @@ func generateVolume(fluentbit *loggingv1alpha1.Fluentbit) (v []corev1.Volume) {
},
},
}
- if fluentbit.Spec.TLS.Enabled {
+ if r.Logging.Spec.FluentbitSpec.TLS.Enabled {
tlsRelatedVolume := corev1.Volume{
- Name: "fluent-tls",
+ Name: "fluent-bit-tls",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
- SecretName: fluentbit.Spec.TLS.SecretName,
+ SecretName: r.Logging.Spec.FluentbitSpec.TLS.SecretName,
},
},
}
diff --git a/pkg/resources/fluentbit/fluentbit.go b/pkg/resources/fluentbit/fluentbit.go
index a84a3387e..54c8a1ed3 100644
--- a/pkg/resources/fluentbit/fluentbit.go
+++ b/pkg/resources/fluentbit/fluentbit.go
@@ -1,36 +1,35 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentbit
import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
"github.com/banzaicloud/logging-operator/pkg/k8sutil"
"github.com/banzaicloud/logging-operator/pkg/resources"
"github.com/go-logr/logr"
"github.com/goph/emperror"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
- serviceAccountName = "logging"
- clusterRoleBindingName = "logging"
- clusterRoleName = "LoggingRole"
- fluentbitConfigMapName = "fluent-bit-config"
- fluentbitDeaemonSetName = "fluent-bit-daemon"
+ serviceAccountName = "logging"
+ clusterRoleBindingName = "logging"
+ clusterRoleName = "logging"
+ fluentBitSecretConfigName = "fluentbit"
+ fluentbitDaemonSetName = "fluentbit"
)
var labelSelector = map[string]string{
@@ -39,32 +38,31 @@ var labelSelector = map[string]string{
// Reconciler holds info what resource to reconcile
type Reconciler struct {
- resources.FluentbitReconciler
+ Logging *v1beta1.Logging
+ *k8sutil.GenericResourceReconciler
}
-// New creates a new Fluentbit reconciler
-func New(client client.Client, fluentbit *loggingv1alpha1.Fluentbit) *Reconciler {
+// New creates a new Fluentbit reconciler
+func New(client client.Client, logger logr.Logger, logging *v1beta1.Logging) *Reconciler {
return &Reconciler{
- FluentbitReconciler: resources.FluentbitReconciler{
- Client: client,
- Fluentbit: fluentbit,
- },
+ Logging: logging,
+ GenericResourceReconciler: k8sutil.NewReconciler(client, logger),
}
}
// Reconcile reconciles the fluentBit resource
-func (r *Reconciler) Reconcile(log logr.Logger) error {
+func (r *Reconciler) Reconcile() (*reconcile.Result, error) {
for _, res := range []resources.Resource{
r.serviceAccount,
r.clusterRole,
r.clusterRoleBinding,
- r.configMap, r.daemonSet,
+ r.configSecret, r.daemonSet,
} {
o := res()
- err := k8sutil.Reconcile(log, r.Client, o)
+ err := r.ReconcileResource(o)
if err != nil {
- return emperror.WrapWith(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind())
+ return nil, emperror.WrapWith(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind())
}
}
- return nil
+ return nil, nil
}
diff --git a/pkg/resources/fluentbit/rbac.go b/pkg/resources/fluentbit/rbac.go
index bfba4562a..bec216c90 100644
--- a/pkg/resources/fluentbit/rbac.go
+++ b/pkg/resources/fluentbit/rbac.go
@@ -1,18 +1,16 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentbit
@@ -25,12 +23,13 @@ import (
func (r *Reconciler) clusterRole() runtime.Object {
return &rbacv1.ClusterRole{
- ObjectMeta: templates.FluentbitObjectMetaClusterScope(clusterRoleName, r.Fluentbit.Labels, r.Fluentbit),
+ ObjectMeta: templates.FluentbitObjectMetaClusterScope(
+ r.Logging.QualifiedName(clusterRoleName), r.Logging.Labels, r.Logging),
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
- Resources: []string{"pods"},
- Verbs: []string{"get"},
+ Resources: []string{"pods", "namespaces"},
+ Verbs: []string{"get", "list", "watch"},
},
},
}
@@ -38,17 +37,18 @@ func (r *Reconciler) clusterRole() runtime.Object {
func (r *Reconciler) clusterRoleBinding() runtime.Object {
return &rbacv1.ClusterRoleBinding{
- ObjectMeta: templates.FluentbitObjectMetaClusterScope(clusterRoleBindingName, r.Fluentbit.Labels, r.Fluentbit),
+ ObjectMeta: templates.FluentbitObjectMetaClusterScope(
+ r.Logging.QualifiedNamespacedName(clusterRoleBindingName), r.Logging.Labels, r.Logging),
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
APIGroup: "rbac.authorization.k8s.io",
- Name: clusterRoleName,
+ Name: r.Logging.QualifiedName(clusterRoleName),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
- Name: serviceAccountName,
- Namespace: r.Fluentbit.Namespace,
+ Name: r.Logging.QualifiedName(serviceAccountName),
+ Namespace: r.Logging.Spec.ControlNamespace,
},
},
}
@@ -56,6 +56,7 @@ func (r *Reconciler) clusterRoleBinding() runtime.Object {
func (r *Reconciler) serviceAccount() runtime.Object {
return &corev1.ServiceAccount{
- ObjectMeta: templates.FluentbitObjectMeta(serviceAccountName, r.Fluentbit.Labels, r.Fluentbit),
+ ObjectMeta: templates.FluentbitObjectMeta(
+ r.Logging.QualifiedName(serviceAccountName), r.Logging.Labels, r.Logging),
}
}
diff --git a/pkg/resources/fluentd/appconfigmap.go b/pkg/resources/fluentd/appconfigmap.go
index fd096cf8b..6ad59c511 100644
--- a/pkg/resources/fluentd/appconfigmap.go
+++ b/pkg/resources/fluentd/appconfigmap.go
@@ -1,39 +1,204 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentd
import (
"context"
+ "fmt"
+ "hash/fnv"
+ "emperror.dev/errors"
"github.com/banzaicloud/logging-operator/pkg/resources/templates"
"github.com/banzaicloud/logging-operator/pkg/util"
corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
)
+type ConfigCheckResult struct {
+ Valid bool
+ Ready bool
+}
+
+const ConfigKey = "generated.conf"
+
func (r *Reconciler) appconfigMap() runtime.Object {
- current := &corev1.ConfigMap{}
- err := r.Client.Get(context.TODO(), types.NamespacedName{Namespace: r.Fluentd.Namespace, Name: appConfigMapName}, current)
+ data := make(map[string][]byte)
+ data[AppConfigKey] = []byte(*r.config)
+ return &corev1.Secret{
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(AppSecretConfigName), util.MergeLabels(r.Logging.Labels, labelSelector), r.Logging),
+ Data: data,
+ }
+}
+
+func (r *Reconciler) configHash() (string, error) {
+ hasher := fnv.New32()
+ _, err := hasher.Write([]byte(*r.config))
+ if err != nil {
+ return "", errors.WrapIf(err, "failed to calculate hash for the configmap data")
+ }
+ return fmt.Sprintf("%x", hasher.Sum32()), nil
+}
+
+func (r *Reconciler) configCheck() (*ConfigCheckResult, error) {
+ hashKey, err := r.configHash()
if err != nil {
- return &corev1.ConfigMap{
- ObjectMeta: templates.FluentdObjectMeta(appConfigMapName, util.MergeLabels(r.Fluentd.Labels, labelSelector), r.Fluentd),
- Data: map[string]string{},
+ return nil, err
+ }
+ pod := r.newCheckPod(hashKey)
+
+ err = r.Client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, pod)
+ if err == nil {
+ // check pod status and write into the configmap
+ switch pod.Status.Phase {
+ case v1.PodSucceeded:
+ return &ConfigCheckResult{
+ Valid: true,
+ Ready: true,
+ }, nil
+ case v1.PodPending:
+ fallthrough
+ case v1.PodRunning:
+ return &ConfigCheckResult{}, nil
+ case v1.PodFailed:
+ return &ConfigCheckResult{
+ Ready: true,
+ Valid: false,
+ }, nil
+ default:
+ return nil, errors.Errorf("invalid pod status %s, unable to validate config", pod.Status.Phase)
}
}
- return current
+
+ if err != nil && !apierrors.IsNotFound(err) {
+ return nil, errors.WrapIff(err, "failed to get configcheck pod %s:%s", pod.Namespace, pod.Name)
+ }
+
+ checkSecret := r.newCheckSecret(hashKey)
+
+ err = r.Client.Create(context.TODO(), checkSecret)
+ if err != nil && !apierrors.IsAlreadyExists(err) {
+ return nil, errors.WrapIf(err, "failed to create secret for fluentd configcheck")
+ }
+
+ err = r.Client.Create(context.TODO(), pod)
+ if err != nil {
+ return nil, errors.WrapIf(err, "failed to create pod for fluentd configcheck")
+ }
+
+ return &ConfigCheckResult{}, nil
+}
+
+func (r *Reconciler) configCheckCleanup(currentHash string) ([]string, error) {
+ var multierr error
+ var removedHashes = make([]string, 0)
+ for configHash := range r.Logging.Status.ConfigCheckResults {
+ if configHash == currentHash {
+ continue
+ }
+ if err := r.Client.Delete(context.TODO(), r.newCheckSecret(configHash)); err != nil {
+ if !apierrors.IsNotFound(err) {
+ multierr = errors.Combine(multierr,
+ errors.Wrapf(err, "failed to remove config check secret %s", configHash))
+ continue
+ }
+ }
+ if err := r.Client.Delete(context.TODO(), r.newCheckPod(configHash)); err != nil {
+ if !apierrors.IsNotFound(err) {
+ multierr = errors.Combine(multierr,
+ errors.Wrapf(err, "failed to remove config check pod %s", configHash))
+ continue
+ }
+ }
+ removedHashes = append(removedHashes, configHash)
+ }
+ return removedHashes, multierr
+}
+
+func (r *Reconciler) newCheckSecret(hashKey string) *v1.Secret {
+ return &v1.Secret{
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(fmt.Sprintf("fluentd-configcheck-%s", hashKey)),
+ util.MergeLabels(r.Logging.Labels, labelSelector),
+ r.Logging,
+ ),
+ Data: map[string][]byte{
+ ConfigKey: []byte(*r.config),
+ },
+ }
+}
+
+func (r *Reconciler) newCheckPod(hashKey string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(fmt.Sprintf("fluentd-configcheck-%s", hashKey)),
+ util.MergeLabels(r.Logging.Labels, labelSelector),
+ r.Logging,
+ ),
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
+ {
+ Name: "config",
+ VolumeSource: v1.VolumeSource{
+ Secret: &v1.SecretVolumeSource{
+ SecretName: r.Logging.QualifiedName(fmt.Sprintf("fluentd-configcheck-%s", hashKey)),
+ },
+ },
+ },
+ },
+ Containers: []v1.Container{
+ {
+ Name: "fluentd",
+ Image: fmt.Sprintf("%s:%s",
+ r.Logging.Spec.FluentdSpec.Image.Repository, r.Logging.Spec.FluentdSpec.Image.Tag),
+ ImagePullPolicy: v1.PullPolicy(r.Logging.Spec.FluentdSpec.Image.PullPolicy),
+ Args: []string{
+ "fluentd", "-c",
+ fmt.Sprintf("/fluentd/etc/%s", ConfigKey),
+ "--dry-run",
+ },
+ VolumeMounts: []v1.VolumeMount{
+ {
+ Name: "config",
+ MountPath: "/fluentd/etc/",
+ },
+ },
+ },
+ },
+ },
+ }
+ if r.Logging.Spec.FluentdSpec.TLS.Enabled {
+ tlsVolume := corev1.Volume{
+ Name: "fluentd-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: r.Logging.Spec.FluentdSpec.TLS.SecretName,
+ },
+ },
+ }
+ pod.Spec.Volumes = append(pod.Spec.Volumes, tlsVolume)
+ volumeMount := corev1.VolumeMount{
+ Name: "fluentd-tls",
+ MountPath: "/fluentd/tls/",
+ }
+ pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, volumeMount)
+
+ }
+ return pod
}
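// Sketch: the naming scheme used by configHash/newCheckPod above — an FNV-32
// hash of the rendered config keys both the check pod/secret names and the
// ConfigCheckResults status map, so an unchanged config never spawns a second
// check pod. Standalone illustration; the sample config string is made up.
package main

import (
	"fmt"
	"hash/fnv"
)

func configHashOf(config string) string {
	hasher := fnv.New32()
	_, _ = hasher.Write([]byte(config)) // hash.Hash.Write never returns an error
	return fmt.Sprintf("%x", hasher.Sum32())
}

func main() {
	hash := configHashOf("@type forward\nport 24240\n")
	fmt.Println("fluentd-configcheck-" + hash) // same config => same check pod name
}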
diff --git a/pkg/resources/fluentd/config.go b/pkg/resources/fluentd/config.go
index f9c32c0b2..5aa5e642a 100644
--- a/pkg/resources/fluentd/config.go
+++ b/pkg/resources/fluentd/config.go
@@ -1,18 +1,16 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentd
@@ -39,25 +37,6 @@ var fluentdInputTemplate = `
@type prometheus_output_monitor
-# Input plugin
-<source>
- @type forward
- port 24240
- {{ if .TLS.Enabled }}
- <security>
- self_hostname fluentd
- shared_key {{ .TLS.SharedKey }}
- </security>
- <transport tls>
- version TLSv1_2
- ca_path {{ .TLS.CACertFile }}
- cert_path {{ .TLS.CertFile }}
- private_key_path {{ .TLS.KeyFile }}
- client_cert_auth true
- </transport>
- {{- end }}
-</source>
-
# Prevent fluentd from handling records containing its own logs. Otherwise
# it can lead to an infinite loop, when error in sending one message generates
# another message which also fails to be sent and so on.
@@ -69,23 +48,6 @@ var fluentdInputTemplate = `
@type null
-
- @type rewrite_tag_filter
-
- key $.kubernetes.namespace_name
- pattern ^(.+)$
- tag $1.${tag_parts[0]}
-
-
-
-
- @type rewrite_tag_filter
-
- key $.kubernetes.labels.app
- pattern ^(.+)$
- tag $1.${tag_parts[0]}.${tag_parts[1]}
-
-
`
var fluentdOutputTemplate = `
diff --git a/pkg/resources/fluentd/configmap.go b/pkg/resources/fluentd/configmap.go
index 88f80b39a..cf7e049cf 100644
--- a/pkg/resources/fluentd/configmap.go
+++ b/pkg/resources/fluentd/configmap.go
@@ -1,78 +1,15 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentd
-
-import (
- "bytes"
- "fmt"
- "github.com/banzaicloud/logging-operator/pkg/resources/templates"
- "github.com/banzaicloud/logging-operator/pkg/util"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "text/template"
-)
-
-type fluentdTLSConfig struct {
- Enabled bool
- SharedKey string
- CACertFile string
- CertFile string
- KeyFile string
-}
-
-type fluentdConfig struct {
- TLS fluentdTLSConfig
-}
-
-func (r *Reconciler) configMap() runtime.Object {
- tlsConfig := fluentdTLSConfig{
- Enabled: r.Fluentd.Spec.TLS.Enabled,
- SharedKey: r.Fluentd.Spec.TLS.SharedKey,
- }
- if r.Fluentd.Spec.TLS.SecretType == "tls" {
- tlsConfig.CertFile = "/fluentd/tls/tls.crt"
- tlsConfig.KeyFile = "/fluentd/tls/tls.key"
- tlsConfig.CACertFile = "/fluentd/tls/ca.crt"
- } else {
- tlsConfig.CertFile = "/fluentd/tls/serverCert"
- tlsConfig.KeyFile = "/fluentd/tls/serverKey"
- tlsConfig.CACertFile = "/fluentd/tls/caCert"
- }
- input := fluentdConfig{TLS: tlsConfig}
- return &corev1.ConfigMap{
- ObjectMeta: templates.FluentdObjectMeta(configMapName, util.MergeLabels(r.Fluentd.Labels, labelSelector), r.Fluentd),
- Data: map[string]string{
- "fluent.conf": fluentdDefaultTemplate,
- "input.conf": generateConfig(input),
- "devnull.conf": fluentdOutputTemplate,
- },
- }
-}
-
-func generateConfig(input fluentdConfig) string {
- output := new(bytes.Buffer)
- tmpl, err := template.New("test").Parse(fluentdInputTemplate)
- if err != nil {
- return ""
- }
- err = tmpl.Execute(output, input)
- if err != nil {
- return ""
- }
- outputString := fmt.Sprint(output.String())
- return outputString
-}
diff --git a/pkg/resources/fluentd/configsecret.go b/pkg/resources/fluentd/configsecret.go
new file mode 100644
index 000000000..ca6a6dd1c
--- /dev/null
+++ b/pkg/resources/fluentd/configsecret.go
@@ -0,0 +1,68 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentd
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+
+ "github.com/banzaicloud/logging-operator/pkg/resources/templates"
+ "github.com/banzaicloud/logging-operator/pkg/util"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+type fluentdConfig struct {
+ TLS struct {
+ Enabled bool
+ SharedKey string
+ }
+}
+
+func generateConfig(input fluentdConfig) string {
+ output := new(bytes.Buffer)
+ tmpl, err := template.New("test").Parse(fluentdInputTemplate)
+ if err != nil {
+ return ""
+ }
+ err = tmpl.Execute(output, input)
+ if err != nil {
+ return ""
+ }
+ outputString := fmt.Sprint(output.String())
+ return outputString
+}
+
+func (r *Reconciler) secretConfig() runtime.Object {
+ input := fluentdConfig{
+ TLS: struct {
+ Enabled bool
+ SharedKey string
+ }{
+ Enabled: r.Logging.Spec.FluentdSpec.TLS.Enabled,
+ SharedKey: r.Logging.Spec.FluentdSpec.TLS.SharedKey,
+ },
+ }
+ return &corev1.Secret{
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(SecretConfigName), util.MergeLabels(r.Logging.Labels, labelSelector), r.Logging),
+ Data: map[string][]byte{
+ "fluent.conf": []byte(fluentdDefaultTemplate),
+ "input.conf": []byte(generateConfig(input)),
+ "devnull.conf": []byte(fluentdOutputTemplate),
+ },
+ }
+}
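// Sketch: the render-or-empty pattern used by generateConfig above, shown with
// a throwaway template instead of fluentdInputTemplate. On any parse or execute
// error the function degrades to an empty config rather than failing hard.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func render(tmplText string, data interface{}) string {
	tmpl, err := template.New("cfg").Parse(tmplText)
	if err != nil {
		return ""
	}
	out := new(bytes.Buffer)
	if err := tmpl.Execute(out, data); err != nil {
		return ""
	}
	return out.String()
}

func main() {
	cfg := struct{ TLS struct{ Enabled bool } }{}
	cfg.TLS.Enabled = true
	fmt.Print(render("tls enabled: {{ .TLS.Enabled }}\n", cfg))
}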
diff --git a/pkg/resources/fluentd/deployment.go b/pkg/resources/fluentd/deployment.go
deleted file mode 100644
index fda22764d..000000000
--- a/pkg/resources/fluentd/deployment.go
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package fluentd
-
-import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
- "github.com/banzaicloud/logging-operator/pkg/resources/templates"
- "github.com/banzaicloud/logging-operator/pkg/util"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func (r *Reconciler) deployment() runtime.Object {
- deploymentName := "fluentd"
- if r.Fluentd.Labels["release"] != "" {
- deploymentName = r.Fluentd.Labels["release"] + "-fluentd"
- }
-
- return &appsv1.Deployment{
- ObjectMeta: templates.FluentdObjectMeta(deploymentName, util.MergeLabels(r.Fluentd.Labels, labelSelector), r.Fluentd),
- Spec: appsv1.DeploymentSpec{
- Replicas: util.IntPointer(1),
- Selector: &metav1.LabelSelector{
- MatchLabels: labelSelector,
- },
- Template: corev1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: util.MergeLabels(r.Fluentd.Labels, labelSelector),
- Annotations: r.Fluentd.Spec.Annotations,
- },
- Spec: corev1.PodSpec{
- Volumes: generateVolume(r.Fluentd),
- InitContainers: []corev1.Container{
- {
- Name: "volume-mount-hack",
- Image: r.Fluentd.Spec.VolumeModImage.Repository + ":" + r.Fluentd.Spec.VolumeModImage.Tag,
- ImagePullPolicy: corev1.PullPolicy(r.Fluentd.Spec.VolumeModImage.PullPolicy),
- Command: []string{"sh", "-c", "chmod -R 777 /buffers"},
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "buffer",
- MountPath: "/buffers",
- },
- },
- },
- },
- Containers: []corev1.Container{
- {
- Name: "fluentd",
- Image: r.Fluentd.Spec.Image.Repository + ":" + r.Fluentd.Spec.Image.Tag,
- ImagePullPolicy: corev1.PullPolicy(r.Fluentd.Spec.Image.PullPolicy),
- Ports: []corev1.ContainerPort{
- {
- Name: "monitor",
- ContainerPort: r.Fluentd.Spec.GetPrometheusPortFromAnnotation(),
- Protocol: "TCP",
- },
- {
- Name: "fluent-input",
- ContainerPort: 24240,
- Protocol: "TCP",
- },
- },
-
- VolumeMounts: generateVolumeMounts(r.Fluentd),
- Resources: r.Fluentd.Spec.Resources,
- },
- *newConfigMapReloader(r.Fluentd.Spec.ConfigReloaderImage),
- },
- Tolerations: r.Fluentd.Spec.Tolerations,
- },
- },
- },
- }
-}
-
-func newConfigMapReloader(spec loggingv1alpha1.ImageSpec) *corev1.Container {
- return &corev1.Container{
- Name: "config-reloader",
- ImagePullPolicy: corev1.PullPolicy(spec.PullPolicy),
- Image: spec.Repository + ":" + spec.Tag,
- Args: []string{
- "-volume-dir=/fluentd/etc",
- "-volume-dir=/fluentd/app-config/",
- "-webhook-url=http://127.0.0.1:24444/api/config.reload",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "config",
- MountPath: "/fluentd/etc",
- },
- {
- Name: "app-config",
- MountPath: "/fluentd/app-config/",
- },
- },
- }
-}
-
-func generateVolumeMounts(fluentd *loggingv1alpha1.Fluentd) (v []corev1.VolumeMount) {
- v = []corev1.VolumeMount{
- {
- Name: "config",
- MountPath: "/fluentd/etc/",
- },
- {
- Name: "app-config",
- MountPath: "/fluentd/app-config/",
- },
- {
- Name: "buffer",
- MountPath: "/buffers",
- },
- }
- if fluentd.Spec.TLS.Enabled {
- tlsRelatedVolume := []corev1.VolumeMount{
- {
- Name: "fluentd-tls",
- MountPath: "/fluentd/tls/",
- },
- }
- v = append(v, tlsRelatedVolume...)
- }
- return
-}
-
-func generateVolume(fluentd *loggingv1alpha1.Fluentd) (v []corev1.Volume) {
- v = []corev1.Volume{
- {
- Name: "config",
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "fluentd-config",
- },
- },
- },
- },
- {
- Name: "app-config",
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "fluentd-app-config",
- },
- },
- },
- },
- {
- Name: "buffer",
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "fluentd-buffer",
- ReadOnly: false,
- },
- },
- },
- }
- if fluentd.Spec.TLS.Enabled {
- tlsRelatedVolume := corev1.Volume{
- Name: "fluentd-tls",
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: fluentd.Spec.TLS.SecretName,
- },
- },
- }
- v = append(v, tlsRelatedVolume)
- }
- return
-}
diff --git a/pkg/resources/fluentd/fluentd.go b/pkg/resources/fluentd/fluentd.go
index f4ba4b5e3..f013399d5 100644
--- a/pkg/resources/fluentd/fluentd.go
+++ b/pkg/resources/fluentd/fluentd.go
@@ -1,38 +1,43 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentd
import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
+ "context"
+ "time"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
"github.com/banzaicloud/logging-operator/pkg/k8sutil"
"github.com/banzaicloud/logging-operator/pkg/resources"
"github.com/go-logr/logr"
- "github.com/goph/emperror"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
- configMapName = "fluentd-config"
- persistentVolumeName = "fluentd-buffer"
- serviceName = "fluentd"
- appConfigMapName = "fluentd-app-config"
- serviceAccountName = "logging-fluentd"
- clusterRoleBindingName = "logging-fluentd"
- clusterRoleName = "logging-role-fluentd"
+ SecretConfigName = "fluentd"
+ AppSecretConfigName = "fluentd-app"
+ AppConfigKey = "fluentd.conf"
+ StatefulSetName = "fluentd"
+ ServiceName = "fluentd"
+
+ bufferVolumeName = "fluentd-buffer"
+ serviceAccountName = "fluentd"
+ roleBindingName = "fluentd"
+ roleName = "fluentd"
)
var labelSelector = map[string]string{
@@ -41,36 +46,87 @@ var labelSelector = map[string]string{
// Reconciler holds info what resource to reconcile
type Reconciler struct {
- resources.FluentdReconciler
+ Logging *v1beta1.Logging
+ *k8sutil.GenericResourceReconciler
+ config *string
}
-// New creates a new Fluentd reconciler
-func New(client client.Client, fluentd *loggingv1alpha1.Fluentd) *Reconciler {
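+// New creates a new Fluentd reconciler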
+func New(client client.Client, log logr.Logger, logging *v1beta1.Logging, config *string) *Reconciler {
return &Reconciler{
- FluentdReconciler: resources.FluentdReconciler{
- Client: client,
- Fluentd: fluentd,
- },
+ Logging: logging,
+ GenericResourceReconciler: k8sutil.NewReconciler(client, log),
+ config: config,
}
}
// Reconcile reconciles the fluentd resource
-func (r *Reconciler) Reconcile(log logr.Logger) error {
+func (r *Reconciler) Reconcile() (*reconcile.Result, error) {
+ // Config check and cleanup if enabled
+ if !r.Logging.Spec.FlowConfigCheckDisabled {
+ hash, err := r.configHash()
+ if err != nil {
+ return nil, err
+ }
+ if result, ok := r.Logging.Status.ConfigCheckResults[hash]; ok {
+ // We already have an existing configcheck result:
+ // - bail out if it was unsuccessful
+ // - cleanup previous results if it's successful
+ if !result {
+ return nil, errors.Errorf("current config is invalid")
+ }
+ var removedHashes []string
+ if removedHashes, err = r.configCheckCleanup(hash); err != nil {
+ r.Log.Error(err, "failed to cleanup resources")
+ }
+ if len(removedHashes) > 0 {
+ for _, removedHash := range removedHashes {
+ delete(r.Logging.Status.ConfigCheckResults, removedHash)
+ }
+ if err := r.Client.Status().Update(context.TODO(), r.Logging); err != nil {
+ return nil, errors.WrapWithDetails(err, "failed to update status", "logging", r.Logging)
+ } else {
+ // explicitly ask for a requeue to short circuit the controller loop after the status update
+ return &reconcile.Result{Requeue: true}, nil
+ }
+ }
+ } else {
+ // We don't have an existing result
+ // - let's create what's necessary to have one
+ // - if the result is ready write it into the status
+ result, err := r.configCheck()
+ if err != nil {
+ return nil, errors.WrapIf(err, "failed to validate config")
+ }
+ if result.Ready {
+ r.Logging.Status.ConfigCheckResults[hash] = result.Valid
+ if err := r.Client.Status().Update(context.TODO(), r.Logging); err != nil {
+ return nil, errors.WrapWithDetails(err, "failed to update status", "logging", r.Logging)
+ } else {
+ // explicitly ask for a requeue to short circuit the controller loop after the status update
+ return &reconcile.Result{Requeue: true}, nil
+ }
+ } else {
+ r.Log.Info("still waiting for the configcheck result...")
+ return &reconcile.Result{RequeueAfter: time.Second}, nil
+ }
+ }
+ }
+
for _, res := range []resources.Resource{
r.serviceAccount,
r.clusterRole,
r.clusterRoleBinding,
- r.configMap,
+ r.secretConfig,
r.appconfigMap,
- r.pvc,
- r.deployment,
+ r.statefulset,
r.service,
} {
o := res()
- err := k8sutil.Reconcile(log, r.Client, o)
+ err := r.ReconcileResource(o)
if err != nil {
- return emperror.WrapWith(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind())
+ return nil, errors.WrapIf(err, "failed to reconcile resource")
}
}
- return nil
+
+ return nil, nil
}
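// Sketch: the decision tree Reconcile follows above around the
// Logging.Status.ConfigCheckResults map (config hash -> dry-run verdict),
// modelled with a plain map so the three branches are easy to see. The hashes
// and messages are illustrative only.
package main

import "fmt"

func main() {
	configCheckResults := map[string]bool{
		"0ld0hash": true, // an earlier, already validated config
	}
	currentHash := "a1b2c3d4"

	if valid, ok := configCheckResults[currentHash]; !ok {
		// no verdict yet: create the configcheck secret+pod, then requeue
		fmt.Println("waiting for configcheck of", currentHash)
	} else if !valid {
		// dry-run failed: stop reconciling and surface the error
		fmt.Println("config", currentHash, "is invalid")
	} else {
		// dry-run succeeded: drop stale results and continue with the rollout
		for hash := range configCheckResults {
			if hash != currentHash {
				delete(configCheckResults, hash)
			}
		}
		fmt.Println("config", currentHash, "validated")
	}
}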
diff --git a/pkg/resources/fluentd/pvc.go b/pkg/resources/fluentd/pvc.go
deleted file mode 100644
index 86ac7c233..000000000
--- a/pkg/resources/fluentd/pvc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package fluentd
-
-import (
- "github.com/banzaicloud/logging-operator/pkg/resources/templates"
- "github.com/banzaicloud/logging-operator/pkg/util"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func (r *Reconciler) pvc() runtime.Object {
- return &corev1.PersistentVolumeClaim{
- ObjectMeta: templates.FluentdObjectMeta(persistentVolumeName, util.MergeLabels(r.Fluentd.Labels, labelSelector), r.Fluentd),
- Spec: r.Fluentd.Spec.FluentdPvcSpec,
- }
-}
diff --git a/pkg/resources/fluentd/rbac.go b/pkg/resources/fluentd/rbac.go
index f2acea28d..8cde06357 100644
--- a/pkg/resources/fluentd/rbac.go
+++ b/pkg/resources/fluentd/rbac.go
@@ -1,18 +1,16 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentd
@@ -24,8 +22,8 @@ import (
)
func (r *Reconciler) clusterRole() runtime.Object {
- return &rbacv1.ClusterRole{
- ObjectMeta: templates.FluentdObjectMetaClusterScope(clusterRoleName, r.Fluentd.Labels, r.Fluentd),
+ return &rbacv1.Role{
+ ObjectMeta: templates.FluentdObjectMeta(r.Logging.QualifiedName(roleName), r.Logging.Labels, r.Logging),
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
@@ -37,18 +35,18 @@ func (r *Reconciler) clusterRole() runtime.Object {
}
func (r *Reconciler) clusterRoleBinding() runtime.Object {
- return &rbacv1.ClusterRoleBinding{
- ObjectMeta: templates.FluentdObjectMetaClusterScope(clusterRoleBindingName, r.Fluentd.Labels, r.Fluentd),
+ return &rbacv1.RoleBinding{
+ ObjectMeta: templates.FluentdObjectMeta(r.Logging.QualifiedName(roleBindingName), r.Logging.Labels, r.Logging),
RoleRef: rbacv1.RoleRef{
- Kind: "ClusterRole",
+ Kind: "Role",
APIGroup: "rbac.authorization.k8s.io",
- Name: clusterRoleName,
+ Name: r.Logging.QualifiedName(roleName),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
- Name: serviceAccountName,
- Namespace: r.Fluentd.Namespace,
+ Name: r.Logging.QualifiedName(serviceAccountName),
+ Namespace: r.Logging.Spec.ControlNamespace,
},
},
}
@@ -56,6 +54,6 @@ func (r *Reconciler) clusterRoleBinding() runtime.Object {
func (r *Reconciler) serviceAccount() runtime.Object {
return &corev1.ServiceAccount{
- ObjectMeta: templates.FluentdObjectMeta(serviceAccountName, r.Fluentd.Labels, r.Fluentd),
+ ObjectMeta: templates.FluentdObjectMeta(r.Logging.QualifiedName(serviceAccountName), r.Logging.Labels, r.Logging),
}
}
diff --git a/pkg/resources/fluentd/service.go b/pkg/resources/fluentd/service.go
index 44a0b5d7f..6a6522c24 100644
--- a/pkg/resources/fluentd/service.go
+++ b/pkg/resources/fluentd/service.go
@@ -1,18 +1,16 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package fluentd
@@ -26,7 +24,8 @@ import (
func (r *Reconciler) service() runtime.Object {
return &corev1.Service{
- ObjectMeta: templates.FluentdObjectMeta(serviceName, util.MergeLabels(r.Fluentd.Labels, labelSelector), r.Fluentd),
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(ServiceName), util.MergeLabels(r.Logging.Labels, labelSelector), r.Logging),
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
@@ -36,7 +35,7 @@ func (r *Reconciler) service() runtime.Object {
},
},
Selector: labelSelector,
- Type: r.Fluentd.Spec.GetServiceType(),
+ Type: corev1.ServiceTypeClusterIP,
},
}
}
diff --git a/pkg/resources/fluentd/statefulset.go b/pkg/resources/fluentd/statefulset.go
new file mode 100644
index 000000000..b8676647e
--- /dev/null
+++ b/pkg/resources/fluentd/statefulset.go
@@ -0,0 +1,220 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentd
+
+import (
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/banzaicloud/logging-operator/pkg/resources/templates"
+ "github.com/banzaicloud/logging-operator/pkg/util"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func (r *Reconciler) statefulset() runtime.Object {
+ spec := *r.statefulsetSpec()
+ if !r.Logging.Spec.FluentdSpec.DisablePvc {
+ spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{
+ {
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(bufferVolumeName), util.MergeLabels(r.Logging.Labels, labelSelector), r.Logging),
+ Spec: r.Logging.Spec.FluentdSpec.FluentdPvcSpec,
+ },
+ }
+ }
+ return &appsv1.StatefulSet{
+ ObjectMeta: templates.FluentdObjectMeta(
+ r.Logging.QualifiedName(StatefulSetName), util.MergeLabels(r.Logging.Labels, labelSelector), r.Logging),
+ Spec: spec,
+ }
+}
+
+func (r *Reconciler) statefulsetSpec() *appsv1.StatefulSetSpec {
+ return &appsv1.StatefulSetSpec{
+ Replicas: util.IntPointer(1),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: labelSelector,
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: r.generatePodMeta(),
+ Spec: corev1.PodSpec{
+ Volumes: r.generateVolume(),
+ InitContainers: []corev1.Container{
+ {
+ Name: "volume-mount-hack",
+ Image: r.Logging.Spec.FluentdSpec.VolumeModImage.Repository + ":" + r.Logging.Spec.FluentdSpec.VolumeModImage.Tag,
+ ImagePullPolicy: corev1.PullPolicy(r.Logging.Spec.FluentdSpec.VolumeModImage.PullPolicy),
+ Command: []string{"sh", "-c", "chmod -R 777 /buffers"},
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: r.Logging.QualifiedName(bufferVolumeName),
+ MountPath: "/buffers",
+ },
+ },
+ },
+ },
+ Containers: []corev1.Container{
+ *r.fluentContainer(),
+ *newConfigMapReloader(r.Logging.Spec.FluentdSpec.ConfigReloaderImage),
+ },
+ NodeSelector: r.Logging.Spec.FluentdSpec.NodeSelector,
+ Tolerations: r.Logging.Spec.FluentdSpec.Tolerations,
+ },
+ },
+ }
+}
+
+func (r *Reconciler) fluentContainer() *corev1.Container {
+ return &corev1.Container{
+ Name: "fluentd",
+ Image: r.Logging.Spec.FluentdSpec.Image.Repository + ":" + r.Logging.Spec.FluentdSpec.Image.Tag,
+ ImagePullPolicy: corev1.PullPolicy(r.Logging.Spec.FluentdSpec.Image.PullPolicy),
+ Ports: generatePorts(r.Logging.Spec.FluentdSpec),
+ VolumeMounts: r.generateVolumeMounts(),
+ Resources: r.Logging.Spec.FluentdSpec.Resources,
+ }
+}
+
+func (r *Reconciler) generatePodMeta() metav1.ObjectMeta {
+ meta := metav1.ObjectMeta{
+ Labels: util.MergeLabels(r.Logging.Labels, labelSelector),
+ }
+ if r.Logging.Spec.FluentdSpec.Annotations != nil {
+ meta.Annotations = r.Logging.Spec.FluentdSpec.Annotations
+ }
+ return meta
+}
+
+func newConfigMapReloader(spec v1beta1.ImageSpec) *corev1.Container {
+ return &corev1.Container{
+ Name: "config-reloader",
+ ImagePullPolicy: corev1.PullPolicy(spec.PullPolicy),
+ Image: spec.Repository + ":" + spec.Tag,
+ Args: []string{
+ "-volume-dir=/fluentd/etc",
+ "-volume-dir=/fluentd/app-config/",
+ "-webhook-url=http://127.0.0.1:24444/api/config.reload",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "config",
+ MountPath: "/fluentd/etc",
+ },
+ {
+ Name: "app-config",
+ MountPath: "/fluentd/app-config/",
+ },
+ },
+ }
+}
+
+func generatePorts(spec *v1beta1.FluentdSpec) []corev1.ContainerPort {
+ ports := []corev1.ContainerPort{
+ {
+ Name: "fluent-input",
+ ContainerPort: spec.Port,
+ Protocol: "TCP",
+ },
+ }
+ if spec.GetPrometheusPortFromAnnotation() != 0 {
+ ports = append(ports, corev1.ContainerPort{
+ Name: "monitor",
+ ContainerPort: spec.GetPrometheusPortFromAnnotation(),
+ Protocol: "TCP",
+ })
+ }
+ return ports
+}
+
+func (r *Reconciler) generateVolumeMounts() (v []corev1.VolumeMount) {
+ v = []corev1.VolumeMount{
+ {
+ Name: "config",
+ MountPath: "/fluentd/etc/",
+ },
+ {
+ Name: "app-config",
+ MountPath: "/fluentd/app-config/",
+ },
+ {
+ Name: r.Logging.QualifiedName(bufferVolumeName),
+ MountPath: "/buffers",
+ },
+ }
+ if r.Logging.Spec.FluentdSpec.TLS.Enabled {
+ tlsRelatedVolume := []corev1.VolumeMount{
+ {
+ Name: "fluentd-tls",
+ MountPath: "/fluentd/tls/",
+ },
+ }
+ v = append(v, tlsRelatedVolume...)
+ }
+ return
+}
+
+func (r *Reconciler) generateVolume() (v []corev1.Volume) {
+ v = []corev1.Volume{
+ {
+ Name: "config",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: r.Logging.QualifiedName(SecretConfigName),
+ },
+ },
+ },
+ {
+ Name: "app-config",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: r.Logging.QualifiedName(AppSecretConfigName),
+ },
+ },
+ },
+ }
+ if !r.Logging.Spec.FluentdSpec.DisablePvc {
+ bufferVolume := corev1.Volume{
+ Name: r.Logging.QualifiedName(bufferVolumeName),
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: r.Logging.QualifiedName(bufferVolumeName),
+ ReadOnly: false,
+ },
+ },
+ }
+ v = append(v, bufferVolume)
+ } else {
+ bufferVolume := corev1.Volume{
+ Name: r.Logging.QualifiedName(bufferVolumeName),
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ }
+ v = append(v, bufferVolume)
+ }
+ if r.Logging.Spec.FluentdSpec.TLS.Enabled {
+ tlsRelatedVolume := corev1.Volume{
+ Name: "fluentd-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: r.Logging.Spec.FluentdSpec.TLS.SecretName,
+ },
+ },
+ }
+ v = append(v, tlsRelatedVolume)
+ }
+ return
+}
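// Sketch: the buffer-volume choice generateVolume makes above, pulled out as a
// standalone helper — a PVC-backed buffer by default, an emptyDir when
// persistence is disabled via DisablePvc. The helper and volume names are
// illustrative.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func bufferVolume(name string, disablePvc bool) corev1.Volume {
	if disablePvc {
		return corev1.Volume{
			Name:         name,
			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
		}
	}
	return corev1.Volume{
		Name: name,
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: name},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", bufferVolume("logging-fluentd-buffer", false))
}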
diff --git a/pkg/resources/model/system.go b/pkg/resources/model/system.go
new file mode 100644
index 000000000..978ef3f45
--- /dev/null
+++ b/pkg/resources/model/system.go
@@ -0,0 +1,165 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/banzaicloud/logging-operator/pkg/model/common"
+ "github.com/banzaicloud/logging-operator/pkg/model/input"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/model/types"
+ "github.com/banzaicloud/logging-operator/pkg/plugins"
+ "github.com/go-logr/logr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type LoggingResources struct {
+ client client.Reader
+ logger logr.Logger
+ logging *v1beta1.Logging
+ Outputs []v1beta1.Output
+ Flows []v1beta1.Flow
+ ClusterOutputs []v1beta1.ClusterOutput
+ ClusterFlows []v1beta1.ClusterFlow
+}
+
+func NewLoggingResources(logging *v1beta1.Logging, client client.Reader, logger logr.Logger) *LoggingResources {
+ return &LoggingResources{
+ client: client,
+ logger: logger,
+ logging: logging,
+ Outputs: make([]v1beta1.Output, 0),
+ ClusterOutputs: make([]v1beta1.ClusterOutput, 0),
+ Flows: make([]v1beta1.Flow, 0),
+ ClusterFlows: make([]v1beta1.ClusterFlow, 0),
+ }
+}
+
+func (l *LoggingResources) CreateModel() (*types.Builder, error) {
+ forwardInput := input.NewForwardInputConfig()
+ if l.logging.Spec.FluentdSpec != nil && l.logging.Spec.FluentdSpec.TLS.Enabled {
+ forwardInput.Transport = &common.Transport{
+ Version: "TLSv1_2",
+ CaPath: "/fluentd/tls/ca.crt",
+ CertPath: "/fluentd/tls/tls.crt",
+ PrivateKeyPath: "/fluentd/tls/tls.key",
+ ClientCertAuth: true,
+ }
+ forwardInput.Security = &common.Security{
+ SelfHostname: "fluentd",
+ SharedKey: l.logging.Spec.FluentdSpec.TLS.SharedKey,
+ }
+ }
+ rootInput, err := forwardInput.ToDirective(secret.NewSecretLoader(l.client, l.logging.Spec.ControlNamespace))
+ if err != nil {
+ return nil, errors.WrapIf(err, "failed to create root input")
+ }
+ system := types.NewSystem(rootInput, types.NewRouter())
+ for _, flow := range l.Flows {
+ flow, err := l.CreateFlowFromCustomResource(flow, flow.Namespace)
+ if err != nil {
+ // TODO set flow status to error?
+ return nil, err
+ }
+ err = system.RegisterFlow(flow)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, flowCr := range l.ClusterFlows {
+ flow, err := l.CreateFlowFromCustomResource(v1beta1.Flow{
+ TypeMeta: flowCr.TypeMeta,
+ ObjectMeta: flowCr.ObjectMeta,
+ Spec: flowCr.Spec,
+ Status: flowCr.Status,
+ }, "")
+ if err != nil {
+ // TODO set flow status to error?
+ return nil, err
+ }
+ err = system.RegisterFlow(flow)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(l.Flows) == 0 && len(l.ClusterFlows) == 0 {
+ l.logger.Info("no flows found, generating empty model")
+ }
+ return system, nil
+}
+
+func (l *LoggingResources) CreateFlowFromCustomResource(flowCr v1beta1.Flow, namespace string) (*types.Flow, error) {
+ flow, err := types.NewFlow(namespace, flowCr.Spec.Selectors)
+ if err != nil {
+ return nil, err
+ }
+ outputs := []types.Output{}
+ var multierr error
+FindOutputForAllRefs:
+ for _, outputRef := range flowCr.Spec.OutputRefs {
+ // only namespaced flows should use namespaced outputs
+ if namespace != "" {
+ for _, output := range l.Outputs {
+ // only an output from the same namespace can be used with a matching name
+ if output.Namespace == namespace && outputRef == output.Name {
+ plugin, err := plugins.CreateOutput(output.Spec, secret.NewSecretLoader(l.client, output.Namespace))
+ if err != nil {
+ multierr = errors.Combine(multierr, errors.WrapIff(err, "failed to create configured output %s", outputRef))
+ continue FindOutputForAllRefs
+ }
+ outputs = append(outputs, plugin)
+ continue FindOutputForAllRefs
+ }
+ }
+ }
+ for _, clusterOutput := range l.ClusterOutputs {
+ if outputRef == clusterOutput.Name {
+ plugin, err := plugins.CreateOutput(clusterOutput.Spec.OutputSpec, secret.NewSecretLoader(l.client, clusterOutput.Namespace))
+ if err != nil {
+ multierr = errors.Combine(multierr, errors.WrapIff(err, "failed to create configured output %s", outputRef))
+ continue FindOutputForAllRefs
+ }
+ outputs = append(outputs, plugin)
+ continue FindOutputForAllRefs
+ }
+ }
+ multierr = errors.Combine(multierr, errors.Errorf("referenced output not found: %s", outputRef))
+ }
+ flow.WithOutputs(outputs...)
+
+ // Filter
+ var filters []types.Filter
+ for i, f := range flowCr.Spec.Filters {
+ filter, err := plugins.CreateFilter(f, secret.NewSecretLoader(l.client, flowCr.Namespace))
+ if err != nil {
+ multierr = errors.Combine(multierr, errors.Errorf("failed to create filter with index %d for flow %s", i, flowCr.Name))
+ continue
+ }
+ filters = append(filters, filter)
+ }
+ flow.WithFilters(filters...)
+
+ return flow, multierr
+}
+
+func isEnabled(namespace string, output v1beta1.ClusterOutputSpec) bool {
+ for _, enabledNs := range output.EnabledNamespaces {
+ if enabledNs == namespace {
+ return true
+ }
+ }
+ return false
+}
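// Sketch: wiring the LoggingResources model builder added in system.go.
// Assumes the controller has already listed the Flow/Output custom resources;
// turning the returned builder into the final fluentd config happens elsewhere
// in the operator. The wrapper function and package name are illustrative.
package controllers

import (
	"github.com/banzaicloud/logging-operator/api/v1beta1"
	"github.com/banzaicloud/logging-operator/pkg/resources/model"
	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func buildLoggingModel(reader client.Reader, log logr.Logger, logging *v1beta1.Logging,
	flows []v1beta1.Flow, outputs []v1beta1.Output) error {
	resources := model.NewLoggingResources(logging, reader, log)
	resources.Flows = flows
	resources.Outputs = outputs

	system, err := resources.CreateModel()
	if err != nil {
		return err
	}
	_ = system // a *types.Builder; the caller renders it into the fluentd config
	return nil
}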
diff --git a/pkg/resources/plugins/alibaba.go b/pkg/resources/plugins/alibaba.go
deleted file mode 100644
index 0b198087a..000000000
--- a/pkg/resources/plugins/alibaba.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// AlibabaOutput CRD name
-const AlibabaOutput = "alibaba"
-
-// AlibabaDefaultValues for Alibaba OSS output plugin
-var AlibabaDefaultValues = map[string]string{
- "buffer_chunk_limit": "1m",
- "buffer_path": "/buffers/ali",
- "time_slice_format": "%Y%m%d",
- "time_slice_wait": "10m",
- "oss_object_key_format": "%{time_slice}/%{host}-%{uuid}.%{file_ext}",
-}
-
-// AlibabaTemplate for Alibaba OSS output plugin
-const AlibabaTemplate = `
-
- @type oss
- oss_key_id {{ .aliKeyId }}
- oss_key_secret {{ .aliKeySecret }}
- oss_bucket {{ .bucket }}
- oss_endpoint {{ .aliBucketEndpoint }}
- oss_object_key_format {{ .oss_object_key_format }}
-
- buffer_path {{ .buffer_path }}
- buffer_chunk_limit {{ .buffer_chunk_limit }}
- time_slice_format {{ .time_slice_format }}
- time_slice_wait {{ .time_slice_wait }}
- `
diff --git a/pkg/resources/plugins/azure.go b/pkg/resources/plugins/azure.go
deleted file mode 100644
index 5ec13ff42..000000000
--- a/pkg/resources/plugins/azure.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// AzureOutput CRD name
-const AzureOutput = "azure"
-
-// AzureDefaultValues for Azure ObjectStore output plugin
-var AzureDefaultValues = map[string]string{
- "bufferTimeKey": "3600",
- "bufferTimeWait": "10m",
- "bufferPath": "/buffers/azure",
- "format": "json",
- "timekey": "1h",
- "timekey_wait": "10m",
- "timekey_use_utc": "true",
- "time_slice_format": "%Y%m%d-%H",
- "azure_object_key_format": "%{path}%{time_slice}_%{index}.%{file_extension}",
- "path": "logs/${tag}/%Y/%m/%d/",
-}
-
-// AzureTemplate for Azure ObjectStore output plugin
-const AzureTemplate = `
-
- @type azurestorage
-
- azure_storage_account {{ .storageAccountName }}
- azure_storage_access_key {{ .storageAccountKey }}
- azure_container {{ .bucket }}
- azure_storage_type blob
- store_as gzip
- auto_create_container true
- azure_object_key_format {{ .azure_object_key_format }}
- path {{ .path }}
- time_slice_format {{ .time_slice_format }}
- # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / object_key_format,
- # need to specify tag for ${tag} and time for %Y/%m/%d in argument.
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
-
-
-
- @type {{ .format }}
-
- `
diff --git a/pkg/resources/plugins/configmap.go b/pkg/resources/plugins/configmap.go
deleted file mode 100644
index aa804d66c..000000000
--- a/pkg/resources/plugins/configmap.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-import (
- "bytes"
- "context"
- logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- "text/template"
-
- "github.com/Masterminds/sprig"
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
- "github.com/banzaicloud/logging-operator/pkg/resources/templates"
- "github.com/banzaicloud/logging-operator/pkg/util"
- "github.com/sirupsen/logrus"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-var log = logf.Log.WithName("plugins.configmap")
-
-func generateFluentdConfig(plugin *loggingv1alpha1.Plugin, client client.Client) (string, string) {
- var finalConfig string
- // Generate filters
- for _, filter := range plugin.Spec.Filter {
- values, err := GetDefaultValues(filter.Type)
- if err != nil {
- logrus.Infof("Error in rendering template: %s", err)
- return "", ""
- }
- values["pattern"] = plugin.Spec.Input.Label["app"]
- config, err := renderPlugin(filter, values, plugin.Namespace, client)
- if err != nil {
- logrus.Infof("Error in rendering template: %s", err)
- return "", ""
- }
- finalConfig += config
- }
-
- // Generate output
- for _, output := range plugin.Spec.Output {
- values, err := GetDefaultValues(output.Type)
- if err != nil {
- logrus.Infof("Error in rendering template: %s", err)
- return "", ""
- }
- values["pattern"] = plugin.Spec.Input.Label["app"]
- config, err := renderPlugin(output, values, plugin.Namespace, client)
- if err != nil {
- logrus.Infof("Error in rendering template: %s", err)
- return "", ""
- }
- finalConfig += config
- }
- return plugin.Name, finalConfig
-
-}
-
-// RenderPlugin general FPlugin renderer
-func renderPlugin(plugin loggingv1alpha1.FPlugin, baseMap map[string]string, namespace string, client client.Client) (string, error) {
- rawTemplate, err := GetTemplate(plugin.Type)
- if err != nil {
- return "", err
- }
- for _, param := range plugin.Parameters {
- k, v := param.GetValue(namespace, client)
- baseMap[k] = v
- }
-
- t := template.New("PluginTemplate").Funcs(sprig.TxtFuncMap())
- t, err = t.Parse(rawTemplate)
- if err != nil {
- return "", err
- }
- tpl := new(bytes.Buffer)
- err = t.Execute(tpl, baseMap)
- if err != nil {
- return "", err
- }
- return tpl.String(), nil
-}
-
-func (r *Reconciler) appConfigMap() runtime.Object {
- appConfigData := map[string]string{}
- labels := map[string]string{}
- for _, plugin := range r.PluginList.Items {
- labels = util.MergeLabels(labels, plugin.Labels)
- name, data := generateFluentdConfig(&plugin, r.Client)
- if name != "" {
- name = name + ".conf"
- }
- appConfigData[name] = data
- }
- pluginConfigMapNamespace := r.Namespace
- cmLog := log.WithValues("pluginConfigMapNamespace", pluginConfigMapNamespace)
- fluentdList := loggingv1alpha1.FluentdList{}
- err := r.Client.List(context.TODO(), client.MatchingLabels(map[string]string{}), &fluentdList)
- if err != nil {
- cmLog.Error(err, "Reconciler query failed.")
- }
-
- if len(fluentdList.Items) > 0 {
- cmLog = log.WithValues("pluginConfigMapNamespace", pluginConfigMapNamespace, "FluentdNamespace", fluentdList.Items[0].Namespace)
- cmLog.Info("Check Fluentd Namespace")
- if pluginConfigMapNamespace != fluentdList.Items[0].Namespace {
- pluginConfigMapNamespace = fluentdList.Items[0].Namespace
- cmLog = log.WithValues("pluginConfigMapNamespace", pluginConfigMapNamespace)
- cmLog.Info("Plugin ConfigMap Namespace Updated")
-
- }
- } else {
- log.Info("The is no Fluentd resource available")
- }
- return &corev1.ConfigMap{
- ObjectMeta: templates.PluginsObjectMeta(appConfigMapName, util.MergeLabels(map[string]string{}, labelSelector), pluginConfigMapNamespace),
- Data: appConfigData,
- }
-}
diff --git a/pkg/resources/plugins/elasticsearch.go b/pkg/resources/plugins/elasticsearch.go
deleted file mode 100644
index 21d103169..000000000
--- a/pkg/resources/plugins/elasticsearch.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package plugins
-
-// ElasticsearchOutput CRD name
-const ElasticsearchOutput = "elasticsearch"
-
-// ElasticsearchDefaultValues for Elasticsearch output plugin
-var ElasticsearchDefaultValues = map[string]string{
- "bufferPath": "/buffers/elasticsearch",
- "logLevel": "info",
- "logstashFormat": "true",
- "logstashPrefix": "logstash",
- "scheme": "scheme",
- "sslVerify": "true",
- "sslVersion": "TLSv1_2",
- "chunkLimit": "2M",
- "queueLimit": "8",
- "timekey": "1h",
- "timekey_wait": "10m",
- "timekey_use_utc": "true",
- "retry_max_interval": "30",
- "flush_interval": "5s",
- "flush_thread_count": "2",
- "retry_forever": "true",
- "user": "",
- "password": "",
- "log_es_400_reason": "false",
-}
-
-// ElasticsearchTemplate for Elasticsearch output plugin
-const ElasticsearchTemplate = `
-
- @type elasticsearch
- @log_level {{ .logLevel }}
- include_tag_key true
- type_name fluentd
- host {{ .host }}
- port {{ .port }}
- scheme {{ .scheme }}
- {{- if .sslVerify }}
- ssl_verify {{ .sslVerify }}
- {{- end}}
- {{- if .sslVersion }}
- ssl_version {{ .sslVersion }}
- {{- end}}
- logstash_format {{ .logstashFormat }}
- logstash_prefix {{ .logstashPrefix }}
- reconnect_on_error true
- {{- if .user }}
- user {{ .user }}
- {{- end}}
- {{- if .password }}
- password {{ .password }}
- {{- end}}
- log_es_400_reason {{ .log_es_400_reason }}
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
- flush_mode interval
- retry_type exponential_backoff
- flush_thread_count {{ .flush_thread_count }}
- flush_interval {{ .flush_interval }}
- retry_forever {{ .retry_forever }}
- retry_max_interval {{ .retry_max_interval }}
- chunk_limit_size {{ .chunkLimit }}
- queue_limit_length {{ .queueLimit }}
- overflow_action block
-
- `
diff --git a/pkg/resources/plugins/forward.go b/pkg/resources/plugins/forward.go
deleted file mode 100644
index fa40477af..000000000
--- a/pkg/resources/plugins/forward.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// ForwardOutput plugin name
-const ForwardOutput = "forward"
-
-// ForwardOutputDefaultValues default values for the plugin
-var ForwardOutputDefaultValues = map[string]string{
- "name": "target",
- "bufferPath": "/buffers/forward",
- "chunkLimit": "2M",
- "queueLimit": "8",
- "timekey": "1h",
- "timekey_wait": "10m",
- "timekey_use_utc": "true",
- "retry_max_interval": "30",
- "flush_interval": "5s",
- "flush_thread_count": "2",
- "retry_forever": "true",
- "tlsSharedKey": "", // enables tls and must match with the shared key on the remote side
- "tlsCACertFile": "/fluentd/tls/caCert",
- "tlsCertFile": "/fluentd/tls/clientCert",
- "tlsKeyFile": "/fluentd/tls/clientKey",
- "clientHostname": "fluentd.client", // this must be different from the hostname on the remote side
-}
-
-// ForwardOutputTemplate for the ForwardOutput plugin
-const ForwardOutputTemplate = `
-
- @type forward
-
- {{ if not (eq .tlsSharedKey "") -}}
- transport tls
- tls_version TLSv1_2
- tls_cert_path {{ .tlsCACertFile }}
- tls_client_cert_path {{ .tlsCertFile }}
- tls_client_private_key_path {{ .tlsKeyFile }}
-
- self_hostname {{ .clientHostname }}
- shared_key {{ .tlsSharedKey }}
-
- {{ end -}}
-
-
- name {{ .name }}
- host {{ .host }}
- port {{ .port }}
-
-
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
- flush_mode interval
- retry_type exponential_backoff
- flush_thread_count {{ .flush_thread_count }}
- flush_interval {{ .flush_interval }}
- retry_forever {{ .retry_forever }}
- retry_max_interval {{ .retry_max_interval }}
- chunk_limit_size {{ .chunkLimit }}
- queue_limit_length {{ .queueLimit }}
- overflow_action block
-
- `
diff --git a/pkg/resources/plugins/gcs.go b/pkg/resources/plugins/gcs.go
deleted file mode 100644
index 2114dd52b..000000000
--- a/pkg/resources/plugins/gcs.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// GCSOutput CRD name
-const GCSOutput = "gcs"
-
-// GCSDefaultValues for Google Cloud Storage output plugin
-var GCSDefaultValues = map[string]string{
- "bufferTimeKey": "3600",
- "bufferTimeWait": "10m",
- "bufferPath": "/buffers/gcs",
- "object_key_format": "%{path}%{time_slice}_%{index}.%{file_extension}",
- "path": "logs/${tag}/%Y/%m/%d/",
- "timekey": "1h",
- "timekey_wait": "10m",
- "timekey_use_utc": "true",
- "format": "json",
-}
-
-// GCSTemplate for Google Cloud Storage output plugin
-const GCSTemplate = `
-
- @type gcs
-
- project {{ .project }}
- credentialsJson { "private_key": {{ toJson .private_key }}, "client_email": "{{ .client_email }}" }
- bucket {{ .bucket }}
- object_key_format {{ .object_key_format }}
- path {{ .path }}
-
- # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / object_key_format,
- # need to specify tag for ${tag} and time for %Y/%m/%d in argument.
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .timekey }}
- timekey_wait {{ .timekey_wait }}
- timekey_use_utc {{ .timekey_use_utc }}
-
-
-
- @type {{ .format }}
-
- `
diff --git a/pkg/resources/plugins/init.go b/pkg/resources/plugins/init.go
deleted file mode 100644
index ed7950238..000000000
--- a/pkg/resources/plugins/init.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-import (
- "fmt"
-
- "github.com/sirupsen/logrus"
-)
-
-// Plugin register map
-var pluginRegister = map[string]Plugin{}
-
-// Plugin struct to store plugin information
-type Plugin struct {
- Template string
- DefaultValues map[string]string
-}
-
-// GetAll returns the plugin registry
-func GetAll() map[string]Plugin {
- return pluginRegister
-}
-
-// RegisterPlugin to use in CRD file
-func RegisterPlugin(name string, template string, values map[string]string) {
- logrus.Infof("Registering plugin: %s", name)
- pluginRegister[name] = Plugin{Template: template, DefaultValues: values}
-}
-
-// GetDefaultValues get default values by name
-func GetDefaultValues(name string) (map[string]string, error) {
- var err error
- value, ok := pluginRegister[name]
- if !ok {
- err = fmt.Errorf("plugin %q not found", name)
- }
- newMap := make(map[string]string)
- for k, v := range value.DefaultValues {
- newMap[k] = v
- }
- return newMap, err
-}
-
-// GetTemplate get template string by name
-func GetTemplate(name string) (string, error) {
- var err error
- value, ok := pluginRegister[name]
- if !ok {
- err = fmt.Errorf("plugin %q not found", name)
- }
- return value.Template, err
-}
-
-// Register plugins
-func init() {
- RegisterPlugin(S3Output, S3Template, S3DefaultValues)
- RegisterPlugin(GCSOutput, GCSTemplate, GCSDefaultValues)
- RegisterPlugin(AzureOutput, AzureTemplate, AzureDefaultValues)
- RegisterPlugin(AlibabaOutput, AlibabaTemplate, AlibabaDefaultValues)
- RegisterPlugin(ParserFilter, ParserFilterTemplate, ParserFilterDefaultValues)
- RegisterPlugin(ElasticsearchOutput, ElasticsearchTemplate, ElasticsearchDefaultValues)
- RegisterPlugin(LokiOutput, LokiTemplate, LokiDefaultValues)
- RegisterPlugin(ForwardOutput, ForwardOutputTemplate, ForwardOutputDefaultValues)
- RegisterPlugin(StdOutput, StdOutputTemplate, make(map[string]string))
-}
diff --git a/pkg/resources/plugins/loki.go b/pkg/resources/plugins/loki.go
deleted file mode 100644
index 02186af5b..000000000
--- a/pkg/resources/plugins/loki.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// LokiOutput CRD name
-const LokiOutput = "loki"
-
-// LokiDefaultValues for Loki output plugin
-var LokiDefaultValues = map[string]string{
- "url": "",
- "username": "",
- "password": "",
- "extraLabels": "",
- "flushInterval": "10s",
- "chunkLimitSize": "1m",
- "flushAtShutdown": "true",
-}
-
-// LokiTemplate for Loki output plugin
-const LokiTemplate = `
-
- @type kubernetes_loki
- url {{ .url }}
- username {{ .username }}
- password {{ .password }}
- extra_labels {{ .extraLabels }}
-
- flush_interval {{ .flushInterval }}
- chunk_limit_size {{ .chunkLimitSize }}
- flush_at_shutdown {{ .flushAtShutdown }}
-
- `
diff --git a/pkg/resources/plugins/parser.go b/pkg/resources/plugins/parser.go
deleted file mode 100644
index 35c7a0f63..000000000
--- a/pkg/resources/plugins/parser.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// ParserFilter CRD name
-const ParserFilter = "parser"
-
-// ParserFilterDefaultValues for parser plugin
-var ParserFilterDefaultValues = map[string]string{
- "keyName": "log",
- "reserveData": "true",
- "removeKeyNameField": "true",
- "replaceInvalidSequence": "false",
-}
-
-// ParserFilterTemplate for parser plugin
-const ParserFilterTemplate = `
-
- @type parser
- format {{ .format }}
- time_format {{ .timeFormat }}
- key_name {{ .keyName }}
- reserve_data {{ .reserveData }}
- remove_key_name_field {{ .removeKeyNameField }}
- replace_invalid_sequence {{ .replaceInvalidSequence }}
-
-`
diff --git a/pkg/resources/plugins/plugins.go b/pkg/resources/plugins/plugins.go
deleted file mode 100644
index 12fbd1474..000000000
--- a/pkg/resources/plugins/plugins.go
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
- "github.com/banzaicloud/logging-operator/pkg/k8sutil"
- "github.com/banzaicloud/logging-operator/pkg/resources"
- "github.com/go-logr/logr"
- "github.com/goph/emperror"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-const (
- appConfigMapName = "fluentd-app-config"
-)
-
-var labelSelector = map[string]string{
- "app": "fluentd",
-}
-
-// Reconciler holds info what resource to reconcile
-type Reconciler struct {
- resources.PluginReconciler
-}
-
-// New creates a new Plugin reconciler
-func New(client client.Client, pluginList *loggingv1alpha1.PluginList, namespace string) *Reconciler {
- return &Reconciler{
- PluginReconciler: resources.PluginReconciler{
- Client: client,
- Namespace: namespace,
- PluginList: pluginList,
- },
- }
-}
-
-// Reconcile reconciles the plugin resource
-func (r *Reconciler) Reconcile(log logr.Logger) error {
- for _, res := range []resources.Resource{
- r.appConfigMap,
- } {
- o := res()
- err := k8sutil.Reconcile(log, r.Client, o)
- if err != nil {
- return emperror.WrapWith(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind())
- }
- }
- return nil
-}
diff --git a/pkg/resources/plugins/s3.go b/pkg/resources/plugins/s3.go
deleted file mode 100644
index dac3bc973..000000000
--- a/pkg/resources/plugins/s3.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// S3Output CRD name
-const S3Output = "s3"
-
-// S3DefaultValues for Amazon S3 output plugin
-var S3DefaultValues = map[string]string{
- "bufferTimeKey": "3600",
- "bufferTimeWait": "10m",
- "bufferPath": "/buffers/s3",
- "format": "json",
- "timekey_use_utc": "true",
- "s3_object_key_format": "%{path}%{time_slice}_%{index}.%{file_extension}",
- "path": "logs/${tag}/%Y/%m/%d/",
-}
-
-// S3Template for Amazon S3 output plugin
-const S3Template = `
-
- @type s3
-
- {{- if .role_arn }}
-
- role_arn {{ .role_arn }}
- role_session_name {{ .role_session_name }}
-
- {{- end }}
-
- {{- if .instance_profile_ip_address }}
-
- ip_address {{ .instance_profile_ip_address }}
- port {{ .instance_profile_port }}
-
- {{- end }}
-
- {{- if .aws_key_id }}
- aws_key_id {{ .aws_key_id }}
- aws_sec_key {{ .aws_sec_key }}
- {{- end }}
- s3_bucket {{ .s3_bucket }}
- s3_region {{ .s3_region }}
- {{- if .s3_endpoint }}
- s3_endpoint {{ .s3_endpoint }}
- force_path_style true # This prevents AWS SDK from breaking endpoint URL
- {{- end }}
- store_as gzip_command
-
- path {{ .path }}
- s3_object_key_format {{ .s3_object_key_format }}
-
- # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / s3_object_key_format,
- # need to specify tag for ${tag} and time for %Y/%m/%d in argument.
-
- @type file
- path {{ .bufferPath }}
- timekey {{ .bufferTimeKey }}
- timekey_wait {{ .bufferTimeWait }}
- timekey_use_utc {{ .timekey_use_utc }}
-
-
- @type {{ .format }}
-
- `
diff --git a/pkg/resources/plugins/stdout.go b/pkg/resources/plugins/stdout.go
deleted file mode 100644
index eaf7570f7..000000000
--- a/pkg/resources/plugins/stdout.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package plugins
-
-// StdOutput plugin name
-const StdOutput = "stdout"
-
-// StdOutputTemplate for the StdOutput plugin
-const StdOutputTemplate = `
-
- @type stdout
- `
diff --git a/pkg/resources/reconciler.go b/pkg/resources/reconciler.go
index 87c85d500..6d83ea0db 100644
--- a/pkg/resources/reconciler.go
+++ b/pkg/resources/reconciler.go
@@ -1,51 +1,27 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package resources
import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
-// PluginReconciler reconciler struct for plugin
-type PluginReconciler struct {
- Client client.Client
- Namespace string
- PluginList *loggingv1alpha1.PluginList
-}
-
-// FluentdReconciler reconciler struct for fluentd
-type FluentdReconciler struct {
- Client client.Client
- Fluentd *loggingv1alpha1.Fluentd
-}
-
-// FluentbitReconciler reconciler struct for fluentbit
-type FluentbitReconciler struct {
- Client client.Client
- Fluentbit *loggingv1alpha1.Fluentbit
-}
-
// ComponentReconciler reconciler interface
-type ComponentReconciler interface {
- Reconcile(log logr.Logger) error
-}
+type ComponentReconciler func() (*reconcile.Result, error)
// Resource redeclaration of function with return type kubernetes Object
type Resource func() runtime.Object
diff --git a/pkg/resources/templates/templates.go b/pkg/resources/templates/templates.go
index cf6f7ea6c..aeac82ec4 100644
--- a/pkg/resources/templates/templates.go
+++ b/pkg/resources/templates/templates.go
@@ -1,100 +1,75 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package templates
import (
- loggingv1alpha1 "github.com/banzaicloud/logging-operator/pkg/apis/logging/v1alpha1"
+ "github.com/banzaicloud/logging-operator/api/v1beta1"
+ "github.com/banzaicloud/logging-operator/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// PluginsObjectMeta creates an objectMeta for resource plugin
-func PluginsObjectMeta(name string, labels map[string]string, namespace string) metav1.ObjectMeta {
- o := metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- Labels: labels,
- }
- return o
-}
-
-// FluentdObjectMeta creates an objectMeta for resource fluentd
-func FluentdObjectMeta(name string, labels map[string]string, fluentd *loggingv1alpha1.Fluentd) metav1.ObjectMeta {
+// FluentbitObjectMeta creates an objectMeta for resource fluentbit
+func FluentbitObjectMeta(name string, labels map[string]string, logging *v1beta1.Logging) metav1.ObjectMeta {
o := metav1.ObjectMeta{
Name: name,
- Namespace: fluentd.Namespace,
+ Namespace: logging.Spec.ControlNamespace,
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: fluentd.APIVersion,
- Kind: fluentd.Kind,
- Name: fluentd.Name,
- UID: fluentd.UID,
+ APIVersion: logging.APIVersion,
+ Kind: logging.Kind,
+ Name: logging.Name,
+ UID: logging.UID,
+ Controller: util.BoolPointer(true),
},
},
}
return o
}
-// FluentdObjectMetaClusterScope creates an objectMeta for resource fluentd
-func FluentdObjectMetaClusterScope(name string, labels map[string]string, fluentd *loggingv1alpha1.Fluentd) metav1.ObjectMeta {
+// FluentbitObjectMetaClusterScope creates a cluster scoped objectMeta for resource fluentbit
+func FluentbitObjectMetaClusterScope(name string, labels map[string]string, logging *v1beta1.Logging) metav1.ObjectMeta {
o := metav1.ObjectMeta{
Name: name,
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: fluentd.APIVersion,
- Kind: fluentd.Kind,
- Name: fluentd.Name,
- UID: fluentd.UID,
+ APIVersion: logging.APIVersion,
+ Kind: logging.Kind,
+ Name: logging.Name,
+ UID: logging.UID,
+ Controller: util.BoolPointer(true),
},
},
}
return o
}
-// FluentbitObjectMeta creates an objectMeta for resource fluentbit
-func FluentbitObjectMeta(name string, labels map[string]string, fluentbit *loggingv1alpha1.Fluentbit) metav1.ObjectMeta {
+// FluentdObjectMeta creates an objectMeta for resource fluentd
+func FluentdObjectMeta(name string, labels map[string]string, logging *v1beta1.Logging) metav1.ObjectMeta {
o := metav1.ObjectMeta{
Name: name,
- Namespace: fluentbit.Namespace,
+ Namespace: logging.Spec.ControlNamespace,
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: fluentbit.APIVersion,
- Kind: fluentbit.Kind,
- Name: fluentbit.Name,
- UID: fluentbit.UID,
- },
- },
- }
- return o
-}
-
-// FluentbitObjectMetaClusterScope creates a cluster scoped objectMeta for resource fluentbit
-func FluentbitObjectMetaClusterScope(name string, labels map[string]string, fluentbit *loggingv1alpha1.Fluentbit) metav1.ObjectMeta {
- o := metav1.ObjectMeta{
- Name: name,
- Labels: labels,
- OwnerReferences: []metav1.OwnerReference{
- {
- APIVersion: fluentbit.APIVersion,
- Kind: fluentbit.Kind,
- Name: fluentbit.Name,
- UID: fluentbit.UID,
+ APIVersion: logging.APIVersion,
+ Kind: logging.Kind,
+ Name: logging.Name,
+ UID: logging.UID,
+ Controller: util.BoolPointer(true),
},
},
}
diff --git a/pkg/util/util.go b/pkg/util/util.go
index f69706ebe..8469d1809 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -1,33 +1,52 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package util
+import (
+ "sort"
+
+ "github.com/iancoleman/orderedmap"
+)
+
// MergeLabels merges two map[string]string map
func MergeLabels(l map[string]string, l2 map[string]string) map[string]string {
- if len(l) == 0 {
- l = map[string]string{}
+ merged := make(map[string]string)
+ for lKey, lValue := range l {
+ merged[lKey] = lValue
}
for lKey, lValue := range l2 {
- l[lKey] = lValue
+ merged[lKey] = lValue
}
- return l
+ return merged
}
// IntPointer converts int32 to *int32
func IntPointer(i int32) *int32 {
return &i
}
+
+// BoolPointer converts bool to *bool
+func BoolPointer(b bool) *bool {
+ return &b
+}
+
+// OrderedStringMap returns a copy of the given map as an OrderedMap sorted by key
+func OrderedStringMap(original map[string]string) *orderedmap.OrderedMap {
+ o := orderedmap.New()
+ for k, v := range original {
+ o.Set(k, v)
+ }
+ o.SortKeys(sort.Strings)
+ return o
+}
diff --git a/scripts/check-header.sh b/scripts/check-header.sh
new file mode 100755
index 000000000..58c9b518f
--- /dev/null
+++ b/scripts/check-header.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+read -r -d '' EXPECTED </dev/null; then
+ # Replace the actual year with DATE so we can ignore the year when
+ # checking for the license header.
+ HEADER=$(head -n 13 $FILE | sed -E -e 's/Copyright © [0-9]+/Copyright © DATE/')
+ if [ "$HEADER" != "$EXPECTED" ]; then
+ echo "incorrect license header: $FILE"
+ STATUS=1
+ fi
+ fi
+done
+
+exit $STATUS
diff --git a/scripts/fmt-check.sh b/scripts/fmt-check.sh
index f9fc18399..8816c5e8b 100755
--- a/scripts/fmt-check.sh
+++ b/scripts/fmt-check.sh
@@ -8,4 +8,4 @@ if [[ -n "${bad_files}" ]]; then
echo "✖ gofmt needs to be run on the following files: "
echo "${bad_files}"
exit 1
-fi
+fi
\ No newline at end of file
diff --git a/scripts/generate.sh b/scripts/generate.sh
new file mode 100755
index 000000000..415e9eecb
--- /dev/null
+++ b/scripts/generate.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+
+function main {
+ case $1 in
+ cleanup)
+ cleanup
+ ;;
+ init)
+ init
+ openssl_conf
+ ;;
+ generate)
+ gen_ca
+ gen_server
+ gen_client
+ ;;
+ install)
+ install_fluentbit_secret
+ install_fluentd_secret
+ ;;
+ all)
+ init
+ openssl_conf
+ gen_ca
+ gen_server
+ gen_client
+ install_fluentbit_secret
+ install_fluentd_secret
+ ;;
+ *)
+ commands
+ ;;
+ esac
+}
+
+function commands {
+ echo "Usage: "
+ echo "$0 cleanup - Remove all files and folders created during process"
+ echo "$0 init - Create directories and generate openssl.conf"
+ echo "$0 generate - Generate CA, Server, Client keys and certificates"
+ echo "$0 install - Installs fluentbit-tls and fluentd-tls secrets on the kubernetes cluster"
+ echo "$0 all - Do all the steps automatically"
+}
+
+function cleanup {
+ rm -rf certs csr private
+ rm index.txt openssl.cnf serial
+}
+
+function init {
+ mkdir -p certs csr private
+ touch index.txt
+ echo "1000" > serial
+}
+
+function openssl_conf {
+cat << EOF > openssl.cnf
+[ ca ]
+#
+default_ca = CA_default
+
+[ CA_default ]
+# Directory and file locations.
+dir = .
+certs = ./certs
+crl_dir = ./crl
+new_certs_dir = ./newcerts
+database = ./index.txt
+serial = ./serial
+RANDFILE = ./private/.rand
+
+# The root key and root certificate.
+private_key = /private/ca.key.pem
+certificate = /certs/ca.crt.pem
+
+# SHA-1 is deprecated, so use SHA-2 instead.
+default_md = sha256
+
+name_opt = ca_default
+cert_opt = ca_default
+default_days = 365
+preserve = no
+policy = policy_strict
+
+[ req ]
+# Options for the req tool.
+default_bits = 4096
+distinguished_name = req_distinguished_name
+string_mask = utf8only
+
+# SHA-1 is deprecated, so use SHA-2 instead.
+default_md = sha256
+
+# Extension to add when the -x509 option is used.
+x509_extensions = v3_ca
+
+[ req_distinguished_name ]
+# See .
+countryName = Country Name (2 letter code)
+stateOrProvinceName = State or Province Name
+localityName = Locality Name
+0.organizationName = Organization Name
+organizationalUnitName = Organizational Unit Name
+commonName = Common Name (required)
+emailAddress = Email Address
+
+# Optionally, specify some defaults.
+countryName_default = US
+stateOrProvinceName_default = CA
+#localityName_default = Mountain View
+0.organizationName_default = Your company name
+#organizationalUnitName_default =
+emailAddress_default = foo@example.com
+
+[v3_ca]
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always, issuer
+basicConstraints = critical,CA:true
+keyUsage = critical, cRLSign, digitalSignature, keyCertSign
+
+[ client_cert ]
+# Extensions for client certificates.
+basicConstraints = CA:FALSE
+nsCertType = client, email
+nsComment = "OpenSSL Generated Client Certificate"
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, emailProtection
+
+[ server_cert ]
+# Extensions for server certificates.
+basicConstraints = CA:FALSE
+nsCertType = server
+nsComment = "OpenSSL Generated Server Certificate"
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer:always
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+
+[ policy_strict ]
+# The root CA should only sign intermediate certificates that match.
+# See the POLICY FORMAT section.
+countryName = match
+stateOrProvinceName = match
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+EOF
+}
+
+function gen_ca {
+ echo "Generate CA private key..."
+ openssl genrsa -out private/ca.key 4096
+
+ echo "Generate CA certificate..."
+ openssl req -config openssl.cnf \
+ -key private/ca.key \
+ -new -x509 -days 365 -sha256 -extensions v3_ca \
+ -out certs/ca.crt \
+ -subj '/CN=logging/O=Banzai Cloud/C=US'
+}
+
+function gen_server {
+ echo "Generate Server (fluentd) private key..."
+ openssl genrsa -out private/server.key 4096
+
+ echo "Generate Server CSR (fluentd)..."
+ openssl req -config openssl.cnf \
+ -key private/server.key \
+ -new -out csr/server.csr \
+ -subj '/CN=logging/O=Banzai Cloud/C=US'
+
+ echo "Generate Server certificate (fluentd)..."
+ openssl ca -config openssl.cnf -outdir certs \
+ -cert certs/ca.crt \
+ -keyfile private/ca.key \
+ -extensions server_cert -days 365 -notext -md sha256 \
+ -in csr/server.csr \
+ -out certs/server.crt \
+ -subj '/CN=fluentd/O=Banzai Cloud/C=US'
+}
+
+function gen_client {
+ echo "Generate Client (fluent-bit) private key..."
+ openssl genrsa -out private/client.key 4096
+
+ echo "Generate Client CSR (fluent-bit)..."
+ openssl req -config openssl.cnf \
+ -key private/client.key \
+ -new \
+ -out csr/client.csr \
+ -subj '/CN=fluent-bit/O=Banzai Cloud/C=US'
+
+ echo "Generate Client certificate (fluent-bit)..."
+ openssl ca -config openssl.cnf -outdir certs \
+ -cert certs/ca.crt \
+ -keyfile private/ca.key \
+ -extensions client_cert -days 365 -notext -md sha256 \
+ -in csr/client.csr \
+ -out certs/client.crt \
+ -subj '/CN=fluent-bit/O=Banzai Cloud/C=US'
+}
+
+function install_fluentbit_secret {
+ kubectl create secret generic fluentbit-tls \
+ --from-file=ca.crt=./certs/ca.crt \
+ --from-file=tls.crt=./certs/client.crt \
+ --from-file=tls.key=./private/client.key
+}
+
+function install_fluentd_secret {
+  kubectl create secret generic fluentd-tls \
+    --from-file=ca.crt=./certs/ca.crt \
+    --from-file=tls.crt=./certs/server.crt \
+    --from-file=tls.key=./private/server.key
+}
+
+main "$@"
diff --git a/scripts/misspell-check.sh b/scripts/misspell-check.sh
index 7e18fe7af..d3b28e28b 100755
--- a/scripts/misspell-check.sh
+++ b/scripts/misspell-check.sh
@@ -9,4 +9,4 @@ if [[ -n "${bad_files}" ]]; then
echo "✖ misspell needs to be run on the following files: "
echo "${bad_files}"
exit 1
-fi
+fi
\ No newline at end of file
diff --git a/version/version.go b/version/version.go
deleted file mode 100644
index 91f0e68c1..000000000
--- a/version/version.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package version
-
-// Version the operators version
-var (
- Version = "0.0.1"
-)