From 52750ab571fa352d7eb521fd079f85821b056b0e Mon Sep 17 00:00:00 2001
From: tarokkk
Date: Fri, 13 Sep 2019 22:44:11 +0200
Subject: [PATCH 01/35] initial logging-operator v2
---
.circleci/config.yml | 156 --
.github/ISSUE_TEMPLATE/bug_report.md | 27 -
.github/ISSUE_TEMPLATE/feature_request.md | 20 -
.github/PULL_REQUEST_TEMPLATE.md | 34 -
Dockerfile | 50 +-
Gopkg.lock | 1012 ---------
Gopkg.toml | 79 -
LICENCE => LICENSE | 2 +-
Makefile | 126 +-
Makefile.app | 6 +
PROJECT | 25 +
README.md | 88 +-
api/v1alpha2/clusterflow_types.go | 44 +
api/v1alpha2/clusterflow_types_test.go | 81 +
api/v1alpha2/clusteroutput_types.go | 50 +
api/v1alpha2/clusteroutput_types_test.go | 84 +
api/v1alpha2/common_types.go | 22 +
api/v1alpha2/flow_types.go | 63 +
api/v1alpha2/flow_types_test.go | 81 +
api/v1alpha2/fluentbit_types.go | 55 +
api/v1alpha2/fluentd_types.go | 58 +
api/v1alpha2/groupversion_info.go | 34 +
api/v1alpha2/logging_types.go | 178 ++
api/v1alpha2/output_types.go | 63 +
api/v1alpha2/output_types_test.go | 82 +
api/v1alpha2/suite_test.go | 73 +
.../v1alpha2}/zz_generated.deepcopy.go | 509 +++--
build/Dockerfile | 15 -
build/bin/entrypoint | 12 -
build/bin/user_setup | 13 -
charts/logging-operator-fluent/Chart.yaml | 18 -
.../templates/_helpers.tpl | 32 -
.../templates/fluentbit-cr.yaml | 33 -
.../templates/fluentd-cr.yaml | 37 -
.../templates/psp.yaml | 73 -
.../templates/rbac.yaml | 71 -
.../templates/secret.yaml | 22 -
charts/logging-operator-fluent/values.yaml | 45 -
.../.helmignore | 0
charts/logging-operator-logging/Chart.yaml | 5 +
.../README.md | 29 +-
.../templates/NOTES.txt | 0
.../templates/_helpers.tpl | 19 +-
.../templates/logging.yaml | 38 +
.../templates/secret.yaml | 34 +
charts/logging-operator-logging/values.yaml | 35 +
charts/logging-operator/Chart.yaml | 19 +-
charts/logging-operator/README.md | 35 +-
.../logging-dashboard_rev1.json | 1069 ----------
charts/logging-operator/templates/NOTES.txt | 0
.../logging-operator/templates/_helpers.tpl | 13 +
.../logging-operator/templates/configmap.yaml | 16 -
charts/logging-operator/templates/crd.yaml | 109 -
.../templates/deployment.yaml | 74 +-
.../templates/grafana-dashboard-logging.yaml | 12 -
.../logging.banzaicloud.com_clusterflows.yaml | 139 ++
...ogging.banzaicloud.com_clusteroutputs.yaml | 1795 ++++++++++++++++
.../logging.banzaicloud.com_flows.yaml | 142 ++
.../logging.banzaicloud.com_loggings.yaml | 332 +++
.../logging.banzaicloud.com_outputs.yaml | 1790 ++++++++++++++++
charts/logging-operator/templates/psp.yaml | 6 +-
charts/logging-operator/templates/rbac.yaml | 151 +-
.../logging-operator/templates/service.yaml | 18 +
charts/logging-operator/values.yaml | 83 +-
charts/nginx-logging-demo/Chart.yaml | 8 -
charts/nginx-logging-demo/README.md | 63 -
charts/nginx-logging-demo/templates/NOTES.txt | 24 -
.../templates/deployment.yaml | 51 -
.../nginx-logging-demo/templates/ingress.yaml | 39 -
.../nginx-logging-demo/templates/logging.yaml | 37 -
.../nginx-logging-demo/templates/service.yaml | 19 -
.../templates/tests/test-connection.yaml | 18 -
charts/nginx-logging-demo/values.yaml | 56 -
charts/nginx-logging-es-demo/.helmignore | 22 -
charts/nginx-logging-es-demo/Chart.yaml | 8 -
charts/nginx-logging-es-demo/README.md | 63 -
.../nginx-logging-es-demo/templates/NOTES.txt | 24 -
.../templates/_helpers.tpl | 32 -
.../templates/deployment.yaml | 51 -
.../templates/ingress.yaml | 39 -
.../templates/logging.yaml | 37 -
.../templates/service.yaml | 19 -
.../templates/tests/test-connection.yaml | 18 -
charts/nginx-logging-es-demo/values.yaml | 49 -
cmd/docgen/docgen.go | 92 -
cmd/docs.go | 300 +++
cmd/manager/main.go | 137 --
config/certmanager/certificate.yaml | 24 +
config/certmanager/kustomization.yaml | 26 +
config/certmanager/kustomizeconfig.yaml | 16 +
.../logging.banzaicloud.com_clusterflows.yaml | 139 ++
...ogging.banzaicloud.com_clusteroutputs.yaml | 1881 +++++++++++++++++
.../bases/logging.banzaicloud.com_flows.yaml | 142 ++
.../logging.banzaicloud.com_loggings.yaml | 332 +++
.../logging.banzaicloud.com_outputs.yaml | 1876 ++++++++++++++++
config/crd/kustomization.yaml | 37 +
config/crd/kustomizeconfig.yaml | 17 +
.../patches/cainjection_in_clusterflows.yaml | 8 +
.../cainjection_in_clusteroutputs.yaml | 8 +
config/crd/patches/cainjection_in_flows.yaml | 8 +
.../patches/cainjection_in_fluentbits.yaml | 8 +
.../crd/patches/cainjection_in_fluentds.yaml | 8 +
.../crd/patches/cainjection_in_loggings.yaml | 8 +
.../crd/patches/cainjection_in_outputs.yaml | 8 +
.../crd/patches/webhook_in_clusterflows.yaml | 17 +
.../patches/webhook_in_clusteroutputs.yaml | 17 +
config/crd/patches/webhook_in_flows.yaml | 17 +
config/crd/patches/webhook_in_fluentbits.yaml | 17 +
config/crd/patches/webhook_in_fluentds.yaml | 17 +
config/crd/patches/webhook_in_loggings.yaml | 17 +
config/crd/patches/webhook_in_outputs.yaml | 17 +
config/default/kustomization.yaml | 43 +
config/default/manager_auth_proxy_patch.yaml | 24 +
config/default/manager_image_patch.yaml | 12 +
.../manager_prometheus_metrics_patch.yaml | 19 +
config/default/manager_webhook_patch.yaml | 23 +
config/default/webhookcainjection_patch.yaml | 15 +
config/manager/kustomization.yaml | 2 +
config/manager/manager.yaml | 39 +
config/rbac/auth_proxy_role.yaml | 13 +
.../rbac/auth_proxy_role_binding.yaml | 16 +-
config/rbac/auth_proxy_service.yaml | 18 +
config/rbac/kustomization.yaml | 11 +
config/rbac/leader_election_role.yaml | 26 +
config/rbac/leader_election_role_binding.yaml | 12 +
config/rbac/role.yaml | 28 +
config/rbac/role_binding.yaml | 12 +
...ogging_v1alpha2_cluster_output_custom.yaml | 13 +
config/samples/logging_v1alpha2_flow.yaml | 16 +
.../samples/logging_v1alpha2_flow_custom.yaml | 14 +
.../logging_v1alpha2_logging_custom.yaml | 12 +
.../logging_v1alpha2_logging_default.yaml | 11 +
config/samples/logging_v1alpha2_output.yaml | 6 +
.../logging_v1alpha2_output_custom.yaml | 8 +
config/webhook/kustomization.yaml | 6 +
config/webhook/kustomizeconfig.yaml | 25 +
config/webhook/manifests.yaml | 0
config/webhook/service.yaml | 12 +
controllers/logging_controller.go | 283 +++
controllers/logging_controller_test.go | 552 +++++
controllers/suite_test.go | 142 ++
deploy/clusterrole.yaml | 60 -
.../crds/logging_v1alpha1_fluentbit_cr.yaml | 19 -
.../crds/logging_v1alpha1_fluentbit_crd.yaml | 33 -
deploy/crds/logging_v1alpha1_fluentd_cr.yaml | 33 -
deploy/crds/logging_v1alpha1_fluentd_crd.yaml | 33 -
deploy/crds/logging_v1alpha1_plugin_cr.yaml | 31 -
deploy/crds/logging_v1alpha1_plugin_crd.yaml | 33 -
deploy/operator.yaml | 33 -
deploy/service_account.yaml | 4 -
developer.md | 68 -
docs/crds.md | 276 +++
docs/developers.md | 154 ++
docs/example-s3.md | 114 +
docs/examples/es.md | 50 -
.../logging_flow_multiple_output.yaml | 11 +
docs/examples/logging_flow_single_output.yaml | 10 +
docs/examples/logging_flow_with_filters.yaml | 18 +
docs/examples/logging_logging_simple.yaml | 9 +
docs/examples/logging_logging_tls.yaml | 18 +
.../examples/logging_output_azurestorage.yaml | 23 +
docs/examples/logging_output_gcs.yaml | 19 +
docs/examples/logging_output_s3.yaml | 26 +
docs/examples/logging_output_sumologic.yaml | 14 +
docs/examples/s3.md | 107 -
docs/examples/tls.md | 101 -
docs/img/helm_logo.png | Bin 166878 -> 0 bytes
docs/img/ll_es.gif | Bin 987272 -> 0 bytes
docs/img/lll.png | Bin 178126 -> 0 bytes
docs/img/log_helm.gif | Bin 1158941 -> 0 bytes
docs/img/log_man.png | Bin 219589 -> 0 bytes
docs/img/logging-operator-v2-architecture.png | Bin 0 -> 950821 bytes
...ngo_flow.png => logging_operator_flow.png} | Bin
docs/img/logo.png | Bin 102200 -> 0 bytes
docs/img/s3_logo.png | Bin 139830 -> 0 bytes
docs/model.md | 44 +
docs/plugins/alibaba.md | 30 -
docs/plugins/azure.md | 45 -
docs/plugins/elasticsearch.md | 70 -
docs/plugins/filters/parser.md | 24 +
docs/plugins/filters/stdout.md | 3 +
docs/plugins/filters/tagnormaliser.md | 21 +
docs/plugins/forward.md | 64 -
docs/plugins/gcs.md | 42 -
docs/plugins/index.md | 24 +
docs/plugins/loki.md | 27 -
docs/plugins/outputs/azurestore.md | 13 +
docs/plugins/outputs/buffer.md | 35 +
docs/plugins/outputs/elasticsearch.md | 57 +
docs/plugins/outputs/file.md | 4 +
docs/plugins/outputs/format.md | 4 +
docs/plugins/outputs/gcs.md | 27 +
docs/plugins/outputs/loki.md | 11 +
docs/plugins/outputs/oss.md | 24 +
docs/plugins/outputs/s3.md | 76 +
docs/plugins/outputs/secret.md | 22 +
docs/plugins/outputs/sumologic.md | 18 +
docs/plugins/parser.md | 24 -
docs/plugins/s3.md | 69 -
docs/plugins/stdout.md | 11 -
example/cluster_forward.yaml | 18 -
example/elasticsearch_output.yaml | 22 -
example/forward.md | 56 -
example/forward_tls.md | 83 -
example/loki_output.yaml | 24 -
example/stdout.yaml | 11 -
example/tls-cluster-forward/cfssl-ca.json | 28 -
example/tls-cluster-forward/cfssl-csr.json | 14 -
example/tls-cluster-forward/gencert.sh | 19 -
go.mod | 35 +
go.sum | 224 ++
hack/boilerplate.go.txt | 13 +
hack/minio-mc.yaml | 16 -
hack/minio.yaml | 45 -
hack/test-s3-output.yaml | 26 -
hack/test.sh | 85 -
main.go | 82 +
pkg/apis/addtoscheme_logging_v1alpha1.go | 26 -
pkg/apis/apis.go | 29 -
pkg/apis/logging/v1alpha1/common_types.go | 24 -
pkg/apis/logging/v1alpha1/doc.go | 20 -
pkg/apis/logging/v1alpha1/fluentbit_types.go | 91 -
pkg/apis/logging/v1alpha1/fluentd_types.go | 103 -
.../logging/v1alpha1/loggingplugin_types.go | 140 --
pkg/apis/logging/v1alpha1/register.go | 35 -
.../logging/v1alpha1/zz_generated.defaults.go | 32 -
.../logging/v1alpha1/zz_generated.openapi.go | 350 ---
pkg/controller/add_fluentbit.go | 26 -
pkg/controller/add_fluentd.go | 26 -
pkg/controller/add_loggingplugin.go | 26 -
pkg/controller/controller.go | 34 -
.../fluentbit/fluentbit_controller.go | 118 --
pkg/controller/fluentd/fluentd_controller.go | 114 -
pkg/controller/plugin/plugin_controller.go | 111 -
pkg/k8sutil/resource.go | 178 +-
pkg/model/filter/parser.go | 87 +
pkg/model/filter/stdout.go | 37 +
pkg/model/filter/tagnormaliser.go | 36 +
pkg/model/filter/zz_generated.deepcopy.go | 71 +
pkg/model/input/fluenthelpers.go | 59 +
pkg/model/input/forward.go | 62 +
pkg/model/input/tail.go | 37 +
pkg/model/input/zz_generated.deepcopy.go | 61 +
pkg/model/output/azurestore.go | 56 +
pkg/model/output/buffer.go | 112 +
pkg/model/output/elasticsearch.go | 148 ++
pkg/model/output/file.go | 34 +
pkg/model/output/format.go | 26 +
pkg/model/output/gcs.go | 104 +
pkg/model/output/loki.go | 49 +
pkg/model/output/null.go | 37 +
pkg/model/output/oss.go | 85 +
pkg/model/output/s3.go | 233 ++
pkg/model/output/sumologic.go | 48 +
pkg/model/output/zz_generated.deepcopy.go | 353 ++++
pkg/model/render/fluent.go | 89 +
pkg/model/render/fluent_test.go | 571 +++++
pkg/model/render/interface.go | 23 +
pkg/model/render/json.go | 46 +
pkg/model/render/json_test.go | 125 ++
pkg/model/secret/secret.go | 91 +
pkg/model/secret/zz_generated.deepcopy.go | 76 +
pkg/model/types/builder.go | 52 +
pkg/model/types/flow.go | 135 ++
pkg/model/types/router.go | 92 +
pkg/model/types/stringmaps.go | 235 ++
pkg/model/types/stringmaps_test.go | 235 ++
pkg/model/types/types.go | 180 ++
pkg/plugins/plugin.go | 68 +
pkg/resources/fluentbit/config.go | 40 +-
pkg/resources/fluentbit/configmap.go | 87 -
pkg/resources/fluentbit/configsecret.go | 87 +
pkg/resources/fluentbit/daemonset.go | 83 +-
pkg/resources/fluentbit/fluentbit.go | 64 +-
pkg/resources/fluentbit/rbac.go | 47 +-
pkg/resources/fluentd/appconfigmap.go | 207 +-
pkg/resources/fluentd/config.go | 64 +-
pkg/resources/fluentd/configmap.go | 89 +-
pkg/resources/fluentd/configsecret.go | 68 +
pkg/resources/fluentd/deployment.go | 187 --
pkg/resources/fluentd/fluentd.go | 132 +-
pkg/resources/fluentd/pvc.go | 31 -
pkg/resources/fluentd/rbac.go | 46 +-
pkg/resources/fluentd/service.go | 33 +-
pkg/resources/fluentd/statefulset.go | 218 ++
pkg/resources/model/system.go | 164 ++
pkg/resources/plugins/alibaba.go | 45 -
pkg/resources/plugins/azure.go | 63 -
pkg/resources/plugins/configmap.go | 133 --
pkg/resources/plugins/elasticsearch.go | 71 -
pkg/resources/plugins/forward.go | 81 -
pkg/resources/plugins/gcs.go | 59 -
pkg/resources/plugins/init.go | 80 -
pkg/resources/plugins/loki.go | 46 -
pkg/resources/plugins/parser.go | 41 -
pkg/resources/plugins/plugins.go | 64 -
pkg/resources/plugins/s3.go | 79 -
pkg/resources/plugins/stdout.go | 26 -
pkg/resources/reconciler.go | 54 +-
pkg/resources/templates/templates.go | 101 +-
pkg/util/util.go | 57 +-
scripts/check-header.sh | 35 +
scripts/fmt-check.sh | 11 -
scripts/generate.sh | 220 ++
scripts/misspell-check.sh | 12 -
version/version.go | 22 -
306 files changed, 18999 insertions(+), 8791 deletions(-)
delete mode 100644 .circleci/config.yml
delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md
delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md
delete mode 100644 Gopkg.lock
delete mode 100644 Gopkg.toml
rename LICENCE => LICENSE (99%)
create mode 100644 Makefile.app
create mode 100644 PROJECT
create mode 100644 api/v1alpha2/clusterflow_types.go
create mode 100644 api/v1alpha2/clusterflow_types_test.go
create mode 100644 api/v1alpha2/clusteroutput_types.go
create mode 100644 api/v1alpha2/clusteroutput_types_test.go
create mode 100644 api/v1alpha2/common_types.go
create mode 100644 api/v1alpha2/flow_types.go
create mode 100644 api/v1alpha2/flow_types_test.go
create mode 100644 api/v1alpha2/fluentbit_types.go
create mode 100644 api/v1alpha2/fluentd_types.go
create mode 100644 api/v1alpha2/groupversion_info.go
create mode 100644 api/v1alpha2/logging_types.go
create mode 100644 api/v1alpha2/output_types.go
create mode 100644 api/v1alpha2/output_types_test.go
create mode 100644 api/v1alpha2/suite_test.go
rename {pkg/apis/logging/v1alpha1 => api/v1alpha2}/zz_generated.deepcopy.go (50%)
delete mode 100644 build/Dockerfile
delete mode 100755 build/bin/entrypoint
delete mode 100755 build/bin/user_setup
delete mode 100644 charts/logging-operator-fluent/Chart.yaml
delete mode 100644 charts/logging-operator-fluent/templates/_helpers.tpl
delete mode 100644 charts/logging-operator-fluent/templates/fluentbit-cr.yaml
delete mode 100644 charts/logging-operator-fluent/templates/fluentd-cr.yaml
delete mode 100644 charts/logging-operator-fluent/templates/psp.yaml
delete mode 100644 charts/logging-operator-fluent/templates/rbac.yaml
delete mode 100644 charts/logging-operator-fluent/templates/secret.yaml
delete mode 100644 charts/logging-operator-fluent/values.yaml
rename charts/{nginx-logging-demo => logging-operator-logging}/.helmignore (100%)
create mode 100644 charts/logging-operator-logging/Chart.yaml
rename charts/{logging-operator-fluent => logging-operator-logging}/README.md (63%)
create mode 100644 charts/logging-operator-logging/templates/NOTES.txt
rename charts/{nginx-logging-demo => logging-operator-logging}/templates/_helpers.tpl (62%)
create mode 100644 charts/logging-operator-logging/templates/logging.yaml
create mode 100644 charts/logging-operator-logging/templates/secret.yaml
create mode 100644 charts/logging-operator-logging/values.yaml
delete mode 100644 charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json
create mode 100644 charts/logging-operator/templates/NOTES.txt
delete mode 100644 charts/logging-operator/templates/configmap.yaml
delete mode 100644 charts/logging-operator/templates/crd.yaml
delete mode 100644 charts/logging-operator/templates/grafana-dashboard-logging.yaml
create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml
create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml
create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml
create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml
create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml
create mode 100644 charts/logging-operator/templates/service.yaml
delete mode 100644 charts/nginx-logging-demo/Chart.yaml
delete mode 100644 charts/nginx-logging-demo/README.md
delete mode 100644 charts/nginx-logging-demo/templates/NOTES.txt
delete mode 100644 charts/nginx-logging-demo/templates/deployment.yaml
delete mode 100644 charts/nginx-logging-demo/templates/ingress.yaml
delete mode 100644 charts/nginx-logging-demo/templates/logging.yaml
delete mode 100644 charts/nginx-logging-demo/templates/service.yaml
delete mode 100644 charts/nginx-logging-demo/templates/tests/test-connection.yaml
delete mode 100644 charts/nginx-logging-demo/values.yaml
delete mode 100644 charts/nginx-logging-es-demo/.helmignore
delete mode 100644 charts/nginx-logging-es-demo/Chart.yaml
delete mode 100644 charts/nginx-logging-es-demo/README.md
delete mode 100644 charts/nginx-logging-es-demo/templates/NOTES.txt
delete mode 100644 charts/nginx-logging-es-demo/templates/_helpers.tpl
delete mode 100644 charts/nginx-logging-es-demo/templates/deployment.yaml
delete mode 100644 charts/nginx-logging-es-demo/templates/ingress.yaml
delete mode 100644 charts/nginx-logging-es-demo/templates/logging.yaml
delete mode 100644 charts/nginx-logging-es-demo/templates/service.yaml
delete mode 100644 charts/nginx-logging-es-demo/templates/tests/test-connection.yaml
delete mode 100644 charts/nginx-logging-es-demo/values.yaml
delete mode 100644 cmd/docgen/docgen.go
create mode 100644 cmd/docs.go
delete mode 100644 cmd/manager/main.go
create mode 100644 config/certmanager/certificate.yaml
create mode 100644 config/certmanager/kustomization.yaml
create mode 100644 config/certmanager/kustomizeconfig.yaml
create mode 100644 config/crd/bases/logging.banzaicloud.com_clusterflows.yaml
create mode 100644 config/crd/bases/logging.banzaicloud.com_clusteroutputs.yaml
create mode 100644 config/crd/bases/logging.banzaicloud.com_flows.yaml
create mode 100644 config/crd/bases/logging.banzaicloud.com_loggings.yaml
create mode 100644 config/crd/bases/logging.banzaicloud.com_outputs.yaml
create mode 100644 config/crd/kustomization.yaml
create mode 100644 config/crd/kustomizeconfig.yaml
create mode 100644 config/crd/patches/cainjection_in_clusterflows.yaml
create mode 100644 config/crd/patches/cainjection_in_clusteroutputs.yaml
create mode 100644 config/crd/patches/cainjection_in_flows.yaml
create mode 100644 config/crd/patches/cainjection_in_fluentbits.yaml
create mode 100644 config/crd/patches/cainjection_in_fluentds.yaml
create mode 100644 config/crd/patches/cainjection_in_loggings.yaml
create mode 100644 config/crd/patches/cainjection_in_outputs.yaml
create mode 100644 config/crd/patches/webhook_in_clusterflows.yaml
create mode 100644 config/crd/patches/webhook_in_clusteroutputs.yaml
create mode 100644 config/crd/patches/webhook_in_flows.yaml
create mode 100644 config/crd/patches/webhook_in_fluentbits.yaml
create mode 100644 config/crd/patches/webhook_in_fluentds.yaml
create mode 100644 config/crd/patches/webhook_in_loggings.yaml
create mode 100644 config/crd/patches/webhook_in_outputs.yaml
create mode 100644 config/default/kustomization.yaml
create mode 100644 config/default/manager_auth_proxy_patch.yaml
create mode 100644 config/default/manager_image_patch.yaml
create mode 100644 config/default/manager_prometheus_metrics_patch.yaml
create mode 100644 config/default/manager_webhook_patch.yaml
create mode 100644 config/default/webhookcainjection_patch.yaml
create mode 100644 config/manager/kustomization.yaml
create mode 100644 config/manager/manager.yaml
create mode 100644 config/rbac/auth_proxy_role.yaml
rename deploy/clusterrole_binding.yaml => config/rbac/auth_proxy_role_binding.yaml (64%)
create mode 100644 config/rbac/auth_proxy_service.yaml
create mode 100644 config/rbac/kustomization.yaml
create mode 100644 config/rbac/leader_election_role.yaml
create mode 100644 config/rbac/leader_election_role_binding.yaml
create mode 100644 config/rbac/role.yaml
create mode 100644 config/rbac/role_binding.yaml
create mode 100644 config/samples/logging_v1alpha2_cluster_output_custom.yaml
create mode 100644 config/samples/logging_v1alpha2_flow.yaml
create mode 100644 config/samples/logging_v1alpha2_flow_custom.yaml
create mode 100644 config/samples/logging_v1alpha2_logging_custom.yaml
create mode 100644 config/samples/logging_v1alpha2_logging_default.yaml
create mode 100644 config/samples/logging_v1alpha2_output.yaml
create mode 100644 config/samples/logging_v1alpha2_output_custom.yaml
create mode 100644 config/webhook/kustomization.yaml
create mode 100644 config/webhook/kustomizeconfig.yaml
create mode 100644 config/webhook/manifests.yaml
create mode 100644 config/webhook/service.yaml
create mode 100644 controllers/logging_controller.go
create mode 100644 controllers/logging_controller_test.go
create mode 100644 controllers/suite_test.go
delete mode 100644 deploy/clusterrole.yaml
delete mode 100644 deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
delete mode 100644 deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
delete mode 100644 deploy/crds/logging_v1alpha1_fluentd_cr.yaml
delete mode 100644 deploy/crds/logging_v1alpha1_fluentd_crd.yaml
delete mode 100644 deploy/crds/logging_v1alpha1_plugin_cr.yaml
delete mode 100644 deploy/crds/logging_v1alpha1_plugin_crd.yaml
delete mode 100644 deploy/operator.yaml
delete mode 100644 deploy/service_account.yaml
delete mode 100644 developer.md
create mode 100644 docs/crds.md
create mode 100644 docs/developers.md
create mode 100644 docs/example-s3.md
delete mode 100644 docs/examples/es.md
create mode 100644 docs/examples/logging_flow_multiple_output.yaml
create mode 100644 docs/examples/logging_flow_single_output.yaml
create mode 100644 docs/examples/logging_flow_with_filters.yaml
create mode 100644 docs/examples/logging_logging_simple.yaml
create mode 100644 docs/examples/logging_logging_tls.yaml
create mode 100644 docs/examples/logging_output_azurestorage.yaml
create mode 100644 docs/examples/logging_output_gcs.yaml
create mode 100644 docs/examples/logging_output_s3.yaml
create mode 100644 docs/examples/logging_output_sumologic.yaml
delete mode 100644 docs/examples/s3.md
delete mode 100644 docs/examples/tls.md
delete mode 100644 docs/img/helm_logo.png
delete mode 100644 docs/img/ll_es.gif
delete mode 100644 docs/img/lll.png
delete mode 100644 docs/img/log_helm.gif
delete mode 100644 docs/img/log_man.png
create mode 100644 docs/img/logging-operator-v2-architecture.png
rename docs/img/{loggingo_flow.png => logging_operator_flow.png} (100%)
delete mode 100644 docs/img/logo.png
delete mode 100644 docs/img/s3_logo.png
create mode 100644 docs/model.md
delete mode 100644 docs/plugins/alibaba.md
delete mode 100644 docs/plugins/azure.md
delete mode 100644 docs/plugins/elasticsearch.md
create mode 100644 docs/plugins/filters/parser.md
create mode 100644 docs/plugins/filters/stdout.md
create mode 100644 docs/plugins/filters/tagnormaliser.md
delete mode 100644 docs/plugins/forward.md
delete mode 100644 docs/plugins/gcs.md
create mode 100644 docs/plugins/index.md
delete mode 100644 docs/plugins/loki.md
create mode 100644 docs/plugins/outputs/azurestore.md
create mode 100644 docs/plugins/outputs/buffer.md
create mode 100644 docs/plugins/outputs/elasticsearch.md
create mode 100644 docs/plugins/outputs/file.md
create mode 100644 docs/plugins/outputs/format.md
create mode 100644 docs/plugins/outputs/gcs.md
create mode 100644 docs/plugins/outputs/loki.md
create mode 100644 docs/plugins/outputs/oss.md
create mode 100644 docs/plugins/outputs/s3.md
create mode 100644 docs/plugins/outputs/secret.md
create mode 100644 docs/plugins/outputs/sumologic.md
delete mode 100644 docs/plugins/parser.md
delete mode 100644 docs/plugins/s3.md
delete mode 100644 docs/plugins/stdout.md
delete mode 100644 example/cluster_forward.yaml
delete mode 100644 example/elasticsearch_output.yaml
delete mode 100644 example/forward.md
delete mode 100644 example/forward_tls.md
delete mode 100644 example/loki_output.yaml
delete mode 100644 example/stdout.yaml
delete mode 100644 example/tls-cluster-forward/cfssl-ca.json
delete mode 100644 example/tls-cluster-forward/cfssl-csr.json
delete mode 100755 example/tls-cluster-forward/gencert.sh
create mode 100644 go.mod
create mode 100644 go.sum
create mode 100644 hack/boilerplate.go.txt
delete mode 100644 hack/minio-mc.yaml
delete mode 100644 hack/minio.yaml
delete mode 100644 hack/test-s3-output.yaml
delete mode 100755 hack/test.sh
create mode 100644 main.go
delete mode 100644 pkg/apis/addtoscheme_logging_v1alpha1.go
delete mode 100644 pkg/apis/apis.go
delete mode 100644 pkg/apis/logging/v1alpha1/common_types.go
delete mode 100644 pkg/apis/logging/v1alpha1/doc.go
delete mode 100644 pkg/apis/logging/v1alpha1/fluentbit_types.go
delete mode 100644 pkg/apis/logging/v1alpha1/fluentd_types.go
delete mode 100644 pkg/apis/logging/v1alpha1/loggingplugin_types.go
delete mode 100644 pkg/apis/logging/v1alpha1/register.go
delete mode 100644 pkg/apis/logging/v1alpha1/zz_generated.defaults.go
delete mode 100644 pkg/apis/logging/v1alpha1/zz_generated.openapi.go
delete mode 100644 pkg/controller/add_fluentbit.go
delete mode 100644 pkg/controller/add_fluentd.go
delete mode 100644 pkg/controller/add_loggingplugin.go
delete mode 100644 pkg/controller/controller.go
delete mode 100644 pkg/controller/fluentbit/fluentbit_controller.go
delete mode 100644 pkg/controller/fluentd/fluentd_controller.go
delete mode 100644 pkg/controller/plugin/plugin_controller.go
create mode 100644 pkg/model/filter/parser.go
create mode 100644 pkg/model/filter/stdout.go
create mode 100644 pkg/model/filter/tagnormaliser.go
create mode 100644 pkg/model/filter/zz_generated.deepcopy.go
create mode 100644 pkg/model/input/fluenthelpers.go
create mode 100644 pkg/model/input/forward.go
create mode 100644 pkg/model/input/tail.go
create mode 100644 pkg/model/input/zz_generated.deepcopy.go
create mode 100644 pkg/model/output/azurestore.go
create mode 100644 pkg/model/output/buffer.go
create mode 100644 pkg/model/output/elasticsearch.go
create mode 100644 pkg/model/output/file.go
create mode 100644 pkg/model/output/format.go
create mode 100644 pkg/model/output/gcs.go
create mode 100644 pkg/model/output/loki.go
create mode 100644 pkg/model/output/null.go
create mode 100644 pkg/model/output/oss.go
create mode 100644 pkg/model/output/s3.go
create mode 100644 pkg/model/output/sumologic.go
create mode 100644 pkg/model/output/zz_generated.deepcopy.go
create mode 100644 pkg/model/render/fluent.go
create mode 100644 pkg/model/render/fluent_test.go
create mode 100644 pkg/model/render/interface.go
create mode 100644 pkg/model/render/json.go
create mode 100644 pkg/model/render/json_test.go
create mode 100644 pkg/model/secret/secret.go
create mode 100644 pkg/model/secret/zz_generated.deepcopy.go
create mode 100644 pkg/model/types/builder.go
create mode 100644 pkg/model/types/flow.go
create mode 100644 pkg/model/types/router.go
create mode 100644 pkg/model/types/stringmaps.go
create mode 100644 pkg/model/types/stringmaps_test.go
create mode 100644 pkg/model/types/types.go
create mode 100644 pkg/plugins/plugin.go
delete mode 100644 pkg/resources/fluentbit/configmap.go
create mode 100644 pkg/resources/fluentbit/configsecret.go
create mode 100644 pkg/resources/fluentd/configsecret.go
delete mode 100644 pkg/resources/fluentd/deployment.go
delete mode 100644 pkg/resources/fluentd/pvc.go
create mode 100644 pkg/resources/fluentd/statefulset.go
create mode 100644 pkg/resources/model/system.go
delete mode 100644 pkg/resources/plugins/alibaba.go
delete mode 100644 pkg/resources/plugins/azure.go
delete mode 100644 pkg/resources/plugins/configmap.go
delete mode 100644 pkg/resources/plugins/elasticsearch.go
delete mode 100644 pkg/resources/plugins/forward.go
delete mode 100644 pkg/resources/plugins/gcs.go
delete mode 100644 pkg/resources/plugins/init.go
delete mode 100644 pkg/resources/plugins/loki.go
delete mode 100644 pkg/resources/plugins/parser.go
delete mode 100644 pkg/resources/plugins/plugins.go
delete mode 100644 pkg/resources/plugins/s3.go
delete mode 100644 pkg/resources/plugins/stdout.go
create mode 100755 scripts/check-header.sh
delete mode 100755 scripts/fmt-check.sh
create mode 100755 scripts/generate.sh
delete mode 100755 scripts/misspell-check.sh
delete mode 100644 version/version.go
diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 7853fbb8e..000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,156 +0,0 @@
-# Golang CircleCI 2.0 configuration file
-#
-# Check https://circleci.com/docs/2.0/language-go/ for more details
-version: 2.1
-
-orbs:
- helm: banzaicloud/helm@0.0.5
-
-
-jobs:
- build:
- machine:
- image: circleci/classic:201808-01
- docker_layer_caching: true
-
- environment:
- GO_VERSION: '1.11.4'
- K8S_VERSION: 'v1.12.0'
- VAULT_VERSION: '1.0.0'
- KUBECONFIG: '/home/circleci/.kube/config'
- MINIKUBE_VERSION: 'v0.31.0'
- MINIKUBE_WANTUPDATENOTIFICATION: 'false'
- MINIKUBE_WANTREPORTERRORPROMPT: 'false'
- MINIKUBE_HOME: '/home/circleci'
- CHANGE_MINIKUBE_NONE_USER: 'true'
- GOPATH: '/home/circleci/go'
-
- working_directory: /home/circleci/go/src/github.com/banzaicloud/logging-operator
-
- steps:
- - checkout
-
- - run:
- name: Setup golang
- command: |
- sudo rm -rf /usr/local/go
- curl \
- -Lo go.linux-amd64.tar.gz \
- "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" \
- && sudo tar -C /usr/local -xzf go.linux-amd64.tar.gz
- echo 'export PATH="$GOPATH/bin:$PATH"' >> "${BASH_ENV}"
-
- - run:
- name: Run fmt
- command: |
- make check-fmt
-
- - run:
- name: Run golint
- command: |
- make lint
-
- - run:
- name: Run misspell
- command: |
- make check-misspell
-
- - run:
- name: Run ineffassign
- command: |
- make ineffassign
-
- - run:
- name: Setup kubectl
- command: |
- curl \
- -Lo kubectl \
- "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl" \
- && chmod +x kubectl \
- && sudo mv kubectl /usr/local/bin/
- mkdir -p "${HOME}/.kube"
- touch "${HOME}/.kube/config"
-
- - run:
- name: Setup minikube
- command: |
- curl \
- -Lo minikube \
- "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64" \
- && chmod +x minikube \
- && sudo mv minikube /usr/local/bin/
-
- - run:
- name: Start minikube
- command: |
- sudo -E minikube start --vm-driver=none --cpus 2 --memory 4096 --kubernetes-version="${K8S_VERSION}"
-
- - run:
- name: Install Helm
- command: |
- curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh
- chmod 700 get_helm.sh
- ./get_helm.sh
-
- helm init
- helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
- helm repo update
-
- - run:
- name: Start Minio
- command: |
- kubectl create -f hack/minio.yaml
- kubectl wait --for=condition=available deployment/minio-deployment --timeout=120s
- minio="$(kubectl get pod -l app=minio -o 'jsonpath={.items[0].metadata.name}')"
- kubectl wait --for=condition=Ready pod "${minio}" --timeout=120s
-
- - run:
- name: Setup minio cli
- command: |
- kubectl create -f hack/minio-mc.yaml
- kubectl wait --for=condition=available deployment/minio-mc-deployment --timeout=120s
- mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')"
- kubectl wait --for=condition=Ready pod "${mc_pod}" --timeout=120s
- kubectl exec "${mc_pod}" -- \
- mc config host add minio \
- 'http://minio-service.default.svc.cluster.local:9000' \
- 'minio_access_key' \
- 'minio_secret_key'
-
- - run:
- name: Create test bucket
- command: |
- mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')"
- kubectl exec "${mc_pod}" -- \
- mc mb --region 'test_region' minio/logs
-
- - run:
- name: Build docker image
- command: |
- make docker DOCKER_TAG=local
-
- - run:
- name: Test
- command: |
- hack/test.sh
-
-workflows:
- version: 2
- build:
- jobs:
- - build
- helm-chart:
- jobs:
- - helm/lint-chart:
- filters:
- tags:
- ignore: /.*/
-
- - helm/publish-chart:
- context: helm
- filters:
- branches:
- ignore: /.*/
- tags:
- only: /chart\/.*\/\d+.\d+.\d+.*/
-
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 8e266a19c..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-name: Bug report
-about: Report a bug or features that are not working as intended
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index bbcbbe7d6..000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 1da98c4e1..000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,34 +0,0 @@
-| Q | A
-| --------------- | ---
-| Bug fix? | no|yes
-| New feature? | no|yes
-| API breaks? | no|yes
-| Deprecations? | no|yes
-| Related tickets | fixes #X, partially #Y, mentioned in #Z
-| License | Apache 2.0
-
-
-### What's in this PR?
-
-
-
-### Why?
-
-
-
-### Additional context
-
-
-
-### Checklist
-
-
-- [ ] Implementation tested (with at least one cloud provider)
-- [ ] Error handling code meets the [guideline](https://github.com/banzaicloud/pipeline/blob/master/docs/error-handling-guide.md)
-- [ ] Logging code meets the guideline (TODO)
-- [ ] User guide and development docs updated (if needed)
-- [ ] Related Helm chart(s) updated (if needed)
-
-### To Do
-
-- [ ] If the PR is not complete but you want to discuss the approach, list what remains to be done here
diff --git a/Dockerfile b/Dockerfile
index 7178a61af..71ce731a8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,24 +1,26 @@
-FROM golang:1.11-alpine as golang
-
-RUN apk add --update --no-cache ca-certificates curl git make
-RUN go get -u github.com/golang/dep/cmd/dep
-
-ADD Gopkg.toml /go/src/github.com/banzaicloud/logging-operator/Gopkg.toml
-ADD Gopkg.lock /go/src/github.com/banzaicloud/logging-operator/Gopkg.lock
-
-WORKDIR /go/src/github.com/banzaicloud/logging-operator
-RUN dep ensure -v -vendor-only
-ADD . /go/src/github.com/banzaicloud/logging-operator
-RUN go install ./cmd/manager
-
-
-FROM alpine:3.8
-
-RUN apk add --no-cache ca-certificates
-
-COPY --from=golang /go/bin/manager /usr/local/bin/logging-operator
-
-RUN adduser -D logging-operator
-USER logging-operator
-
-ENTRYPOINT ["/usr/local/bin/logging-operator"]
\ No newline at end of file
+# Build the manager binary
+FROM golang:1.13 as builder
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY main.go main.go
+COPY api/ api/
+COPY controllers/ controllers/
+COPY pkg/ pkg/
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
+
+# Use distroless as minimal base image to package the manager binary
+# Refer to https://github.com/GoogleContainerTools/distroless for more details
+FROM gcr.io/distroless/static:latest
+WORKDIR /
+COPY --from=builder /workspace/manager .
+ENTRYPOINT ["/manager"]
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index a8c55c94b..000000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,1012 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:2173c429b0c4654deb4f3e8d1f503c374f93a6b5549d74f9cba797c1e787f8e4"
- name = "cloud.google.com/go"
- packages = ["compute/metadata"]
- pruneopts = "NT"
- revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
- version = "v0.36.0"
-
-[[projects]]
- digest = "1:25870183293a3fb61cc9afd060a61d63a486f091db72af01a8ea3449f5ca530d"
- name = "github.com/Masterminds/goutils"
- packages = ["."]
- pruneopts = "NT"
- revision = "41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:a26f8da48b22e6176c1c6a2459904bb30bd0c49ada04b2963c2c3a203e81a620"
- name = "github.com/Masterminds/semver"
- packages = ["."]
- pruneopts = "NT"
- revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
- version = "v1.4.2"
-
-[[projects]]
- digest = "1:b3bf7ebdab400adfa4d81687848571417ded5618231ef58124adf5544cab5e59"
- name = "github.com/Masterminds/sprig"
- packages = ["."]
- pruneopts = "NT"
- revision = "b1fe2752acccf8c3d7f8a1e7c75c7ae7d83a1975"
- version = "v2.18.0"
-
-[[projects]]
- digest = "1:0a111edd8693fd977f42a0c4f199a0efb13c20aec9da99ad8830c7bb6a87e8d6"
- name = "github.com/PuerkitoBio/purell"
- packages = ["."]
- pruneopts = "NT"
- revision = "44968752391892e1b0d0b821ee79e9a85fa13049"
- version = "v1.1.1"
-
-[[projects]]
- branch = "master"
- digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727"
- name = "github.com/PuerkitoBio/urlesc"
- packages = ["."]
- pruneopts = "NT"
- revision = "de5bf2ad457846296e2031421a34e2568e304e35"
-
-[[projects]]
- digest = "1:680b63a131506e668818d630d3ca36123ff290afa0afc9f4be21940adca3f27d"
- name = "github.com/appscode/jsonpatch"
- packages = ["."]
- pruneopts = "NT"
- revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2"
- version = "1.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = "NT"
- revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
- digest = "1:c61f4f97321a37adcb5b4fd4fd61209cd553e46c99ee606c465553541b12a229"
- name = "github.com/coreos/prometheus-operator"
- packages = [
- "pkg/apis/monitoring",
- "pkg/apis/monitoring/v1",
- "pkg/client/versioned/scheme",
- "pkg/client/versioned/typed/monitoring/v1",
- ]
- pruneopts = "NT"
- revision = "72ec4b9b16ef11700724dc71fec77112536eed40"
- version = "v0.26.0"
-
-[[projects]]
- digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = "NT"
- revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:2453249730493850718f891fb40b8f1bc932a0265384fc85b269dc04a01d4673"
- name = "github.com/emicklei/go-restful"
- packages = [
- ".",
- "log",
- ]
- pruneopts = "NT"
- revision = "85d198d05a92d31823b852b4a5928114912e8949"
- version = "v2.9.0"
-
-[[projects]]
- digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
- name = "github.com/ghodss/yaml"
- packages = ["."]
- pruneopts = "NT"
- revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
- version = "v1.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9"
- name = "github.com/go-logr/logr"
- packages = ["."]
- pruneopts = "NT"
- revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e"
-
-[[projects]]
- digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687"
- name = "github.com/go-logr/zapr"
- packages = ["."]
- pruneopts = "NT"
- revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab"
- version = "v0.1.0"
-
-[[projects]]
- digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441"
- name = "github.com/go-openapi/jsonpointer"
- packages = ["."]
- pruneopts = "NT"
- revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
- version = "v0.18.0"
-
-[[projects]]
- digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
- name = "github.com/go-openapi/jsonreference"
- packages = ["."]
- pruneopts = "NT"
- revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3"
- version = "v0.18.0"
-
-[[projects]]
- branch = "master"
- digest = "1:8f80caf2fa31f78a035f33981c9685013033073b53f344f579e60fa69f0c6670"
- name = "github.com/go-openapi/spec"
- packages = ["."]
- pruneopts = "NT"
- revision = "53d776530bf78a11b03a7b52dd8a083086b045e5"
-
-[[projects]]
- digest = "1:dc0f590770e5a6c70ea086232324f7b7dc4857c60eca63ab8ff78e0a5cfcdbf3"
- name = "github.com/go-openapi/swag"
- packages = ["."]
- pruneopts = "NT"
- revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909"
- version = "v0.18.0"
-
-[[projects]]
- digest = "1:9059915429f7f3a5f18cfa6b7cab9a28721d7ac6db4079a62044aa229eb7f2a8"
- name = "github.com/gobuffalo/envy"
- packages = ["."]
- pruneopts = "NT"
- revision = "fa0dfdc10b5366ce365b7d9d1755a03e4e797bc5"
- version = "v1.6.15"
-
-[[projects]]
- digest = "1:0b39706cfa32c1ba9e14435b5844d04aef81b60f44b6077e61e0607d56692603"
- name = "github.com/gogo/protobuf"
- packages = [
- "proto",
- "sortkeys",
- ]
- pruneopts = "NT"
- revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
- version = "v1.2.1"
-
-[[projects]]
- branch = "master"
- digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
- name = "github.com/golang/glog"
- packages = ["."]
- pruneopts = "NT"
- revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
-
-[[projects]]
- branch = "master"
- digest = "1:20b774dcfdf0fff3148432beb828c52404f3eb3d70b7ce71ae0356ed6cbc2bae"
- name = "github.com/golang/groupcache"
- packages = ["lru"]
- pruneopts = "NT"
- revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
-
-[[projects]]
- digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/timestamp",
- ]
- pruneopts = "NT"
- revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
- version = "v1.2.0"
-
-[[projects]]
- branch = "master"
- digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
- name = "github.com/google/btree"
- packages = ["."]
- pruneopts = "NT"
- revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
-
-[[projects]]
- branch = "master"
- digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
- name = "github.com/google/gofuzz"
- packages = ["."]
- pruneopts = "NT"
- revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
-
-[[projects]]
- digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
- name = "github.com/google/uuid"
- packages = ["."]
- pruneopts = "NT"
- revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96"
- name = "github.com/googleapis/gnostic"
- packages = [
- "OpenAPIv2",
- "compiler",
- "extensions",
- ]
- pruneopts = "NT"
- revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:c44f4c3b105e9a06f278c0d12982c915d12cd1537d252391904444777a6791df"
- name = "github.com/goph/emperror"
- packages = ["."]
- pruneopts = "NT"
- revision = "4cdd86c173cfed1f47be88bd88327140f81bcede"
- version = "v0.16.0"
-
-[[projects]]
- branch = "master"
- digest = "1:bb7bd892abcb75ef819ce2efab9d54d22b7e38dc05ffac55428bb0578b52912b"
- name = "github.com/gregjones/httpcache"
- packages = [
- ".",
- "diskcache",
- ]
- pruneopts = "NT"
- revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f"
-
-[[projects]]
- digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
- name = "github.com/hashicorp/golang-lru"
- packages = [
- ".",
- "simplelru",
- ]
- pruneopts = "NT"
- revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
- version = "v0.5.0"
-
-[[projects]]
- digest = "1:dc54242755f5b6721dd880843de6e45fe234838ea9149ec8249951880fd5802f"
- name = "github.com/huandu/xstrings"
- packages = ["."]
- pruneopts = "NT"
- revision = "f02667b379e2fb5916c3cda2cf31e0eb885d79f8"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
- name = "github.com/imdario/mergo"
- packages = ["."]
- pruneopts = "NT"
- revision = "7c29201646fa3de8506f701213473dd407f19646"
- version = "v0.3.7"
-
-[[projects]]
- digest = "1:f5b9328966ccea0970b1d15075698eff0ddb3e75889560aad2e9f76b289b536a"
- name = "github.com/joho/godotenv"
- packages = ["."]
- pruneopts = "NT"
- revision = "23d116af351c84513e1946b527c88823e476be13"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d"
- name = "github.com/json-iterator/go"
- packages = ["."]
- pruneopts = "NT"
- revision = "1624edc4454b8682399def8740d46db5e4362ba4"
- version = "v1.1.5"
-
-[[projects]]
- digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
- name = "github.com/konsorten/go-windows-terminal-sequences"
- packages = ["."]
- pruneopts = "NT"
- revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
- version = "v1.0.1"
-
-[[projects]]
- branch = "master"
- digest = "1:4925ec3736ef6c299cfcf61597782e3d66ec13114f7476019d04c742a7be55d0"
- name = "github.com/mailru/easyjson"
- packages = [
- "buffer",
- "jlexer",
- "jwriter",
- ]
- pruneopts = "NT"
- revision = "6243d8e04c3f819e79757e8bc3faa15c3cb27003"
-
-[[projects]]
- digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
- name = "github.com/markbates/inflect"
- packages = ["."]
- pruneopts = "NT"
- revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6"
- version = "v1.0.4"
-
-[[projects]]
- digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = "NT"
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- pruneopts = "NT"
- revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
- version = "1.0.3"
-
-[[projects]]
- digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
- name = "github.com/modern-go/reflect2"
- packages = ["."]
- pruneopts = "NT"
- revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
- version = "1.0.1"
-
-[[projects]]
- digest = "1:df8e741cd0f86087367f3bcfeb1cf237e96fada71194b6d4cee9412d221ec763"
- name = "github.com/operator-framework/operator-sdk"
- packages = [
- "pkg/k8sutil",
- "pkg/leader",
- "pkg/log/zap",
- "pkg/metrics",
- "version",
- ]
- pruneopts = "NT"
- revision = "6754b70169f1b62355516947270e33b9f73d8159"
- version = "v0.5.0"
-
-[[projects]]
- digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
- name = "github.com/pborman/uuid"
- packages = ["."]
- pruneopts = "NT"
- revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
- version = "v1.2"
-
-[[projects]]
- branch = "master"
- digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31"
- name = "github.com/petar/GoLLRB"
- packages = ["llrb"]
- pruneopts = "NT"
- revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
-
-[[projects]]
- digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b"
- name = "github.com/peterbourgon/diskv"
- packages = ["."]
- pruneopts = "NT"
- revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
- version = "v2.0.1"
-
-[[projects]]
- digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
- name = "github.com/pkg/errors"
- packages = ["."]
- pruneopts = "NT"
- revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
- version = "v0.8.1"
-
-[[projects]]
- digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/internal",
- "prometheus/promhttp",
- ]
- pruneopts = "NT"
- revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
- version = "v0.9.2"
-
-[[projects]]
- branch = "master"
- digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = "NT"
- revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
-
-[[projects]]
- digest = "1:30261b5e263b5c4fb40571b53a41a99c96016c6b1b2c45c1cefd226fc3f6304b"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = "NT"
- revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
- version = "v0.2.0"
-
-[[projects]]
- branch = "master"
- digest = "1:1c282f5c094061ce301d1ea3098799fc907ac1399e9f064c463787323a7b7340"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/util",
- "iostats",
- "nfs",
- "xfs",
- ]
- pruneopts = "NT"
- revision = "6ed1f7e1041181781dd2826d3001075d011a80cc"
-
-[[projects]]
- digest = "1:fcef1ce61da6f8f6f115154fb0e0e5b159fe11656839ba1e6061372711c013ee"
- name = "github.com/rogpeppe/go-internal"
- packages = [
- "modfile",
- "module",
- "semver",
- ]
- pruneopts = "NT"
- revision = "1cf9852c553c5b7da2d5a4a091129a7822fed0c9"
- version = "v1.2.2"
-
-[[projects]]
- digest = "1:1f84287a4ca2c8f729d8155ba4c45915f5854ebbd214e406070779753da68422"
- name = "github.com/sirupsen/logrus"
- packages = ["."]
- pruneopts = "NT"
- revision = "e1e72e9de974bd926e5c56f83753fba2df402ce5"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:1bc08ec221c4fb25e6f2c019b23fe989fb44573c696983d8e403a3b76cc378e1"
- name = "github.com/spf13/afero"
- packages = [
- ".",
- "mem",
- ]
- pruneopts = "NT"
- revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5"
- version = "v1.2.1"
-
-[[projects]]
- digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
- name = "github.com/spf13/pflag"
- packages = ["."]
- pruneopts = "NT"
- revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
- version = "v1.0.3"
-
-[[projects]]
- digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
- name = "go.uber.org/atomic"
- packages = ["."]
- pruneopts = "NT"
- revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
- version = "v1.3.2"
-
-[[projects]]
- digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e"
- name = "go.uber.org/multierr"
- packages = ["."]
- pruneopts = "NT"
- revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a"
- name = "go.uber.org/zap"
- packages = [
- ".",
- "buffer",
- "internal/bufferpool",
- "internal/color",
- "internal/exit",
- "zapcore",
- ]
- pruneopts = "NT"
- revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
- version = "v1.9.1"
-
-[[projects]]
- branch = "master"
- digest = "1:b19fb19351db5de242e3f1203e63c207c69bf4f4df4822b4ef15220e0204e0e4"
- name = "golang.org/x/crypto"
- packages = [
- "pbkdf2",
- "scrypt",
- "ssh/terminal",
- ]
- pruneopts = "NT"
- revision = "7f87c0fbb88b590338857bcb720678c2583d4dea"
-
-[[projects]]
- branch = "master"
- digest = "1:60c1f5371132225f21f849a13e379d55c4512ac9ed4b37e7fa33ea0fedeb8480"
- name = "golang.org/x/net"
- packages = [
- "context",
- "context/ctxhttp",
- "http/httpguts",
- "http2",
- "http2/hpack",
- "idna",
- ]
- pruneopts = "NT"
- revision = "fe579d43d83210096a79b46dcca0e3721058393a"
-
-[[projects]]
- branch = "master"
- digest = "1:22a51305a9f13b8c8ca91c335a0da16a1a7b537155e677e45d7905465e457e87"
- name = "golang.org/x/oauth2"
- packages = [
- ".",
- "google",
- "internal",
- "jws",
- "jwt",
- ]
- pruneopts = "NT"
- revision = "529b322ea34655aa15fb32e063f3d4d3cf803cac"
-
-[[projects]]
- branch = "master"
- digest = "1:90abfd79711e2d0ce66e6d23a1b652f8e16c76e12a2ef4b255d1bf0ff4f254b8"
- name = "golang.org/x/sys"
- packages = [
- "unix",
- "windows",
- ]
- pruneopts = "NT"
- revision = "cc5685c2db1239775905f3911f0067c0fa74762f"
-
-[[projects]]
- digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a"
- name = "golang.org/x/text"
- packages = [
- "collate",
- "collate/build",
- "internal/colltab",
- "internal/gen",
- "internal/tag",
- "internal/triegen",
- "internal/ucd",
- "language",
- "secure/bidirule",
- "transform",
- "unicode/bidi",
- "unicode/cldr",
- "unicode/norm",
- "unicode/rangetable",
- "width",
- ]
- pruneopts = "NT"
- revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
- version = "v0.3.0"
-
-[[projects]]
- branch = "master"
- digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
- name = "golang.org/x/time"
- packages = ["rate"]
- pruneopts = "NT"
- revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
-
-[[projects]]
- branch = "master"
- digest = "1:dfddac8ab4fec08ac3679d4f64f6054a6be3c849faf6ea05e525e40f7aeeb133"
- name = "golang.org/x/tools"
- packages = [
- "go/ast/astutil",
- "go/gcexportdata",
- "go/internal/cgo",
- "go/internal/gcimporter",
- "go/internal/packagesdriver",
- "go/packages",
- "go/types/typeutil",
- "imports",
- "internal/fastwalk",
- "internal/gopathwalk",
- "internal/module",
- "internal/semver",
- ]
- pruneopts = "NT"
- revision = "2dc4ef2775b8122dd5afe2c18fd6f775e87f89e5"
-
-[[projects]]
- digest = "1:902ffa11f1d8c19c12b05cabffe69e1a16608ad03a8899ebcb9c6bde295660ae"
- name = "google.golang.org/appengine"
- packages = [
- ".",
- "internal",
- "internal/app_identity",
- "internal/base",
- "internal/datastore",
- "internal/log",
- "internal/modules",
- "internal/remote_api",
- "internal/urlfetch",
- "urlfetch",
- ]
- pruneopts = "NT"
- revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
- version = "v1.4.0"
-
-[[projects]]
- digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
- name = "gopkg.in/inf.v0"
- packages = ["."]
- pruneopts = "NT"
- revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
- version = "v0.9.1"
-
-[[projects]]
- digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = "NT"
- revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
- version = "v2.2.2"
-
-[[projects]]
- digest = "1:6fa82ea248029bbbdddade20c06ab177ff6e485e5e45e48b045707415b7efd34"
- name = "k8s.io/api"
- packages = [
- "admission/v1beta1",
- "admissionregistration/v1alpha1",
- "admissionregistration/v1beta1",
- "apps/v1",
- "apps/v1beta1",
- "apps/v1beta2",
- "auditregistration/v1alpha1",
- "authentication/v1",
- "authentication/v1beta1",
- "authorization/v1",
- "authorization/v1beta1",
- "autoscaling/v1",
- "autoscaling/v2beta1",
- "autoscaling/v2beta2",
- "batch/v1",
- "batch/v1beta1",
- "batch/v2alpha1",
- "certificates/v1beta1",
- "coordination/v1beta1",
- "core/v1",
- "events/v1beta1",
- "extensions/v1beta1",
- "networking/v1",
- "policy/v1beta1",
- "rbac/v1",
- "rbac/v1alpha1",
- "rbac/v1beta1",
- "scheduling/v1alpha1",
- "scheduling/v1beta1",
- "settings/v1alpha1",
- "storage/v1",
- "storage/v1alpha1",
- "storage/v1beta1",
- ]
- pruneopts = "NT"
- revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
-
-[[projects]]
- digest = "1:c6f23048e162e65d586c809fd02e263e180ad157f110df17437c22517bb59a4b"
- name = "k8s.io/apiextensions-apiserver"
- packages = [
- "pkg/apis/apiextensions",
- "pkg/apis/apiextensions/v1beta1",
- ]
- pruneopts = "NT"
- revision = "0fe22c71c47604641d9aa352c785b7912c200562"
-
-[[projects]]
- digest = "1:15b5c41ff6faa4d0400557d4112d6337e1abc961c65513d44fce7922e32c9ca7"
- name = "k8s.io/apimachinery"
- packages = [
- "pkg/api/errors",
- "pkg/api/meta",
- "pkg/api/resource",
- "pkg/apis/meta/internalversion",
- "pkg/apis/meta/v1",
- "pkg/apis/meta/v1/unstructured",
- "pkg/apis/meta/v1beta1",
- "pkg/conversion",
- "pkg/conversion/queryparams",
- "pkg/fields",
- "pkg/labels",
- "pkg/runtime",
- "pkg/runtime/schema",
- "pkg/runtime/serializer",
- "pkg/runtime/serializer/json",
- "pkg/runtime/serializer/protobuf",
- "pkg/runtime/serializer/recognizer",
- "pkg/runtime/serializer/streaming",
- "pkg/runtime/serializer/versioning",
- "pkg/selection",
- "pkg/types",
- "pkg/util/cache",
- "pkg/util/clock",
- "pkg/util/diff",
- "pkg/util/errors",
- "pkg/util/framer",
- "pkg/util/intstr",
- "pkg/util/json",
- "pkg/util/mergepatch",
- "pkg/util/naming",
- "pkg/util/net",
- "pkg/util/runtime",
- "pkg/util/sets",
- "pkg/util/strategicpatch",
- "pkg/util/uuid",
- "pkg/util/validation",
- "pkg/util/validation/field",
- "pkg/util/wait",
- "pkg/util/yaml",
- "pkg/version",
- "pkg/watch",
- "third_party/forked/golang/json",
- "third_party/forked/golang/reflect",
- ]
- pruneopts = "NT"
- revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
-
-[[projects]]
- digest = "1:c904a3d70131b33df36e4e51b574226b82308fc1ea66964aa21095a95d453fc9"
- name = "k8s.io/client-go"
- packages = [
- "discovery",
- "dynamic",
- "kubernetes",
- "kubernetes/scheme",
- "kubernetes/typed/admissionregistration/v1alpha1",
- "kubernetes/typed/admissionregistration/v1beta1",
- "kubernetes/typed/apps/v1",
- "kubernetes/typed/apps/v1beta1",
- "kubernetes/typed/apps/v1beta2",
- "kubernetes/typed/auditregistration/v1alpha1",
- "kubernetes/typed/authentication/v1",
- "kubernetes/typed/authentication/v1beta1",
- "kubernetes/typed/authorization/v1",
- "kubernetes/typed/authorization/v1beta1",
- "kubernetes/typed/autoscaling/v1",
- "kubernetes/typed/autoscaling/v2beta1",
- "kubernetes/typed/autoscaling/v2beta2",
- "kubernetes/typed/batch/v1",
- "kubernetes/typed/batch/v1beta1",
- "kubernetes/typed/batch/v2alpha1",
- "kubernetes/typed/certificates/v1beta1",
- "kubernetes/typed/coordination/v1beta1",
- "kubernetes/typed/core/v1",
- "kubernetes/typed/events/v1beta1",
- "kubernetes/typed/extensions/v1beta1",
- "kubernetes/typed/networking/v1",
- "kubernetes/typed/policy/v1beta1",
- "kubernetes/typed/rbac/v1",
- "kubernetes/typed/rbac/v1alpha1",
- "kubernetes/typed/rbac/v1beta1",
- "kubernetes/typed/scheduling/v1alpha1",
- "kubernetes/typed/scheduling/v1beta1",
- "kubernetes/typed/settings/v1alpha1",
- "kubernetes/typed/storage/v1",
- "kubernetes/typed/storage/v1alpha1",
- "kubernetes/typed/storage/v1beta1",
- "pkg/apis/clientauthentication",
- "pkg/apis/clientauthentication/v1alpha1",
- "pkg/apis/clientauthentication/v1beta1",
- "pkg/version",
- "plugin/pkg/client/auth/exec",
- "plugin/pkg/client/auth/gcp",
- "rest",
- "rest/watch",
- "restmapper",
- "third_party/forked/golang/template",
- "tools/auth",
- "tools/cache",
- "tools/clientcmd",
- "tools/clientcmd/api",
- "tools/clientcmd/api/latest",
- "tools/clientcmd/api/v1",
- "tools/leaderelection",
- "tools/leaderelection/resourcelock",
- "tools/metrics",
- "tools/pager",
- "tools/record",
- "tools/reference",
- "transport",
- "util/buffer",
- "util/cert",
- "util/connrotation",
- "util/flowcontrol",
- "util/homedir",
- "util/integer",
- "util/jsonpath",
- "util/retry",
- "util/workqueue",
- ]
- pruneopts = "NT"
- revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
-
-[[projects]]
- digest = "1:dc1ae99dcab96913d81ae970b1f7a7411a54199b14bfb17a7e86f9a56979c720"
- name = "k8s.io/code-generator"
- packages = [
- "cmd/client-gen",
- "cmd/client-gen/args",
- "cmd/client-gen/generators",
- "cmd/client-gen/generators/fake",
- "cmd/client-gen/generators/scheme",
- "cmd/client-gen/generators/util",
- "cmd/client-gen/path",
- "cmd/client-gen/types",
- "cmd/conversion-gen",
- "cmd/conversion-gen/args",
- "cmd/conversion-gen/generators",
- "cmd/deepcopy-gen",
- "cmd/deepcopy-gen/args",
- "cmd/defaulter-gen",
- "cmd/defaulter-gen/args",
- "cmd/informer-gen",
- "cmd/informer-gen/args",
- "cmd/informer-gen/generators",
- "cmd/lister-gen",
- "cmd/lister-gen/args",
- "cmd/lister-gen/generators",
- "pkg/util",
- ]
- pruneopts = "T"
- revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
-
-[[projects]]
- branch = "master"
- digest = "1:2b9071c93303f1196cfe959c7f7f69ed1e4a5180f240a259536c5886f79f86d4"
- name = "k8s.io/gengo"
- packages = [
- "args",
- "examples/deepcopy-gen/generators",
- "examples/defaulter-gen/generators",
- "examples/set-gen/sets",
- "generator",
- "namer",
- "parser",
- "types",
- ]
- pruneopts = "T"
- revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
-
-[[projects]]
- digest = "1:29f93bb84d907a2c035e729e19d66fe52165d8c905cb3ef1920140d76ae6afaf"
- name = "k8s.io/klog"
- packages = ["."]
- pruneopts = "NT"
- revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:c48a795cd7048bb1888273bc604b6e69b22f9b8089c3df65f77cc527757b515c"
- name = "k8s.io/kube-openapi"
- packages = [
- "cmd/openapi-gen",
- "cmd/openapi-gen/args",
- "pkg/common",
- "pkg/generators",
- "pkg/generators/rules",
- "pkg/util/proto",
- "pkg/util/sets",
- ]
- pruneopts = "NT"
- revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
-
-[[projects]]
- digest = "1:06035489efbd51ccface65fc878ceeb849aba05b2f9443c8993f363fc96e80ac"
- name = "sigs.k8s.io/controller-runtime"
- packages = [
- "pkg/cache",
- "pkg/cache/internal",
- "pkg/client",
- "pkg/client/apiutil",
- "pkg/client/config",
- "pkg/controller",
- "pkg/event",
- "pkg/handler",
- "pkg/internal/controller",
- "pkg/internal/controller/metrics",
- "pkg/internal/recorder",
- "pkg/leaderelection",
- "pkg/manager",
- "pkg/metrics",
- "pkg/patch",
- "pkg/predicate",
- "pkg/reconcile",
- "pkg/recorder",
- "pkg/runtime/inject",
- "pkg/runtime/log",
- "pkg/runtime/scheme",
- "pkg/runtime/signals",
- "pkg/source",
- "pkg/source/internal",
- "pkg/webhook/admission",
- "pkg/webhook/admission/types",
- "pkg/webhook/internal/metrics",
- "pkg/webhook/types",
- ]
- pruneopts = "NT"
- revision = "12d98582e72927b6cd0123e2b4e819f9341ce62c"
- version = "v0.1.10"
-
-[[projects]]
- digest = "1:0a14ea9a2647d064bb9d48b2de78306e74b196681efd7b654eb0b518d90c2e8d"
- name = "sigs.k8s.io/controller-tools"
- packages = [
- "pkg/crd/generator",
- "pkg/crd/util",
- "pkg/internal/codegen",
- "pkg/internal/codegen/parse",
- "pkg/internal/general",
- "pkg/util",
- ]
- pruneopts = "NT"
- revision = "950a0e88e4effb864253b3c7504b326cc83b9d11"
- version = "v0.1.8"
-
-[[projects]]
- digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
- name = "sigs.k8s.io/yaml"
- packages = ["."]
- pruneopts = "NT"
- revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
- version = "v1.1.0"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "github.com/Masterminds/sprig",
- "github.com/go-logr/logr",
- "github.com/go-openapi/spec",
- "github.com/goph/emperror",
- "github.com/operator-framework/operator-sdk/pkg/k8sutil",
- "github.com/operator-framework/operator-sdk/pkg/leader",
- "github.com/operator-framework/operator-sdk/pkg/log/zap",
- "github.com/operator-framework/operator-sdk/pkg/metrics",
- "github.com/operator-framework/operator-sdk/version",
- "github.com/sirupsen/logrus",
- "github.com/spf13/pflag",
- "k8s.io/api/apps/v1",
- "k8s.io/api/core/v1",
- "k8s.io/api/rbac/v1",
- "k8s.io/apimachinery/pkg/api/errors",
- "k8s.io/apimachinery/pkg/apis/meta/v1",
- "k8s.io/apimachinery/pkg/runtime",
- "k8s.io/apimachinery/pkg/runtime/schema",
- "k8s.io/apimachinery/pkg/types",
- "k8s.io/apimachinery/pkg/util/intstr",
- "k8s.io/client-go/plugin/pkg/client/auth/gcp",
- "k8s.io/code-generator/cmd/client-gen",
- "k8s.io/code-generator/cmd/conversion-gen",
- "k8s.io/code-generator/cmd/deepcopy-gen",
- "k8s.io/code-generator/cmd/defaulter-gen",
- "k8s.io/code-generator/cmd/informer-gen",
- "k8s.io/code-generator/cmd/lister-gen",
- "k8s.io/gengo/args",
- "k8s.io/kube-openapi/cmd/openapi-gen",
- "k8s.io/kube-openapi/pkg/common",
- "sigs.k8s.io/controller-runtime/pkg/client",
- "sigs.k8s.io/controller-runtime/pkg/client/config",
- "sigs.k8s.io/controller-runtime/pkg/controller",
- "sigs.k8s.io/controller-runtime/pkg/handler",
- "sigs.k8s.io/controller-runtime/pkg/manager",
- "sigs.k8s.io/controller-runtime/pkg/reconcile",
- "sigs.k8s.io/controller-runtime/pkg/runtime/log",
- "sigs.k8s.io/controller-runtime/pkg/runtime/scheme",
- "sigs.k8s.io/controller-runtime/pkg/runtime/signals",
- "sigs.k8s.io/controller-runtime/pkg/source",
- "sigs.k8s.io/controller-tools/pkg/crd/generator",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100644
index b4c4b8cda..000000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Force dep to vendor the code generators, which aren't imported just used at dev time.
-required = [
- "k8s.io/code-generator/cmd/defaulter-gen",
- "k8s.io/code-generator/cmd/deepcopy-gen",
- "k8s.io/code-generator/cmd/conversion-gen",
- "k8s.io/code-generator/cmd/client-gen",
- "k8s.io/code-generator/cmd/lister-gen",
- "k8s.io/code-generator/cmd/informer-gen",
- "k8s.io/kube-openapi/cmd/openapi-gen",
- "k8s.io/gengo/args",
- "sigs.k8s.io/controller-tools/pkg/crd/generator",
-]
-
-[[override]]
- name = "k8s.io/code-generator"
- # revision for tag "kubernetes-1.13.1"
- revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
-
-[[override]]
- name = "k8s.io/kube-openapi"
- revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
-
-[[override]]
- name = "github.com/go-openapi/spec"
- branch = "master"
-
-[[override]]
- name = "sigs.k8s.io/controller-tools"
- version = "=v0.1.8"
-
-[[override]]
- name = "k8s.io/api"
- # revision for tag "kubernetes-1.13.1"
- revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
-
-[[override]]
- name = "k8s.io/apiextensions-apiserver"
- # revision for tag "kubernetes-1.13.1"
- revision = "0fe22c71c47604641d9aa352c785b7912c200562"
-
-[[override]]
- name = "k8s.io/apimachinery"
- # revision for tag "kubernetes-1.13.1"
- revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
-
-[[override]]
- name = "k8s.io/client-go"
- # revision for tag "kubernetes-1.13.1"
- revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
-
-[[override]]
- name = "github.com/coreos/prometheus-operator"
- version = "=v0.26.0"
-
-[[override]]
- name = "sigs.k8s.io/controller-runtime"
- version = "=v0.1.10"
-
-[[constraint]]
- name = "github.com/operator-framework/operator-sdk"
- # The version rule is used for a specific release and the master branch for in between releases.
- # branch = "master" #osdk_branch_annotation
- version = "=v0.5.0" #osdk_version_annotation
-
-[prune]
- go-tests = true
- non-go = true
-
- [[prune.project]]
- name = "k8s.io/code-generator"
- non-go = false
-
- [[prune.project]]
- name = "k8s.io/gengo"
- non-go = false
-
-[[constraint]]
- name = "github.com/Masterminds/sprig"
- version = "2.18.0"
diff --git a/LICENCE b/LICENSE
similarity index 99%
rename from LICENCE
rename to LICENSE
index 261eeb9e9..f49a4e16e 100644
--- a/LICENCE
+++ b/LICENSE
@@ -198,4 +198,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
- limitations under the License.
+ limitations under the License.
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 344a6f983..3f6e8c96e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,66 +1,90 @@
-VERSION := $(shell git describe --abbrev=0 --tags)
-DOCKER_IMAGE = banzaicloud/logging-operator
-DOCKER_TAG ?= ${VERSION}
-GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./client/*")
-PKGS=$(shell go list ./... | grep -v /vendor)
+# this makefile was generated by
+include Makefile.app
-DEP_VERSION = 0.5.0
+# Image URL to use for all building/pushing image targets
+IMG ?= controller:latest
+# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
+CRD_OPTIONS ?= "crd:trivialVersions=true"
-bin/dep: bin/dep-${DEP_VERSION}
- @ln -sf dep-${DEP_VERSION} bin/dep
+KUBEBUILDER_VERSION = 2.0.0
-bin/dep-${DEP_VERSION}:
- @mkdir -p bin
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | INSTALL_DIRECTORY=bin DEP_RELEASE_TAG=v${DEP_VERSION} sh
- @mv bin/dep $@
-
-.PHONY: vendor
-vendor: bin/dep ## Install dependencies
- bin/dep ensure -v -vendor-only
-
-build: vendor
- go build -v $(PKGS)
+export PATH := $(PWD)/bin:$(PATH)
-check-fmt:
- PKGS="${GOFILES_NOVENDOR}" GOFMT="gofmt" ./scripts/fmt-check.sh
+all: manager
-fmt:
- gofmt -w ${GOFILES_NOVENDOR}
+# Generate docs
+.PHONY: docs
+docs:
+ go run cmd/docs.go
-lint: install-golint
- golint -min_confidence 0.9 -set_exit_status $(PKGS)
+# Run tests
+test: generate fmt vet manifests bin/kubebuilder
+ @which kubebuilder
+ kubebuilder version
+ go test ./api/... ./controllers/... ./pkg/... -coverprofile cover.out
-install-golint:
- GOLINT_CMD=$(shell command -v golint 2> /dev/null)
-ifndef GOLINT_CMD
- go get golang.org/x/lint/golint
-endif
+# Build manager binary
+manager: generate fmt vet
+ go build -o bin/manager main.go
-check-misspell: install-misspell
- PKGS="${GOFILES_NOVENDOR}" MISSPELL="misspell" ./scripts/misspell-check.sh
+# Run against the configured Kubernetes cluster in ~/.kube/config
+run: generate fmt vet
+ go run ./main.go --verbose
-misspell: install-misspell
- misspell -w ${GOFILES_NOVENDOR}
+# Install CRDs into a cluster
+install: manifests
+ kubectl apply -f config/crd/bases
-install-misspell:
- MISSPELL_CMD=$(shell command -v misspell 2> /dev/null)
-ifndef MISSPELL_CMD
- go get -u github.com/client9/misspell/cmd/misspell
-endif
+# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
+deploy: manifests
+ kubectl apply -f config/crd/bases
+ kustomize build config/default | kubectl apply -f -
-ineffassign: install-ineffassign
- ineffassign ${GOFILES_NOVENDOR}
+# Generate manifests e.g. CRD, RBAC etc.
+manifests: controller-gen
+ $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-install-ineffassign:
- INEFFASSIGN_CMD=$(shell command -v ineffassign 2> /dev/null)
-ifndef INEFFASSIGN_CMD
- go get -u github.com/gordonklaus/ineffassign
+# Run go fmt against code
+fmt:
+ go fmt ./...
+
+# Run go vet against code
+vet:
+ go vet ./...
+
+# Generate code
+generate: controller-gen
+ $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/...
+ $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./pkg/model/...
+
+# Build the docker image
+docker-build: test
+ docker build . -t ${IMG}
+ @echo "updating kustomize image patch file for manager resource"
+ sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml
+
+# Push the docker image
+docker-push:
+ docker push ${IMG}
+
+# find or download controller-gen
+# download controller-gen if necessary
+controller-gen:
+ifeq (, $(shell which controller-gen))
+ go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.1
+CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen
+else
+CONTROLLER_GEN=$(shell which controller-gen)
endif
-.PHONY: docker
-docker: ## Build Docker image
- docker build -t ${DOCKER_IMAGE}:${DOCKER_TAG} -f Dockerfile .
+bin/kubebuilder: bin/kubebuilder_${KUBEBUILDER_VERSION}
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kubebuilder bin/kubebuilder
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kube-apiserver bin/kube-apiserver
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/etcd bin/etcd
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kubectl bin/kubectl
-.PHONY: docs
-docs:
- go run cmd/docgen/docgen.go
\ No newline at end of file
+bin/kubebuilder_${KUBEBUILDER_VERSION}:
+ @mkdir -p bin
+	curl -L https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/kubebuilder_${KUBEBUILDER_VERSION}_darwin_amd64.tar.gz | tar xvzf - -C bin
+ @ln -sf kubebuilder_${KUBEBUILDER_VERSION}_darwin_amd64/bin bin/kubebuilder_${KUBEBUILDER_VERSION}
+# TODO: implement for Linux (CI)
diff --git a/Makefile.app b/Makefile.app
new file mode 100644
index 000000000..8e59b88a8
--- /dev/null
+++ b/Makefile.app
@@ -0,0 +1,6 @@
+.PHONY: check
+check: test license
+
+.PHONY: license
+license:
+ ./scripts/check-header.sh
\ No newline at end of file
diff --git a/PROJECT b/PROJECT
new file mode 100644
index 000000000..1ca5d55ef
--- /dev/null
+++ b/PROJECT
@@ -0,0 +1,25 @@
+version: "2"
+domain: banzaicloud.com
+repo: github.com/banzaicloud/logging-operator
+resources:
+- group: logging
+ version: v1alpha2
+ kind: Fluentbit
+- group: logging
+ version: v1alpha2
+ kind: Fluentd
+- group: logging
+ version: v1alpha2
+ kind: Flow
+- group: logging
+ version: v1alpha2
+ kind: ClusterFlow
+- group: logging
+ version: v1alpha2
+ kind: Output
+- group: logging
+ version: v1alpha2
+ kind: ClusterOutput
+- group: logging
+ version: v1alpha2
+ kind: Logging
diff --git a/README.md b/README.md
index 26ebd0fb7..3021ca743 100644
--- a/README.md
+++ b/README.md
@@ -24,22 +24,25 @@
-# logging-operator
+# logging-operator v2
-Logging operator for Kubernetes based on Fluentd and Fluent-bit. For more details please follow up with this [post](https://banzaicloud.com/blog/k8s-logging-operator/).
+Logging operator for Kubernetes based on Fluentd and Fluent-bit.
## What is this operator for?
This operator helps you to pack together logging information with your applications. With the help of Custom Resource Definition you can describe the behaviour of your application within its charts. The operator does the rest.
-
+
### Motivation
-The logging operator automates the deployment and configuration of a Kubernetes logging pipeline. Under the hood the operator configures a fluent-bit daemonset for collecting container logs from the node file system. Fluent-bit enriches the logs with Kubernetes metadata and transfers them to fluentd. Fluentd receives, filters and transfer logs to multiple outputs. The whole flow can be defined in a single custom resource. Your logs will always be transferred on authenticated and encrypted channels.
+The logging operator automates the deployment and configuration of a Kubernetes logging pipeline. Under the hood, the operator configures a fluent-bit daemonset for collecting container logs from the node file system. Fluent-bit enriches the logs with Kubernetes metadata and transfers them to fluentd. Fluentd receives, filters, and transfers logs to multiple outputs. Your logs will always be transferred over authenticated and encrypted channels.
##### Blogs
+ - [Logging-Operator v2](https://banzaicloud.com/blog/logging-operator-v2/) (soon)
+
+##### Blogs (general logging and operator v1)
- [Advanced logging on Kubernetes](https://banzaicloud.com/blog/k8s-logging-advanced/)
- [Secure logging on Kubernetes with Fluentd and Fluent Bit](https://banzaicloud.com/blog/k8s-logging-tls/)
- [Centralized logging under Kubernetes](https://banzaicloud.com/blog/k8s-logging/)
@@ -47,7 +50,6 @@ The logging operator automates the deployment and configuration of a Kubernetes
- [And more...](https://banzaicloud.com/tags/logging/)
-
Logging-operator is a core part of the [Pipeline](https://beta.banzaicloud.io) platform, a Cloud Native application and devops platform that natively supports multi- and hybrid-cloud deployments with multiple authentication backends. Check out the developer beta:
@@ -55,16 +57,30 @@ Logging-operator is a core part of the [Pipeline](https://beta.banzaicloud.io) p
+## Architecture
+
+Available custom resources:
+- [logging](/docs/crds.md#loggings) - Represents a logging system. Includes `Fluentd` and `Fluent-bit` configuration and specifies the `controlNamespace`, the namespace where Fluentd and Fluent-bit will be deployed.
+- [output](/docs/crds.md#outputs-clusteroutputs) - Defines an Output for a logging flow. This is a namespaced resource.
+- [flow](/docs/crds.md#flows-clusterflows) - Defines a logging flow with `filters` and `outputs`. You can specify `selectors` to filter logs by labels. Outputs can be `output` or `clusteroutput`. This is a namespaced resource.
+- [clusteroutput](/docs/crds.md#outputs-clusteroutputs) - Defines an output without namespace restriction. Only effective in `controlNamespace`.
+- [clusterflow](/docs/crds.md#flows-clusterflows) - Defines a logging flow without namespace restriction.
+
+The detailed CRD documentation can be found [here](/docs/crds.md); a minimal example of how these resources reference each other is shown below.
+
+
+
+*connection between custom resources*
+
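+A minimal sketch of how these resources fit together (all names, namespaces, the selector, and the empty `fluentd`/`fluentbit`/`nullout` blocks below are placeholders, not required values):
+
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+  name: example-logging
+spec:
+  controlNamespace: logging        # Fluentd and Fluent-bit are deployed here
+  fluentd: {}                      # defaults are filled in by the operator
+  fluentbit: {}
+---
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+  name: example-output
+  namespace: default
+spec:
+  nullout: {}                      # discards logs; replace with s3, elasticsearch, etc.
+---
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+  name: example-flow
+  namespace: default
+spec:
+  selectors:
+    app: example                   # only logs from pods with this label are routed
+  outputRefs:
+    - example-output               # references the Output above
+```
+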
---
## Contents
- Installation
- [Deploy with Helm](#deploying-with-helm-chart)
- - [Deploy with Manifest](#deploying-with-kubernetes-manifest)
- [Supported Plugins](#supported-plugins)
- Examples
- - [S3 Output](./docs/examples/s3.md)
- - [Elasticsearch Output](./docs/examples/es.md)
+ - [S3 Output](./docs/example-s3.md)
+ - [Elasticsearch Output](./docs/example-es.md)
- [Troubleshooting](#troubleshooting)
- [Contributing](#contributing)
---
@@ -85,57 +101,29 @@ $ helm repo update
$ helm install banzaicloud-stable/logging-operator
```
-#### Install FluentD, FluentBit CRs from chart
-```bash
-$ helm install banzaicloud-stable/logging-operator-fluent
-```
-
-
---
-## Deploying with Kubernetes Manifest
-
-```
-# Create all the CRDs used by the Operator
-kubectl create -f deploy/crds/logging_v1alpha1_plugin_crd.yaml
-kubectl create -f deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
-kubectl create -f deploy/crds/logging_v1alpha1_fluentd_crd.yaml
-
-# If RBAC enabled create the required resources
-kubectl create -f deploy/clusterrole.yaml
-kubectl create -f deploy/clusterrole_binding.yaml
-kubectl create -f deploy/service_account.yaml
-
-# Create the Operator
-kubectl create -f deploy/operator.yaml
-
-# Create the fluent-bit daemonset by submiting a fluent-bit CR
-kubectl create -f deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
+## Supported Plugins
-# Create the fluentd deployment by submitting a fluentd CR
-kubectl create -f deploy/crds/logging_v1alpha1_fluentd_cr.yaml
+For a complete list of supported plugins, please check the [plugins index](/docs/plugins/index.md).
-```
+| Name | Type | Description | Status | Version |
+|---------------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|-------------------------------------------------------------------------------------------|
+| [Alibaba](./docs/plugins/outputs/oss.md) | Output | Store logs in the Alibaba Cloud Object Storage Service | GA | [0.0.1](https://github.com/aliyun/fluent-plugin-oss) |
+| [Amazon S3](./docs/plugins/outputs/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) |
+| [Azure](./docs/plugins/outputs/azurestore.md) | Output | Store logs in Azure Storage | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) |
+| [Google Storage](./docs/plugins/outputs/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) |
+| [Grafana Loki](./docs/plugins/outputs/loki.md) | Output | Transfer logs to Loki | Testing | [0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) |
+| [ElasticSearch](./docs/plugins/outputs/elasticsearch.md) | Output | Send your logs to Elasticsearch | GA | [3.5.2](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.2) |
+| [Tag Normaliser](./docs/plugins/filters/tagnormaliser.md) | Parser | Normalise tags for outputs | GA | |
+| [Parser](./docs/plugins/filters/parser.md) | Parser | Parse logs with parser plugin | GA | |
-## Supported Plugins
-| Name | Type | Description | Status | Version |
-|-------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|------------------------------------------------------------------------------------------|
-| [Alibaba](./docs/plugins/alibaba.md) | Output | Store logs the Alibaba Cloud Object Storage Service | GA | [0.0.2](https://github.com/jicong/fluent-plugin-oss) |
-| [Amazon S3](./docs/plugins/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) |
-| [Azure](./docs/plugins/azure.md) | Output | Store logs in Azure Storega | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) |
-| [Google Storage](./docs/plugins/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) |
-| [Grafana Loki](./docs/plugins/loki.md) | Output | Transfer logs to Loki | Testing | [0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) |
-| [ElasticSearch](./docs/plugins/parser.md) | Output | Send your logs to Elasticsearch | GA | [3.5.2](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.2) |
-| [HDFS](https://docs.fluentd.org/output/webhdfs) | Output | Fluentd output plugin to write data into Hadoop HDFS over WebHDFS/HttpFs. | GA | [1.2.3](https://github.com/fluent/fluent-plugin-webhdfs/releases/tag/v1.2.3) |
-| [Parser](./docs/plugins/parser.md) | Parser | Parse logs with parser plugin | GA | |
---
## Troubleshooting
If you encounter any problems that the documentation does not address, please [file an issue](https://github.com/banzaicloud/logging-operator/issues) or talk to us on the Banzai Cloud Slack channel [#logging-operator](https://slack.banzaicloud.io/).
-
-
## Contributing
If you find this project useful here's how you can help:
@@ -144,6 +132,8 @@ If you find this project useful here's how you can help:
- Help new users with issues they may encounter
- Support the development of this project and star this repo!
+For more information, please read the [developer documentation](./docs/developers.md).
+
## License
Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com)
@@ -158,4 +148,4 @@ Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
-limitations under the License.
+limitations under the License.
\ No newline at end of file
diff --git a/api/v1alpha2/clusterflow_types.go b/api/v1alpha2/clusterflow_types.go
new file mode 100644
index 000000000..bee039977
--- /dev/null
+++ b/api/v1alpha2/clusterflow_types.go
@@ -0,0 +1,44 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +kubebuilder:object:root=true
+
+// ClusterFlow is the Schema for the clusterflows API
+type ClusterFlow struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec defines the desired state of ClusterFlow (shares FlowSpec with Flow)
+ Spec FlowSpec `json:"spec,omitempty"`
+ Status FlowStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterFlowList contains a list of ClusterFlow
+type ClusterFlowList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ClusterFlow `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ClusterFlow{}, &ClusterFlowList{})
+}
diff --git a/api/v1alpha2/clusterflow_types_test.go b/api/v1alpha2/clusterflow_types_test.go
new file mode 100644
index 000000000..229a90fd4
--- /dev/null
+++ b/api/v1alpha2/clusterflow_types_test.go
@@ -0,0 +1,81 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/types"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// These tests are written in BDD style using the Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("ClusterFlow", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *ClusterFlow
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+	// Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Namespace: "foo",
+ Name: "foo",
+ }
+ created = &ClusterFlow{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "foo",
+ },
+ Spec: FlowSpec{
+ Selectors: map[string]string{},
+ OutputRefs: []string{},
+ },
+ Status: FlowStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &ClusterFlow{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1alpha2/clusteroutput_types.go b/api/v1alpha2/clusteroutput_types.go
new file mode 100644
index 000000000..25ac13451
--- /dev/null
+++ b/api/v1alpha2/clusteroutput_types.go
@@ -0,0 +1,50 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +kubebuilder:object:root=true
+
+// ClusterOutput is the Schema for the clusteroutputs API
+type ClusterOutput struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ClusterOutputSpec `json:"spec"`
+ Status OutputStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
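+// ClusterOutputSpec defines the desired state of ClusterOutput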
+type ClusterOutputSpec struct {
+ OutputSpec `json:",inline"`
+ EnabledNamespaces []string `json:"enabledNamespaces,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterOutputList contains a list of ClusterOutput
+type ClusterOutputList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ClusterOutput `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ClusterOutput{}, &ClusterOutputList{})
+}
diff --git a/api/v1alpha2/clusteroutput_types_test.go b/api/v1alpha2/clusteroutput_types_test.go
new file mode 100644
index 000000000..7ce221880
--- /dev/null
+++ b/api/v1alpha2/clusteroutput_types_test.go
@@ -0,0 +1,84 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// These tests are written in BDD style using the Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("ClusterOutput", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *ClusterOutput
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+	// Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Name: "foo",
+ Namespace: "foo",
+ }
+ created = &ClusterOutput{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "foo",
+ },
+ Spec: ClusterOutputSpec{
+ OutputSpec: OutputSpec{
+ S3OutputConfig: nil,
+ NullOutputConfig: nil,
+ },
+ },
+ Status: OutputStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &ClusterOutput{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1alpha2/common_types.go b/api/v1alpha2/common_types.go
new file mode 100644
index 000000000..d8b530b1f
--- /dev/null
+++ b/api/v1alpha2/common_types.go
@@ -0,0 +1,22 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+// ImageSpec struct hold information about image specification
+type ImageSpec struct {
+ Repository string `json:"repository"`
+ Tag string `json:"tag"`
+ PullPolicy string `json:"pullPolicy"`
+}
diff --git a/api/v1alpha2/flow_types.go b/api/v1alpha2/flow_types.go
new file mode 100644
index 000000000..431581bf8
--- /dev/null
+++ b/api/v1alpha2/flow_types.go
@@ -0,0 +1,63 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/filter"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
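+// FlowSpec defines the desired state of Flow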
+type FlowSpec struct {
+ Selectors map[string]string `json:"selectors"`
+ Filters []Filter `json:"filters,omitempty"`
+ LoggingRef string `json:"loggingRef,omitempty"`
+ OutputRefs []string `json:"outputRefs"`
+}
+
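+// Filter holds the supported filter plugin configurations for a Flow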
+type Filter struct {
+ StdOut *filter.StdOutFilterConfig `json:"stdout,omitempty"`
+ Parser *filter.ParserConfig `json:"parser,omitempty"`
+ TagNormaliser *filter.TagNormaliser `json:"tag_normaliser,omitempty"`
+}
+
+// FlowStatus defines the observed state of Flow
+type FlowStatus struct {
+ // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+ // Important: Run "make" to regenerate code after modifying this file
+}
+
+// +kubebuilder:object:root=true
+
+// +kubebuilder:printcolumn:name="Logging",type=string,JSONPath=`.spec.loggingRef`
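+// Flow is the Schema for the flows API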
+type Flow struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec FlowSpec `json:"spec,omitempty"`
+ Status FlowStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// FlowList contains a list of Flow
+type FlowList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Flow `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Flow{}, &FlowList{})
+}
diff --git a/api/v1alpha2/flow_types_test.go b/api/v1alpha2/flow_types_test.go
new file mode 100644
index 000000000..d7a78389d
--- /dev/null
+++ b/api/v1alpha2/flow_types_test.go
@@ -0,0 +1,81 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// These tests are written in BDD style using the Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("Flow", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *Flow
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+	// Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Name: "foo",
+ Namespace: "default",
+ }
+ created = &Flow{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "default",
+ },
+ Spec: FlowSpec{
+ Selectors: map[string]string{},
+ OutputRefs: []string{},
+ },
+ Status: FlowStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &Flow{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1alpha2/fluentbit_types.go b/api/v1alpha2/fluentbit_types.go
new file mode 100644
index 000000000..34d90885c
--- /dev/null
+++ b/api/v1alpha2/fluentbit_types.go
@@ -0,0 +1,55 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ "strconv"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+// +kubebuilder:object:generate=true
+
+// FluentbitSpec defines the desired state of Fluentbit
+type FluentbitSpec struct {
+ Annotations map[string]string `json:"annotations,omitempty"`
+ Image ImageSpec `json:"image,omitempty"`
+ TLS FluentbitTLS `json:"tls,omitempty"`
+ TargetHost string `json:"targetHost,omitempty"`
+ TargetPort int32 `json:"targetPort,omitempty"`
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+// FluentbitTLS defines the TLS configs
+type FluentbitTLS struct {
+ Enabled bool `json:"enabled"`
+ SecretName string `json:"secretName"`
+ SharedKey string `json:"sharedKey,omitempty"`
+}
+
+// GetPrometheusPortFromAnnotation gets the port value from annotation
+func (spec FluentbitSpec) GetPrometheusPortFromAnnotation() int32 {
+ var err error
+ var port int64
+ if spec.Annotations != nil {
+ port, err = strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32)
+		if err != nil {
+			// An invalid or missing prometheus.io/port annotation should not panic; fall back to 0
+			return 0
+		}
+ }
+ return int32(port)
+}
diff --git a/api/v1alpha2/fluentd_types.go b/api/v1alpha2/fluentd_types.go
new file mode 100644
index 000000000..4438fda9f
--- /dev/null
+++ b/api/v1alpha2/fluentd_types.go
@@ -0,0 +1,58 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ "strconv"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+// +kubebuilder:object:generate=true
+
+// FluentdSpec defines the desired state of Fluentd
+type FluentdSpec struct {
+ Annotations map[string]string `json:"annotations,omitempty"`
+ TLS FluentdTLS `json:"tls,omitempty"`
+ Image ImageSpec `json:"image,omitempty"`
+ FluentdPvcSpec corev1.PersistentVolumeClaimSpec `json:"fluentdPvcSpec,omitempty"`
+ DisablePvc bool `json:"disablePvc,omitempty"`
+ VolumeModImage ImageSpec `json:"volumeModImage,omitempty"`
+ ConfigReloaderImage ImageSpec `json:"configReloaderImage,omitempty"`
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+ Port int32 `json:"port,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+// FluentdTLS defines the TLS configs
+type FluentdTLS struct {
+ Enabled bool `json:"enabled"`
+ SecretName string `json:"secretName"`
+ SharedKey string `json:"sharedKey,omitempty"`
+}
+
+// GetPrometheusPortFromAnnotation gets the port value from annotation
+func (spec FluentdSpec) GetPrometheusPortFromAnnotation() int32 {
+ var err error
+ var port int64
+ if spec.Annotations != nil {
+ port, err = strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32)
+ if err != nil {
+ return 0
+ }
+ }
+ return int32(port)
+}
diff --git a/api/v1alpha2/groupversion_info.go b/api/v1alpha2/groupversion_info.go
new file mode 100644
index 000000000..aac6e0d4e
--- /dev/null
+++ b/api/v1alpha2/groupversion_info.go
@@ -0,0 +1,34 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v1alpha2 contains API Schema definitions for the logging v1alpha2 API group
+// +kubebuilder:object:generate=true
+// +groupName=logging.banzaicloud.com
+package v1alpha2
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "logging.banzaicloud.com", Version: "v1alpha2"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/api/v1alpha2/logging_types.go b/api/v1alpha2/logging_types.go
new file mode 100644
index 000000000..2010a3ba8
--- /dev/null
+++ b/api/v1alpha2/logging_types.go
@@ -0,0 +1,178 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ "fmt"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// LoggingSpec defines the desired state of Logging
+type LoggingSpec struct {
+ LoggingRef string `json:"loggingRef,omitempty"`
+ FlowConfigCheckDisabled bool `json:"flowConfigCheckDisabled,omitempty"`
+ FlowConfigOverride string `json:"flowConfigOverride,omitempty"`
+ FluentbitSpec *FluentbitSpec `json:"fluentbit,omitempty"`
+ FluentdSpec *FluentdSpec `json:"fluentd,omitempty"`
+ WatchNamespaces []string `json:"watchNamespaces,omitempty"`
+ ControlNamespace string `json:"controlNamespace"`
+}
+
+// LoggingStatus defines the observed state of Logging
+type LoggingStatus struct {
+ ConfigCheckResults map[string]bool `json:"configCheckResults,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=loggings,scope=Cluster
+
+// Logging is the Schema for the loggings API
+type Logging struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec LoggingSpec `json:"spec,omitempty"`
+ Status LoggingStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// LoggingList contains a list of Logging
+type LoggingList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Logging `json:"items"`
+}
+
+func (l *Logging) SetDefaults() *Logging {
+ copy := l.DeepCopy()
+ if !copy.Spec.FlowConfigCheckDisabled && copy.Status.ConfigCheckResults == nil {
+ copy.Status.ConfigCheckResults = make(map[string]bool)
+ }
+ if copy.Spec.WatchNamespaces == nil {
+ copy.Spec.WatchNamespaces = []string{}
+ }
+ if copy.Spec.FluentdSpec != nil {
+ if copy.Spec.FluentdSpec.Image.Repository == "" {
+ copy.Spec.FluentdSpec.Image.Repository = "banzaicloud/fluentd"
+ }
+ if copy.Spec.FluentdSpec.Image.Tag == "" {
+ copy.Spec.FluentdSpec.Image.Tag = "v1.6.3-alpine"
+ }
+ if copy.Spec.FluentdSpec.Image.PullPolicy == "" {
+ copy.Spec.FluentdSpec.Image.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentdSpec.Annotations == nil {
+ copy.Spec.FluentdSpec.Annotations = map[string]string{
+ "prometheus.io/scrape": "true",
+ "prometheus.io/path": "/metrics",
+ "prometheus.io/port": "25000",
+ }
+ }
+ if copy.Spec.FluentdSpec.FluentdPvcSpec.AccessModes == nil {
+ copy.Spec.FluentdSpec.FluentdPvcSpec.AccessModes = []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
+ }
+ }
+ if copy.Spec.FluentdSpec.FluentdPvcSpec.Resources.Requests == nil {
+ copy.Spec.FluentdSpec.FluentdPvcSpec.Resources.Requests = map[v1.ResourceName]resource.Quantity{
+ "storage": resource.MustParse("20Gi"),
+ }
+ }
+ if copy.Spec.FluentdSpec.VolumeModImage.Repository == "" {
+ copy.Spec.FluentdSpec.VolumeModImage.Repository = "busybox"
+ }
+ if copy.Spec.FluentdSpec.VolumeModImage.Tag == "" {
+ copy.Spec.FluentdSpec.VolumeModImage.Tag = "latest"
+ }
+ if copy.Spec.FluentdSpec.VolumeModImage.PullPolicy == "" {
+ copy.Spec.FluentdSpec.VolumeModImage.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentdSpec.ConfigReloaderImage.Repository == "" {
+ copy.Spec.FluentdSpec.ConfigReloaderImage.Repository = "jimmidyson/configmap-reload"
+ }
+ if copy.Spec.FluentdSpec.ConfigReloaderImage.Tag == "" {
+ copy.Spec.FluentdSpec.ConfigReloaderImage.Tag = "v0.2.2"
+ }
+ if copy.Spec.FluentdSpec.ConfigReloaderImage.PullPolicy == "" {
+ copy.Spec.FluentdSpec.ConfigReloaderImage.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentdSpec.Resources.Limits == nil {
+ copy.Spec.FluentdSpec.Resources.Limits = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("200M"),
+ v1.ResourceCPU: resource.MustParse("1000m"),
+ }
+ }
+ if copy.Spec.FluentdSpec.Resources.Requests == nil {
+ copy.Spec.FluentdSpec.Resources.Requests = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("100M"),
+ v1.ResourceCPU: resource.MustParse("500m"),
+ }
+ }
+ if copy.Spec.FluentdSpec.Port == 0 {
+ copy.Spec.FluentdSpec.Port = 24240
+ }
+ }
+ if copy.Spec.FluentbitSpec != nil {
+ if copy.Spec.FluentbitSpec.Image.Repository == "" {
+ copy.Spec.FluentbitSpec.Image.Repository = "fluent/fluent-bit"
+ }
+ if copy.Spec.FluentbitSpec.Image.Tag == "" {
+ copy.Spec.FluentbitSpec.Image.Tag = "1.2.2"
+ }
+ if copy.Spec.FluentbitSpec.Image.PullPolicy == "" {
+ copy.Spec.FluentbitSpec.Image.PullPolicy = "IfNotPresent"
+ }
+ if copy.Spec.FluentbitSpec.Resources.Limits == nil {
+ copy.Spec.FluentbitSpec.Resources.Limits = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("100M"),
+ v1.ResourceCPU: resource.MustParse("200m"),
+ }
+ }
+ if copy.Spec.FluentbitSpec.Resources.Requests == nil {
+ copy.Spec.FluentbitSpec.Resources.Requests = v1.ResourceList{
+ v1.ResourceMemory: resource.MustParse("50M"),
+ v1.ResourceCPU: resource.MustParse("100m"),
+ }
+ }
+ if copy.Spec.FluentbitSpec.Annotations == nil {
+ copy.Spec.FluentbitSpec.Annotations = map[string]string{
+ "prometheus.io/scrape": "true",
+ "prometheus.io/path": "/api/v1/metrics/prometheus",
+ "prometheus.io/port": "2020",
+ }
+ }
+ }
+ return copy
+}
+
+func (l *Logging) QualifiedName(name string) string {
+ return fmt.Sprintf("%s-%s", l.Name, name)
+}
+
+func (l *Logging) QualifiedNamespacedName(name string) string {
+ return fmt.Sprintf("%s-%s-%s", l.Spec.ControlNamespace, l.Name, name)
+}
+
+func init() {
+ SchemeBuilder.Register(&Logging{}, &LoggingList{})
+}
diff --git a/api/v1alpha2/output_types.go b/api/v1alpha2/output_types.go
new file mode 100644
index 000000000..a9748c93b
--- /dev/null
+++ b/api/v1alpha2/output_types.go
@@ -0,0 +1,63 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OutputSpec defines the desired state of Output
+type OutputSpec struct {
+ LoggingRef string `json:"loggingRef,omitempty"`
+ S3OutputConfig *output.S3OutputConfig `json:"s3,omitempty"`
+ AzureStorage *output.AzureStorage `json:"azurestorage,omitempty"`
+ GCSOutput *output.GCSOutput `json:"gcs,omitempty"`
+ OSSOutput *output.OSSOutput `json:"oss,omitempty"`
+ ElasticsearchOutput *output.ElasticsearchOutput `json:"elasticsearch,omitempty"`
+ LokiOutput *output.LokiOutput `json:"loki,omitempty"`
+	SumologicOutput     *output.SumologicOutput     `json:"sumologic,omitempty"`
+ NullOutputConfig *output.NullOutputConfig `json:"nullout,omitempty"`
+}
+
+// OutputStatus defines the observed state of Output
+type OutputStatus struct {
+ // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+ // Important: Run "make" to regenerate code after modifying this file
+}
+
+// +kubebuilder:object:root=true
+
+// Output is the Schema for the outputs API
+type Output struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec OutputSpec `json:"spec,omitempty"`
+ Status OutputStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// OutputList contains a list of Output
+type OutputList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Output `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Output{}, &OutputList{})
+}
diff --git a/api/v1alpha2/output_types_test.go b/api/v1alpha2/output_types_test.go
new file mode 100644
index 000000000..5f9a88d78
--- /dev/null
+++ b/api/v1alpha2/output_types_test.go
@@ -0,0 +1,82 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// These tests are written in BDD style using the Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("Output", func() {
+ var (
+ key types.NamespacedName
+ created, fetched *Output
+ )
+
+ BeforeEach(func() {
+		// Add any setup steps that need to be executed before each test
+ })
+
+ AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+ })
+
+	// Add Tests for OpenAPI validation (or additional CRD features) specified in
+ // your API definition.
+ // Avoid adding tests for vanilla CRUD operations because they would
+ // test Kubernetes API server, which isn't the goal here.
+ Context("Create API", func() {
+
+ It("should create an object successfully", func() {
+
+ key = types.NamespacedName{
+ Name: "foo",
+ Namespace: "default",
+ }
+ created = &Output{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "default",
+ },
+ Spec: OutputSpec{
+ S3OutputConfig: nil,
+ NullOutputConfig: nil,
+ },
+ Status: OutputStatus{},
+ }
+
+ By("creating an API obj")
+ Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())
+
+ fetched = &Output{}
+ Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
+ Expect(fetched).To(Equal(created))
+
+ By("deleting the created object")
+ Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
+ Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
+ })
+
+ })
+
+})
diff --git a/api/v1alpha2/suite_test.go b/api/v1alpha2/suite_test.go
new file mode 100644
index 000000000..3e764132a
--- /dev/null
+++ b/api/v1alpha2/suite_test.go
@@ -0,0 +1,73 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha2
+
+import (
+ "path/filepath"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/envtest"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+// These tests use Ginkgo (a BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestAPIs(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecsWithDefaultAndCustomReporters(t,
+ "v1alpha2 Suite",
+ []Reporter{envtest.NewlineReporter{}})
+}
+
+var _ = BeforeSuite(func(done Done) {
+ logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
+
+ By("bootstrapping test environment")
+ testEnv = &envtest.Environment{
+ CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
+ }
+
+ err := SchemeBuilder.AddToScheme(scheme.Scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ cfg, err = testEnv.Start()
+ Expect(err).ToNot(HaveOccurred())
+ Expect(cfg).ToNot(BeNil())
+
+ k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(k8sClient).ToNot(BeNil())
+
+ close(done)
+}, 60)
+
+var _ = AfterSuite(func() {
+ By("tearing down the test environment")
+ err := testEnv.Stop()
+ Expect(err).ToNot(HaveOccurred())
+})
diff --git a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go
similarity index 50%
rename from pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
rename to api/v1alpha2/zz_generated.deepcopy.go
index 69a9e38f2..5467dd9be 100644
--- a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha2/zz_generated.deepcopy.go
@@ -1,75 +1,109 @@
// +build !ignore_autogenerated
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by operator-sdk. DO NOT EDIT.
-
-package v1alpha1
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha2
import (
- v1 "k8s.io/api/core/v1"
+ "github.com/banzaicloud/logging-operator/pkg/model/filter"
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FPlugin) DeepCopyInto(out *FPlugin) {
+func (in *ClusterFlow) DeepCopyInto(out *ClusterFlow) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFlow.
+func (in *ClusterFlow) DeepCopy() *ClusterFlow {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterFlow)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterFlow) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterFlowList) DeepCopyInto(out *ClusterFlowList) {
*out = *in
- if in.Parameters != nil {
- in, out := &in.Parameters, &out.Parameters
- *out = make([]Parameter, len(*in))
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterFlow, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FPlugin.
-func (in *FPlugin) DeepCopy() *FPlugin {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFlowList.
+func (in *ClusterFlowList) DeepCopy() *ClusterFlowList {
if in == nil {
return nil
}
- out := new(FPlugin)
+ out := new(ClusterFlowList)
in.DeepCopyInto(out)
return out
}
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterFlowList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Fluentbit) DeepCopyInto(out *Fluentbit) {
+func (in *ClusterOutput) DeepCopyInto(out *ClusterOutput) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fluentbit.
-func (in *Fluentbit) DeepCopy() *Fluentbit {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutput.
+func (in *ClusterOutput) DeepCopy() *ClusterOutput {
if in == nil {
return nil
}
- out := new(Fluentbit)
+ out := new(ClusterOutput)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Fluentbit) DeepCopyObject() runtime.Object {
+func (in *ClusterOutput) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -77,32 +111,31 @@ func (in *Fluentbit) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitList) DeepCopyInto(out *FluentbitList) {
+func (in *ClusterOutputList) DeepCopyInto(out *ClusterOutputList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]Fluentbit, len(*in))
+ *out = make([]ClusterOutput, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitList.
-func (in *FluentbitList) DeepCopy() *FluentbitList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutputList.
+func (in *ClusterOutputList) DeepCopy() *ClusterOutputList {
if in == nil {
return nil
}
- out := new(FluentbitList)
+ out := new(ClusterOutputList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FluentbitList) DeepCopyObject() runtime.Object {
+func (in *ClusterOutputList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -110,92 +143,77 @@ func (in *FluentbitList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitSpec) DeepCopyInto(out *FluentbitSpec) {
+func (in *ClusterOutputSpec) DeepCopyInto(out *ClusterOutputSpec) {
*out = *in
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- out.Image = in.Image
- out.TLS = in.TLS
- in.Resources.DeepCopyInto(&out.Resources)
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]v1.Toleration, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ in.OutputSpec.DeepCopyInto(&out.OutputSpec)
+ if in.EnabledNamespaces != nil {
+ in, out := &in.EnabledNamespaces, &out.EnabledNamespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitSpec.
-func (in *FluentbitSpec) DeepCopy() *FluentbitSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutputSpec.
+func (in *ClusterOutputSpec) DeepCopy() *ClusterOutputSpec {
if in == nil {
return nil
}
- out := new(FluentbitSpec)
+ out := new(ClusterOutputSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitStatus) DeepCopyInto(out *FluentbitStatus) {
+func (in *Filter) DeepCopyInto(out *Filter) {
*out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitStatus.
-func (in *FluentbitStatus) DeepCopy() *FluentbitStatus {
- if in == nil {
- return nil
+ if in.StdOut != nil {
+ in, out := &in.StdOut, &out.StdOut
+ *out = new(filter.StdOutFilterConfig)
+ **out = **in
+ }
+ if in.Parser != nil {
+ in, out := &in.Parser, &out.Parser
+ *out = new(filter.ParserConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TagNormaliser != nil {
+ in, out := &in.TagNormaliser, &out.TagNormaliser
+ *out = new(filter.TagNormaliser)
+ **out = **in
}
- out := new(FluentbitStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentbitTLS) DeepCopyInto(out *FluentbitTLS) {
- *out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitTLS.
-func (in *FluentbitTLS) DeepCopy() *FluentbitTLS {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
if in == nil {
return nil
}
- out := new(FluentbitTLS)
+ out := new(Filter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Fluentd) DeepCopyInto(out *Fluentd) {
+func (in *Flow) DeepCopyInto(out *Flow) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fluentd.
-func (in *Fluentd) DeepCopy() *Fluentd {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flow.
+func (in *Flow) DeepCopy() *Flow {
if in == nil {
return nil
}
- out := new(Fluentd)
+ out := new(Flow)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Fluentd) DeepCopyObject() runtime.Object {
+func (in *Flow) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -203,32 +221,31 @@ func (in *Fluentd) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdList) DeepCopyInto(out *FluentdList) {
+func (in *FlowList) DeepCopyInto(out *FlowList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]Fluentd, len(*in))
+ *out = make([]Flow, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdList.
-func (in *FluentdList) DeepCopy() *FluentdList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowList.
+func (in *FlowList) DeepCopy() *FlowList {
if in == nil {
return nil
}
- out := new(FluentdList)
+ out := new(FlowList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FluentdList) DeepCopyObject() runtime.Object {
+func (in *FlowList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -236,171 +253,173 @@ func (in *FluentdList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) {
+func (in *FlowSpec) DeepCopyInto(out *FlowSpec) {
*out = *in
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
- out.TLS = in.TLS
- out.Image = in.Image
- in.FluentdPvcSpec.DeepCopyInto(&out.FluentdPvcSpec)
- out.VolumeModImage = in.VolumeModImage
- out.ConfigReloaderImage = in.ConfigReloaderImage
- in.Resources.DeepCopyInto(&out.Resources)
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]v1.Toleration, len(*in))
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]Filter, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
+ if in.OutputRefs != nil {
+ in, out := &in.OutputRefs, &out.OutputRefs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec.
-func (in *FluentdSpec) DeepCopy() *FluentdSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSpec.
+func (in *FlowSpec) DeepCopy() *FlowSpec {
if in == nil {
return nil
}
- out := new(FluentdSpec)
+ out := new(FlowSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdStatus) DeepCopyInto(out *FluentdStatus) {
+func (in *FlowStatus) DeepCopyInto(out *FlowStatus) {
*out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdStatus.
-func (in *FluentdStatus) DeepCopy() *FluentdStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowStatus.
+func (in *FlowStatus) DeepCopy() *FlowStatus {
if in == nil {
return nil
}
- out := new(FluentdStatus)
+ out := new(FlowStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FluentdTLS) DeepCopyInto(out *FluentdTLS) {
+func (in *FluentbitSpec) DeepCopyInto(out *FluentbitSpec) {
*out = *in
- return
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.Image = in.Image
+ out.TLS = in.TLS
+ in.Resources.DeepCopyInto(&out.Resources)
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdTLS.
-func (in *FluentdTLS) DeepCopy() *FluentdTLS {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitSpec.
+func (in *FluentbitSpec) DeepCopy() *FluentbitSpec {
if in == nil {
return nil
}
- out := new(FluentdTLS)
+ out := new(FluentbitSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
+func (in *FluentbitTLS) DeepCopyInto(out *FluentbitTLS) {
*out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
-func (in *ImageSpec) DeepCopy() *ImageSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitTLS.
+func (in *FluentbitTLS) DeepCopy() *FluentbitTLS {
if in == nil {
return nil
}
- out := new(ImageSpec)
+ out := new(FluentbitTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Input) DeepCopyInto(out *Input) {
+func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) {
*out = *in
- if in.Label != nil {
- in, out := &in.Label, &out.Label
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
- return
+ out.TLS = in.TLS
+ out.Image = in.Image
+ in.FluentdPvcSpec.DeepCopyInto(&out.FluentdPvcSpec)
+ out.VolumeModImage = in.VolumeModImage
+ out.ConfigReloaderImage = in.ConfigReloaderImage
+ in.Resources.DeepCopyInto(&out.Resources)
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input.
-func (in *Input) DeepCopy() *Input {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec.
+func (in *FluentdSpec) DeepCopy() *FluentdSpec {
if in == nil {
return nil
}
- out := new(Input)
+ out := new(FluentdSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubernetesSecret) DeepCopyInto(out *KubernetesSecret) {
+func (in *FluentdTLS) DeepCopyInto(out *FluentdTLS) {
*out = *in
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecret.
-func (in *KubernetesSecret) DeepCopy() *KubernetesSecret {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdTLS.
+func (in *FluentdTLS) DeepCopy() *FluentdTLS {
if in == nil {
return nil
}
- out := new(KubernetesSecret)
+ out := new(FluentdTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Parameter) DeepCopyInto(out *Parameter) {
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
*out = *in
- if in.ValueFrom != nil {
- in, out := &in.ValueFrom, &out.ValueFrom
- *out = new(ValueFrom)
- **out = **in
- }
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter.
-func (in *Parameter) DeepCopy() *Parameter {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
+func (in *ImageSpec) DeepCopy() *ImageSpec {
if in == nil {
return nil
}
- out := new(Parameter)
+ out := new(ImageSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Plugin) DeepCopyInto(out *Plugin) {
+func (in *Logging) DeepCopyInto(out *Logging) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
+ in.Status.DeepCopyInto(&out.Status)
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin.
-func (in *Plugin) DeepCopy() *Plugin {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging.
+func (in *Logging) DeepCopy() *Logging {
if in == nil {
return nil
}
- out := new(Plugin)
+ out := new(Logging)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Plugin) DeepCopyObject() runtime.Object {
+func (in *Logging) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -408,32 +427,31 @@ func (in *Plugin) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginList) DeepCopyInto(out *PluginList) {
+func (in *LoggingList) DeepCopyInto(out *LoggingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]Plugin, len(*in))
+ *out = make([]Logging, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginList.
-func (in *PluginList) DeepCopy() *PluginList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingList.
+func (in *LoggingList) DeepCopy() *LoggingList {
if in == nil {
return nil
}
- out := new(PluginList)
+ out := new(LoggingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PluginList) DeepCopyObject() runtime.Object {
+func (in *LoggingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -441,65 +459,182 @@ func (in *PluginList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginSpec) DeepCopyInto(out *PluginSpec) {
+func (in *LoggingSpec) DeepCopyInto(out *LoggingSpec) {
*out = *in
- in.Input.DeepCopyInto(&out.Input)
- if in.Filter != nil {
- in, out := &in.Filter, &out.Filter
- *out = make([]FPlugin, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
+ if in.FluentbitSpec != nil {
+ in, out := &in.FluentbitSpec, &out.FluentbitSpec
+ *out = new(FluentbitSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FluentdSpec != nil {
+ in, out := &in.FluentdSpec, &out.FluentdSpec
+ *out = new(FluentdSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WatchNamespaces != nil {
+ in, out := &in.WatchNamespaces, &out.WatchNamespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingSpec.
+func (in *LoggingSpec) DeepCopy() *LoggingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LoggingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingStatus) DeepCopyInto(out *LoggingStatus) {
+ *out = *in
+ if in.ConfigCheckResults != nil {
+ in, out := &in.ConfigCheckResults, &out.ConfigCheckResults
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
}
}
- if in.Output != nil {
- in, out := &in.Output, &out.Output
- *out = make([]FPlugin, len(*in))
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingStatus.
+func (in *LoggingStatus) DeepCopy() *LoggingStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LoggingStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Output) DeepCopyInto(out *Output) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Output.
+func (in *Output) DeepCopy() *Output {
+ if in == nil {
+ return nil
+ }
+ out := new(Output)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Output) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutputList) DeepCopyInto(out *OutputList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Output, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSpec.
-func (in *PluginSpec) DeepCopy() *PluginSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputList.
+func (in *OutputList) DeepCopy() *OutputList {
if in == nil {
return nil
}
- out := new(PluginSpec)
+ out := new(OutputList)
in.DeepCopyInto(out)
return out
}
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OutputList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginStatus) DeepCopyInto(out *PluginStatus) {
+func (in *OutputSpec) DeepCopyInto(out *OutputSpec) {
*out = *in
- return
+ if in.S3OutputConfig != nil {
+ in, out := &in.S3OutputConfig, &out.S3OutputConfig
+ *out = new(output.S3OutputConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AzureStorage != nil {
+ in, out := &in.AzureStorage, &out.AzureStorage
+ *out = new(output.AzureStorage)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GCSOutput != nil {
+ in, out := &in.GCSOutput, &out.GCSOutput
+ *out = new(output.GCSOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OSSOutput != nil {
+ in, out := &in.OSSOutput, &out.OSSOutput
+ *out = new(output.OSSOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ElasticsearchOutput != nil {
+ in, out := &in.ElasticsearchOutput, &out.ElasticsearchOutput
+ *out = new(output.ElasticsearchOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LokiOutput != nil {
+ in, out := &in.LokiOutput, &out.LokiOutput
+ *out = new(output.LokiOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SumologicOutput != nil {
+ in, out := &in.SumologicOutput, &out.SumologicOutput
+ *out = new(output.SumologicOutput)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NullOutputConfig != nil {
+ in, out := &in.NullOutputConfig, &out.NullOutputConfig
+ *out = new(output.NullOutputConfig)
+ **out = **in
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus.
-func (in *PluginStatus) DeepCopy() *PluginStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSpec.
+func (in *OutputSpec) DeepCopy() *OutputSpec {
if in == nil {
return nil
}
- out := new(PluginStatus)
+ out := new(OutputSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ValueFrom) DeepCopyInto(out *ValueFrom) {
+func (in *OutputStatus) DeepCopyInto(out *OutputStatus) {
*out = *in
- out.SecretKeyRef = in.SecretKeyRef
- return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom.
-func (in *ValueFrom) DeepCopy() *ValueFrom {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputStatus.
+func (in *OutputStatus) DeepCopy() *OutputStatus {
if in == nil {
return nil
}
- out := new(ValueFrom)
+ out := new(OutputStatus)
in.DeepCopyInto(out)
return out
}
diff --git a/build/Dockerfile b/build/Dockerfile
deleted file mode 100644
index c2f7eec44..000000000
--- a/build/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM alpine:3.8
-
-ENV OPERATOR=/usr/local/bin/logging-operator \
- USER_UID=1001 \
- USER_NAME=logging-operator
-
-# install operator binary
-COPY build/_output/bin/logging-operator ${OPERATOR}
-
-COPY build/bin /usr/local/bin
-RUN /usr/local/bin/user_setup
-
-ENTRYPOINT ["/usr/local/bin/entrypoint"]
-
-USER ${USER_UID}
diff --git a/build/bin/entrypoint b/build/bin/entrypoint
deleted file mode 100755
index 76d31a162..000000000
--- a/build/bin/entrypoint
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -e
-
-# This is documented here:
-# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines
-
-if ! whoami &>/dev/null; then
- if [ -w /etc/passwd ]; then
- echo "${USER_NAME:-logging-operator}:x:$(id -u):$(id -g):${USER_NAME:-logging-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd
- fi
-fi
-
-exec ${OPERATOR} $@
diff --git a/build/bin/user_setup b/build/bin/user_setup
deleted file mode 100755
index 1e36064cb..000000000
--- a/build/bin/user_setup
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-set -x
-
-# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
-mkdir -p ${HOME}
-chown ${USER_UID}:0 ${HOME}
-chmod ug+rwx ${HOME}
-
-# runtime user will need to be able to self-insert in /etc/passwd
-chmod g+rw /etc/passwd
-
-# no need for this script to remain in the image after running
-rm $0
diff --git a/charts/logging-operator-fluent/Chart.yaml b/charts/logging-operator-fluent/Chart.yaml
deleted file mode 100644
index 3341c6f4b..000000000
--- a/charts/logging-operator-fluent/Chart.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-description: Logging operator CR for Fluentd and Fluent-bit.
-name: logging-operator-fluent
-version: 0.1.5
-home: https://github.com/banzaicloud/logging-operator
-icon: https://banzaicloud.com/img/banzai-cloud-logo.png
-keywords:
- - logging
- - monitoring
- - fluentd
- - fluenbit
- - operator
-sources:
- - https://github.com/banzaicloud/logging-operator
-maintainers:
- - name: Banzai Cloud
- email: info@banzaicloud.com
-
diff --git a/charts/logging-operator-fluent/templates/_helpers.tpl b/charts/logging-operator-fluent/templates/_helpers.tpl
deleted file mode 100644
index 149739ce7..000000000
--- a/charts/logging-operator-fluent/templates/_helpers.tpl
+++ /dev/null
@@ -1,32 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "logging-operator-fluent.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "logging-operator-fluent.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "logging-operator-fluent.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
diff --git a/charts/logging-operator-fluent/templates/fluentbit-cr.yaml b/charts/logging-operator-fluent/templates/fluentbit-cr.yaml
deleted file mode 100644
index f65ef07b8..000000000
--- a/charts/logging-operator-fluent/templates/fluentbit-cr.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-{{- if .Values.fluentbit.enabled }}
-{{ $fluentbitUseGenericSecret := or .Values.tls.secretName (not .Values.fluentbit.tlsSecret ) }}
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentbit
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentbit
- labels:
- app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }}
- helm.sh/chart: {{ include "logging-operator-fluent.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- namespace: {{ default .Release.Namespace .Values.watchNamespace }}
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/api/v1/metrics/prometheus"
- prometheus.io/port: "2020"
- image: {{ toYaml .Values.fluentbit.image | nindent 4 }}
- resources: {}
- {{- if .Values.fluentbit.tolerations }}
- tolerations: {{ toYaml .Values.fluentbit.tolerations | nindent 4 }}
- {{- end }}
- tls:
- enabled: {{ .Values.tls.enabled }}
-{{- if $fluentbitUseGenericSecret }}
- secretName: {{ .Values.tls.secretName | default (include "logging-operator-fluent.fullname" .) }}
- secretType: generic
-{{- else }}
- secretName: {{ .Values.fluentbit.tlsSecret }}
- secretType: tls
-{{- end }}
- sharedKey: {{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) | b64enc | quote }}
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/fluentd-cr.yaml b/charts/logging-operator-fluent/templates/fluentd-cr.yaml
deleted file mode 100644
index f205e3d61..000000000
--- a/charts/logging-operator-fluent/templates/fluentd-cr.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{{- if .Values.fluentd.enabled }}
-{{ $fluentdUseGenericSecret := or .Values.tls.secretName (not .Values.fluentd.tlsSecret) }}
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
- labels:
- app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }}
- helm.sh/chart: {{ include "logging-operator-fluent.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- namespace: {{ default .Release.Namespace .Values.watchNamespace }}
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/metrics"
- prometheus.io/port: "25000"
- image: {{ toYaml .Values.fluentd.image | nindent 4 }}
- volumeModImage: {{ toYaml .Values.fluentd.volumeModImage | nindent 4 }}
- configReloaderImage: {{ toYaml .Values.fluentd.configReloaderImage | nindent 4 }}
- resources: {}
- fluentdPvcSpec: {{ toYaml .Values.fluentd.fluentdPvcSpec | nindent 4 }}
- {{- if .Values.fluentd.tolerations }}
- tolerations: {{ toYaml .Values.fluentd.tolerations | nindent 4 }}
- {{- end }}
- tls:
- enabled: {{ .Values.tls.enabled }}
-{{- if $fluentdUseGenericSecret }}
- secretName: {{ .Values.tls.secretName | default (include "logging-operator-fluent.fullname" .) }}
- secretType: generic
-{{- else }}
- secretName: {{ .Values.fluentd.tlsSecret }}
- secretType: tls
-{{- end }}
- sharedKey: {{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) | b64enc | quote }}
- serviceType: {{ .Values.fluentd.serviceType | default "ClusterIP" | quote }}
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/psp.yaml b/charts/logging-operator-fluent/templates/psp.yaml
deleted file mode 100644
index bfa1f488a..000000000
--- a/charts/logging-operator-fluent/templates/psp.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-{{ if .Values.psp.enabled }}
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- creationTimestamp: null
- name: psp.fluent-bit
- annotations:
- seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
-spec:
- allowedHostPaths:
- - pathPrefix: /var/lib/docker/containers
- readOnly: true
- - pathPrefix: /var/log
- readOnly: true
- fsGroup:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- readOnlyRootFilesystem: true
- allowPrivilegeEscalation: false
- runAsUser:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- volumes:
- - configMap
- - emptyDir
- - secret
- - hostPath
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- creationTimestamp: null
- name: psp.fluentd
- annotations:
- seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
-spec:
- fsGroup:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- allowPrivilegeEscalation: false
- runAsUser:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: MustRunAs
- ranges:
- - min: 1
- max: 65535
- volumes:
- - configMap
- - emptyDir
- - secret
- - persistentVolumeClaim
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/rbac.yaml b/charts/logging-operator-fluent/templates/rbac.yaml
deleted file mode 100644
index eabb1b020..000000000
--- a/charts/logging-operator-fluent/templates/rbac.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-{{ if .Values.psp.enabled }}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit
- namespace: {{ .Release.Namespace }}
- labels:
- kubernetes.io/cluster-service: "true"
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
- - policy
- resourceNames:
- - psp.fluent-bit
- resources:
- - podsecuritypolicies
- verbs:
- - use
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
- namespace: {{ .Release.Namespace }}
- labels:
- kubernetes.io/cluster-service: "true"
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
- - policy
- resourceNames:
- - psp.fluentd
- resources:
- - podsecuritypolicies
- verbs:
- - use
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit
- namespace: {{ .Release.Namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
- kubernetes.io/cluster-service: 'true'
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit
-subjects:
- - kind: ServiceAccount
- name: logging
- namespace: {{ .Release.Namespace }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
- namespace: {{ .Release.Namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
- kubernetes.io/cluster-service: 'true'
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: {{ template "logging-operator-fluent.fullname" . }}-fluentd
-subjects:
- - kind: ServiceAccount
- name: logging-fluentd
- namespace: {{ .Release.Namespace }}
-{{ end }}
diff --git a/charts/logging-operator-fluent/templates/secret.yaml b/charts/logging-operator-fluent/templates/secret.yaml
deleted file mode 100644
index f0b93e661..000000000
--- a/charts/logging-operator-fluent/templates/secret.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-{{- if and .Values.tls.enabled (not .Values.tls.secretName) }}
-{{ $ca := genCA "svc-cat-ca" 3650 }}
-{{ $cn := printf "fluentd.%s.svc.cluster.local" .Release.Namespace }}
-{{ $server := genSignedCert $cn nil nil 365 $ca }}
-{{ $client := genSignedCert "" nil nil 365 $ca }}
-
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "logging-operator-fluent.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }}
- helm.sh/chart: {{ include "logging-operator-fluent.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
- caCert: {{ b64enc $ca.Cert }}
- clientCert: {{ b64enc $client.Cert }}
- clientKey: {{ b64enc $client.Key }}
- serverCert: {{ b64enc $server.Cert }}
- serverKey: {{ b64enc $server.Key }}
-{{ end }}
\ No newline at end of file
diff --git a/charts/logging-operator-fluent/values.yaml b/charts/logging-operator-fluent/values.yaml
deleted file mode 100644
index 409356406..000000000
--- a/charts/logging-operator-fluent/values.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-# Default values for logging-operator-fluent.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-tls:
- enabled: false
- secretName: ""
- sharedKey: ""
-
-fluentbit:
- enabled: true
- namespace: ""
- tolerations:
- image:
- tag: "1.1.3"
- repository: "fluent/fluent-bit"
- pullPolicy: "IfNotPresent"
- tlsSecret: ""
-
-fluentd:
- enabled: true
- namespace: ""
- image:
- tag: "v1.5.0"
- repository: "banzaicloud/fluentd"
- pullPolicy: "IfNotPresent"
- volumeModImage:
- tag: "latest"
- repository: "busybox"
- pullPolicy: "IfNotPresent"
- configReloaderImage:
- tag: "v0.2.2"
- repository: "jimmidyson/configmap-reload"
- pullPolicy: "IfNotPresent"
- tolerations:
- fluentdPvcSpec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 21Gi
- tlsSecret: ""
-
-psp:
- enabled: false
diff --git a/charts/nginx-logging-demo/.helmignore b/charts/logging-operator-logging/.helmignore
similarity index 100%
rename from charts/nginx-logging-demo/.helmignore
rename to charts/logging-operator-logging/.helmignore
diff --git a/charts/logging-operator-logging/Chart.yaml b/charts/logging-operator-logging/Chart.yaml
new file mode 100644
index 000000000..c3ef5ff3e
--- /dev/null
+++ b/charts/logging-operator-logging/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "2.0.0"
+description: A Helm chart that creates the Logging resource for the logging-operator
+name: logging-operator-logging
+version: 2.0.0
diff --git a/charts/logging-operator-fluent/README.md b/charts/logging-operator-logging/README.md
similarity index 63%
rename from charts/logging-operator-fluent/README.md
rename to charts/logging-operator-logging/README.md
index 37e4950bf..74f2b61c7 100644
--- a/charts/logging-operator-fluent/README.md
+++ b/charts/logging-operator-logging/README.md
@@ -1,41 +1,30 @@
-# Logging Operator Fluent Chart
+# Installing the Logging resource for the logging-operator
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
-# Prerequisites
-$ helm install banzaicloud-stable/logging-operator
-# Install fluent and fluent-bit cr
-$ helm install banzaicloud-stable/logging-operator-fluent
+$ helm install banzaicloud-stable/logging-operator-logging
```
-## Introduction
-
-This chart applies Fluentd and Fluent-bit custom resources to [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) deployment on a [Kubernetes](http://kubernetes.io)
-
-## Prerequisites
-
-- Kubernetes 1.8+ with Beta APIs enabled
-- [Logging Operator](https://github.com/banzaicloud/logging-operator)
+## Configuration
+The following table lists the configurable parameters of the logging-operator-logging chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `tls.enabled` | Enabled TLS communication between components | true |
-| `tls.secretName` | Specified generic secret name, which contain tls certs | This will overwrite automatic Helm certificate generation and overrides `fluentbit.tlsSecret` and `fluentd.tlsSecret`. |
+| `tls.fluentdSecretName` | Specified secret name, which contains the TLS certs | This will overwrite automatic Helm certificate generation. |
+| `tls.fluentbitSecretName` | Specified secret name, which contains the TLS certs | This will overwrite automatic Helm certificate generation. |
| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
| `fluentbit.enabled` | Install fluent-bit | true |
| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace |
| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
-| `fluentbit.tolerations` | Fluentbit tolerations | `nil` |
-| `fluentbit.tlsSecret` | Secret name that contains Fluentbit TLS client cert | Ignored if `tls.secretName` is specified. Must refer to a secret of type `kubernetes.io/tls` |
| `fluentd.enabled` | Install fluentd | true |
-| `fluentd.namespace` | Specified fluentd installation namespace | same as operator namespace |
-| `fluentd.image.tag` | Fluentd container image tag | `v1.5.0` |
+| `fluentd.image.tag` | Fluentd container image tag | `v1.6.3-alpine` |
| `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` |
| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
@@ -46,6 +35,4 @@ This chart applies Fluentd and Fluent-bit custom resources to [Logging Operator]
| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
-| `fluentd.tolerations` | Fluentd tolerations | `nil` |
-| `fluentd.tlsSecret` | Secret name that contains Fluentd TLS client cert | Ignored if `tls.secretName` is specified. Must refer to a secret of type `kubernetes.io/tls`. |
-| `psp.enabled` | Install PodSecurityPolicy | `false` |
+| `fluentd.fluentdPvcSpec.storageClassName` | Fluentd persistence volume storage class | `""` |
\ No newline at end of file
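As a usage sketch for the parameters above (names and values are illustrative, not defined by the chart), a custom values file might pin the control namespace, keep Helm-generated TLS certificates, and size the Fluentd persistent volume:

```yaml
# values-logging.yaml -- illustrative overrides for the logging-operator-logging chart
controlNamespace: logging        # namespace for ClusterOutput/ClusterFlow resources (assumed)
tls:
  enabled: true
  fluentdSecretName: ""          # empty: let Helm generate the certificates
  fluentbitSecretName: ""
fluentd:
  fluentdPvcSpec:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 40Gi
    storageClassName: fast       # assumed storage class, adjust to the cluster
```

Such a file would be applied with `helm install -f values-logging.yaml banzaicloud-stable/logging-operator-logging`.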
diff --git a/charts/logging-operator-logging/templates/NOTES.txt b/charts/logging-operator-logging/templates/NOTES.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/charts/nginx-logging-demo/templates/_helpers.tpl b/charts/logging-operator-logging/templates/_helpers.tpl
similarity index 62%
rename from charts/nginx-logging-demo/templates/_helpers.tpl
rename to charts/logging-operator-logging/templates/_helpers.tpl
index 86303e7c0..adf39e886 100644
--- a/charts/nginx-logging-demo/templates/_helpers.tpl
+++ b/charts/logging-operator-logging/templates/_helpers.tpl
@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
-{{- define "nginx-logging-demo.name" -}}
+{{- define "logging-operator-logging.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
-{{- define "nginx-logging-demo.fullname" -}}
+{{- define "logging-operator-logging.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@@ -27,6 +27,19 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
-{{- define "nginx-logging-demo.chart" -}}
+{{- define "logging-operator-logging.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "logging-operator-logging.labels" -}}
+app.kubernetes.io/name: {{ include "logging-operator-logging.name" . }}
+helm.sh/chart: {{ include "logging-operator-logging.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/charts/logging-operator-logging/templates/logging.yaml b/charts/logging-operator-logging/templates/logging.yaml
new file mode 100644
index 000000000..f340aac60
--- /dev/null
+++ b/charts/logging-operator-logging/templates/logging.yaml
@@ -0,0 +1,38 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: {{ include "logging-operator-logging.name" . }}
+ labels:
+{{ include "logging-operator-logging.labels" . | indent 4 }}
+spec:
+ {{- with .Values.loggingRef }}
+ loggingRef: {{ . }}
+ {{- end }}
+ {{- with .Values.flowConfigCheckDisabled }}
+ flowConfigCheckDisabled: {{ . }}
+ {{- end }}
+ {{- with .Values.flowConfigOverride }}
+ flowConfigOverride: {{ . }}
+ {{- end }}
+ controlNamespace: {{ .Values.controlNamespace | default .Release.Namespace }}
+ fluentd:
+ {{- if .Values.tls.enabled }}
+ tls:
+ enabled: true
+ secretName: {{ .Values.tls.fluentdSecretName | default (printf "%s-%s" (include "logging-operator-logging.name" . ) "fluentd-tls" ) }}
+ sharedKey: "{{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) }}"
+ {{- end }}
+ {{- if .Values.fluentd }}
+{{ toYaml .Values.fluentd | indent 4}}
+ {{- end}}
+ fluentbit:
+ {{- if .Values.tls.enabled }}
+ tls:
+ enabled: true
+ secretName: {{ .Values.tls.fluentbitSecretName | default (printf "%s-%s" (include "logging-operator-logging.name" . ) "fluentbit-tls" ) }}
+ sharedKey: "{{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) }}"
+ {{- end }}
+ {{- if .Values.fluentbit }}
+{{ toYaml .Values.fluentbit | indent 4}}
+ {{- end}}
+
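To make the template above easier to follow, this is a sketch of the Logging resource it would render with TLS enabled and otherwise default values, assuming a release named `logging-operator-logging`; the label set and the generated shared key are abbreviated.

```yaml
# Illustrative rendered output of the logging.yaml template above (abbreviated).
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Logging
metadata:
  name: logging-operator-logging
  labels:
    app.kubernetes.io/name: logging-operator-logging
    app.kubernetes.io/instance: logging-operator-logging
    app.kubernetes.io/managed-by: Helm
spec:
  controlNamespace: default                 # defaults to the release namespace
  fluentd:
    tls:
      enabled: true
      secretName: logging-operator-logging-fluentd-tls
      sharedKey: "<generated shared key>"
  fluentbit:
    tls:
      enabled: true
      secretName: logging-operator-logging-fluentbit-tls
      sharedKey: "<generated shared key>"
```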
diff --git a/charts/logging-operator-logging/templates/secret.yaml b/charts/logging-operator-logging/templates/secret.yaml
new file mode 100644
index 000000000..721f68621
--- /dev/null
+++ b/charts/logging-operator-logging/templates/secret.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.tls.enabled }}
+{{ $ca := genCA "svc-cat-ca" 3650 }}
+{{ $cn := printf "%s-%s.%s.svc.cluster.local" (include "logging-operator-logging.name" .) "fluentd" .Release.Namespace }}
+{{ $server := genSignedCert $cn nil nil 365 $ca }}
+{{ $client := genSignedCert "" nil nil 365 $ca }}
+
+{{- if not .Values.tls.fluentdSecretName }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "logging-operator-logging.name" . }}-fluentd-tls
+ labels:
+{{ include "logging-operator-logging.labels" . | indent 4 }}
+data:
+ ca.crt: {{ b64enc $ca.Cert }}
+ tls.crt: {{ b64enc $server.Cert }}
+ tls.key: {{ b64enc $server.Key }}
+{{ end }}
+
+---
+
+{{- if not .Values.tls.fluentbitSecretName }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "logging-operator-logging.name" . }}-fluentbit-tls
+ labels:
+{{ include "logging-operator-logging.labels" . | indent 4 }}
+data:
+ ca.crt: {{ b64enc $ca.Cert }}
+ tls.crt: {{ b64enc $client.Cert }}
+ tls.key: {{ b64enc $client.Key }}
+{{ end }}
+{{ end }}
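If certificate generation is skipped by setting `tls.fluentdSecretName` (or `tls.fluentbitSecretName`), the referenced secret presumably has to carry the same data keys the template writes above; a hedged sketch of such a pre-provisioned secret:

```yaml
# Hypothetical user-provided secret referenced via tls.fluentdSecretName.
# The data keys mirror the ones emitted by the template above.
apiVersion: v1
kind: Secret
metadata:
  name: my-fluentd-tls            # assumed name; set tls.fluentdSecretName to this value
type: Opaque
data:
  ca.crt: <base64-encoded CA certificate>
  tls.crt: <base64-encoded server certificate>
  tls.key: <base64-encoded server key>
```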
diff --git a/charts/logging-operator-logging/values.yaml b/charts/logging-operator-logging/values.yaml
new file mode 100644
index 000000000..eb2e0766e
--- /dev/null
+++ b/charts/logging-operator-logging/values.yaml
@@ -0,0 +1,35 @@
+# Default values for logging-operator-logging.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# Reference name of the logging deployment
+loggingRef: ""
+# Disable configuration check before deploy
+flowConfigCheckDisabled: false
+# Use static configuration instead of generated config.
+flowConfigOverride: ""
+
+# Fluent-bit configurations
+fluentbit: {}
+# Fluentd configurations
+fluentd: {}
+# fluentdPvcSpec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: 40Gi
+# storageClassName: fast
+
+# Enable secure connection between fluentd and fluent-bit
+tls:
+ enabled: true
+ # Shared key for fluentd authentication
+ sharedKey: ""
+ fluentbitSecretName: ""
+ fluentdSecretName: ""
+
+# Limit the namespaces from which Flow and Output specs are read
+watchNamespaces: []
+# Control namespace that contains ClusterOutput and ClusterFlow resources
+controlNamespace: ""
\ No newline at end of file
diff --git a/charts/logging-operator/Chart.yaml b/charts/logging-operator/Chart.yaml
index 361b7fc6c..aa6cc149d 100644
--- a/charts/logging-operator/Chart.yaml
+++ b/charts/logging-operator/Chart.yaml
@@ -1,18 +1,5 @@
apiVersion: v1
-description: Logging operator for Kubernetes based on Fluentd and Fluent-bit.
+appVersion: "2.0.0"
+description: Logging operator for Kubernetes based on Fluentd and Fluent-bit.
name: logging-operator
-version: 0.3.3
-appVersion: 0.2.2
-home: https://github.com/banzaicloud/logging-operator
-icon: https://banzaicloud.com/img/banzai-cloud-logo.png
-keywords:
- - logging
- - monitoring
- - fluentd
- - fluenbit
- - operator
-sources:
-- https://github.com/banzaicloud/logging-operator
-maintainers:
-- name: Banzai Cloud
- email: info@banzaicloud.com
+version: 2.0.0
diff --git a/charts/logging-operator/README.md b/charts/logging-operator/README.md
index cad88b1dd..0a54a1747 100644
--- a/charts/logging-operator/README.md
+++ b/charts/logging-operator/README.md
@@ -12,7 +12,7 @@ $ helm install banzaicloud-stable/logging-operator
## Introduction
-This chart bootstraps an [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+This chart bootstraps a [Logging Operator](https://github.com/banzaicloud/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
@@ -45,12 +45,11 @@ The following tables lists the configurable parameters of the logging-operator c
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `image.repository` | Container image repository | `banzaicloud/logging-operator` |
-| `image.tag` | Container image tag | `0.2.2` |
+| `image.tag` | Container image tag | `2.0.0` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `nameOverride` | Override name of app | `` |
| `fullnameOverride` | Override full name of app | `` |
| `watchNamespace` | Namespace to watch for LoggingOperator CRD | `` |
-| `grafana.dashboard.enabled` | Install grafana logging-operator dashboard | `true` |
| `rbac.enabled` | Create rbac service account and roles | `true` |
| `rbac.psp.enabled` | Must be used with `rbac.enabled` true. If true, creates & uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. | `false` |
| `affinity` | Node Affinity | `{}` |
@@ -68,31 +67,46 @@ $ helm install --name my-release -f values.yaml banzaicloud-stable/logging-opera
> **Tip**: You can use the default [values.yaml](values.yaml)
-## Installing Fluentd and Fluent-bit
+## Installing Fluentd and Fluent-bit via the Logging resource
-The previous chart does **not** install Fluentd or Fluent-bit custom resource. To install them please use the [Logging Operator Fluent](https://github.com/banzaicloud/banzai-charts/logging-operator-fluent) chart.
+This chart does **not** install the `Logging` resource that deploys Fluentd and Fluent-bit on the cluster. To install it, please use the [Logging Operator Logging](https://github.com/banzaicloud/logging-operator/tree/master/charts/logging-operator-logging) chart.
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
-$ helm install banzaicloud-stable/logging-operator-fluent
+$ helm install banzaicloud-stable/logging-operator-logging
```
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the logging-operator-logging chart and their default values.
+
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `tls.enabled` | Enabled TLS communication between components | true |
-| `tls.secretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. |
+| `tls.fluentdSecretName` | Specified secret name, which contains the TLS certs | This will overwrite automatic Helm certificate generation. |
+| `tls.fluentbitSecretName` | Specified secret name, which contains the TLS certs | This will overwrite automatic Helm certificate generation. |
| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
| `fluentbit.enabled` | Install fluent-bit | true |
| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace |
-| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
+| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` |
| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
| `fluentd.enabled` | Install fluentd | true |
-| `fluentd.namespace` | Specified fluentd installation namespace | same as operator namespace |
-| `fluentd.image.tag` | Fluentd container image tag | `v1.5.0` |
+| `fluentd.image.tag` | Fluentd container image tag | `v1.6.3-alpine` |
| `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` |
| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
@@ -103,3 +117,4 @@ $ helm install banzaicloud-stable/logging-operator-fluent
| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
+| `fluentd.fluentdPvcSpec.resources.storageClassName` | Fluentd persistence volume storage class | `""` |
\ No newline at end of file
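+
+As a minimal sketch, a `values.yaml` that overrides only parameters from the table above (the storage size and TLS setting are illustrative) might look like the following, passed with `-f values.yaml` at install time:
+
+```yaml
+tls:
+  enabled: true
+fluentbit:
+  enabled: true
+fluentd:
+  enabled: true
+  fluentdPvcSpec:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 21Gi
+```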
diff --git a/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json b/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json
deleted file mode 100644
index 676fa21a7..000000000
--- a/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json
+++ /dev/null
@@ -1,1069 +0,0 @@
-{
- "__inputs": [
- {
- "name": "DS_PROMETHEUS",
- "label": "Prometheus",
- "description": "",
- "type": "datasource",
- "pluginId": "prometheus",
- "pluginName": "Prometheus"
- }
- ],
- "__requires": [
- {
- "type": "grafana",
- "id": "grafana",
- "name": "Grafana",
- "version": "5.1.3"
- },
- {
- "type": "panel",
- "id": "graph",
- "name": "Graph",
- "version": "5.0.0"
- },
- {
- "type": "datasource",
- "id": "prometheus",
- "name": "Prometheus",
- "version": "5.0.0"
- },
- {
- "type": "panel",
- "id": "singlestat",
- "name": "Singlestat",
- "version": "5.0.0"
- }
- ],
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": 7752,
- "graphTooltip": 0,
- "id": null,
- "links": [],
- "panels": [
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#299c46"
- ],
- "datasource": "Prometheus",
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 2,
- "x": 0,
- "y": 0
- },
- "id": 4,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "sum(kube_node_status_condition{condition=\"Ready\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "",
- "refId": "A"
- }
- ],
- "thresholds": "0,1",
- "title": "Active Nodes",
- "type": "singlestat",
- "valueFontSize": "100%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#299c46"
- ],
- "datasource": "Prometheus",
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 2,
- "x": 2,
- "y": 0
- },
- "id": 6,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "sum(kube_pod_info{pod=~\"fluent-bit.*\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Active Fluent-bit",
- "refId": "A"
- }
- ],
- "thresholds": "0,1",
- "title": "Fluent-bit",
- "type": "singlestat",
- "valueFontSize": "100%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "avg"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#299c46"
- ],
- "datasource": "Prometheus",
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 2,
- "x": 4,
- "y": 0
- },
- "id": 8,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "sum(kube_pod_info{pod=~\"fluentd.*\"})",
- "format": "time_series",
- "instant": false,
- "intervalFactor": 1,
- "refId": "A"
- }
- ],
- "thresholds": "0,1",
- "title": "Fluentd",
- "type": "singlestat",
- "valueFontSize": "100%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 9,
- "x": 6,
- "y": 0
- },
- "id": 2,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_input_bytes_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ kubernetes_pod_name }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit input bytes/s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 9,
- "x": 15,
- "y": 0
- },
- "id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_output_proc_bytes_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ kubernetes_pod_name }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit output bytes/s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 5
- },
- "id": 10,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_output_errors_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ kubernetes_pod_name }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit error/s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 8,
- "y": 5
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentd_output_status_emit_count[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ type }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output emit/s by Plugin",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ops",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 16,
- "y": 5
- },
- "id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "fluentd_output_status_buffer_queue_length",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ type }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output buffer queue",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 0,
- "y": 10
- },
- "id": 11,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(fluentbit_output_retries_total[1m])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "Retries {{ kubernetes_pod_name }}",
- "refId": "A"
- },
- {
- "expr": "rate(fluentbit_output_retries_failed_total[1m])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Failed {{ kubernetes_pod_name }}",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluent-bit retries/fails",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "short",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 10
- },
- "id": 14,
- "legend": {
- "alignAsTable": false,
- "avg": false,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(fluentd_output_status_retry_count[1m]))",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "Retry rate",
- "refId": "A"
- },
- {
- "expr": "sum(rate(fluentd_output_status_num_errors[1m]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Error rate",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output error/retry rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ops",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "Prometheus",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 16,
- "y": 10
- },
- "id": 13,
- "legend": {
- "alignAsTable": false,
- "avg": false,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "fluentd_output_status_buffer_total_bytes",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{ type }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Fluentd output buffer size",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "refresh": "30s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": []
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Logging Dashboard",
- "uid": "bNn5LUtiz",
- "version": 10,
- "description": "This is a simple dashboard for: https://github.com/banzaicloud/logging-operator utilising Fluent-bit and Fluentd"
-}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/NOTES.txt b/charts/logging-operator/templates/NOTES.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/charts/logging-operator/templates/_helpers.tpl b/charts/logging-operator/templates/_helpers.tpl
index a58c97189..a5e197e22 100644
--- a/charts/logging-operator/templates/_helpers.tpl
+++ b/charts/logging-operator/templates/_helpers.tpl
@@ -30,3 +30,16 @@ Create chart name and version as used by the chart label.
{{- define "logging-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "logging-operator.labels" -}}
+app.kubernetes.io/name: {{ include "logging-operator.name" . }}
+helm.sh/chart: {{ include "logging-operator.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/charts/logging-operator/templates/configmap.yaml b/charts/logging-operator/templates/configmap.yaml
deleted file mode 100644
index 127e65b30..000000000
--- a/charts/logging-operator/templates/configmap.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: {{ template "logging-operator.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "logging-operator.name" . }}
- helm.sh/chart: {{ include "logging-operator.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-
-data:
- config.toml: |-
- # This is the config for the logging operator
-
- [logging-operator]
- rbac = {{ .Values.rbac.enabled }}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/crd.yaml b/charts/logging-operator/templates/crd.yaml
deleted file mode 100644
index f15d981c7..000000000
--- a/charts/logging-operator/templates/crd.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: plugins.logging.banzaicloud.com
- annotations:
- "helm.sh/hook": crd-install
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Plugin
- listKind: PluginList
- plural: plugins
- singular: plugin
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: fluentbits.logging.banzaicloud.com
- annotations:
- "helm.sh/hook": crd-install
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentbit
- listKind: FluentbitList
- plural: fluentbits
- singular: fluentbit
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
-
----
-
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: fluentds.logging.banzaicloud.com
- annotations:
- "helm.sh/hook": crd-install
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentd
- listKind: FluentdList
- plural: fluentds
- singular: fluentd
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
\ No newline at end of file
diff --git a/charts/logging-operator/templates/deployment.yaml b/charts/logging-operator/templates/deployment.yaml
index b4b29ebc8..97a16394b 100644
--- a/charts/logging-operator/templates/deployment.yaml
+++ b/charts/logging-operator/templates/deployment.yaml
@@ -1,12 +1,9 @@
apiVersion: apps/v1
kind: Deployment
metadata:
- name: {{ template "logging-operator.fullname" . }}
+ name: {{ include "logging-operator.fullname" . }}
labels:
- app.kubernetes.io/name: {{ include "logging-operator.name" . }}
- helm.sh/chart: {{ include "logging-operator.chart" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "logging-operator.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
@@ -15,56 +12,35 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
- annotations:
- scheduler.alpha.kubernetes.io/tolerations: {{ toJson .Values.tolerations | quote }}
labels:
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
- containers:
- - name: {{ template "logging-operator.name" . }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: "{{ .Values.image.pullPolicy }}"
- command:
- - logging-operator
- env:
- - name: WATCH_NAMESPACE
- value: {{ .Values.watchNamespace | quote }}
- - name: KUBERNETES_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: OPERATOR_NAME
- value: {{ include "logging-operator.fullname" . | quote }}
- volumeMounts:
- - mountPath: /logging-operator/config
- name: config
-
- {{- if .Values.securityContext }}
- securityContext: {{ toYaml .Values.securityContext | nindent 10 }}
- {{- end }}
- resources: {{ toYaml .Values.resources | nindent 10 }}
- {{- if .Values.podSecurityContext }}
- securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
- {{- end }}
- {{- if .Values.rbac.enabled }}
- serviceAccountName: {{ template "logging-operator.fullname" . }}
- {{- end }}
- volumes:
- - configMap:
- name: {{ template "logging-operator.fullname" . }}
- name: config
-
- {{- with .Values.nodeSelector }}
- nodeSelector: {{ toYaml . | nindent 8 }}
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
{{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.http.port }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- with .Values.affinity }}
- affinity: {{ toYaml . | nindent 8 }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
- tolerations: {{ toYaml . | nindent 8 }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.rbac.enabled }}
+ serviceAccountName: {{ include "logging-operator.fullname" . }}
{{- end }}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/grafana-dashboard-logging.yaml b/charts/logging-operator/templates/grafana-dashboard-logging.yaml
deleted file mode 100644
index 353e96db7..000000000
--- a/charts/logging-operator/templates/grafana-dashboard-logging.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{{- if .Values.grafana.dashboard.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "logging-operator.fullname" . }}-grafana-dashboard-logging
- labels:
- pipeline_grafana_dashboard: "1"
-data:
- logging.json: |-2
-
-{{.Files.Get "grafana-dashboards/logging-dashboard_rev1.json"| indent 4}}
-{{- end }}
\ No newline at end of file
diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml
new file mode 100644
index 000000000..ac7040257
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml
@@ -0,0 +1,139 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusterflows.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: ClusterFlow
+ listKind: ClusterFlowList
+ plural: clusterflows
+ singular: clusterflow
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterFlow is the Schema for the clusterflows API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Name of the logging cluster to be attached
+ properties:
+ filters:
+ items:
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid record to @ERROR label. Invalid
+ cases are: key not exist, format is not matched, unexpected
+ error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+ description: ' Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+ description: Remove key_name field when parsing is succeeded
+ type: boolean
+ replace_invalid_sequence:
+ description: If true, invalid string is replaced with safe
+ characters and re-parse it.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+ description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml
new file mode 100644
index 000000000..a094d1c7b
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml
@@ -0,0 +1,1795 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusteroutputs.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: ClusterOutput
+ listKind: ClusterOutputList
+ plural: clusteroutputs
+ singular: clusteroutput
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterOutput is the Schema for the clusteroutputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+ description: 'Automatically create container if not exists(default:
+ true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+ description: 'Azure storage type currently only "blob" supported
+ (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+ description: Specify the string and its value to be replaced in
+ form of hash. Can contain multiple key value pair that would be
+ replaced in the specified template_file. This setting only creates
+ template and to add rollover index please check the rollover_index
+ configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+ description: 'Indicates whether to fail when max_retry_putting_template
+ is exceeded. If you have multiple output plugin, you could use
+ this property to do not fail on fluentd statup.(default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+ You can use this option to enforce an URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+ description: 'Set the Logstash prefix.(default: true)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+ description: 'You can specify times of retry obtaining Elasticsearch
+ version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+ description: 'You can specify times of retry putting template.(default:
+ 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: string
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+ JSON encoder/decoder. When this parameter sets as true, Elasticsearch
+ client uses Oj as JSON encoder/decoder. (default: fqlse)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+ only on "host unreachable exceptions". We recommended to set this
+ true in the presence of elasticsearch shield.(default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Scheme for HTTP Basic authentication.(default: true)'
+ type: boolean
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+ description: 'By default, the records inserted into index logstash-YYMMDD
+ with UTC (Coordinated Universal Time). This option allows to use
+ local time if you describe utc_index to false.(default: true)'
+ type: boolean
+ with_transporter_log:
+ description: 'This is debugging purpose option to enable to obtain
+ transporter layer log. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ type: object
+ enabledNamespaces:
+ items:
+ type: string
+ type: array
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+ description: 'Create GCS bucket if it does not exists (default:
+ true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+                    timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+                      type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+                  description: User-provided web-safe keys and arbitrary string values
+                    that will be returned with requests for the file as "x-goog-meta-"
+                    response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+                      type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+                    timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+                      type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+ description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+                  description: 'Create OSS bucket if it does not exist (default: false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+                      type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+                    timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+                      type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+                  description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+                  description: Specify a threshold for treating events as delayed;
+                    output warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+                  description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+                      type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+                    timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+                      type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+                      description: Profile name. Defaults to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+                  description: Specifies the algorithm to use when encrypting the
+                    object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+ warn_for_delay:
+                  description: Specify a threshold for treating events as delayed;
+                    output warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ required:
+ - spec
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
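
For orientation while reading the generated schema above, here is a minimal, illustrative custom resource using the S3 output and the secretKeyRef pattern (assuming the kind served by this CRD is ClusterOutput, as the chart file name suggests). The resource name, namespace, bucket, region and secret names are hypothetical; the field names come from the schema, and buffer.timekey is included because the schema marks it as required whenever a buffer block is set.

    apiVersion: logging.banzaicloud.com/v1alpha2
    kind: ClusterOutput
    metadata:
      name: s3-archive                 # hypothetical name
    spec:
      enabledNamespaces:
        - default                      # hypothetical namespace selection
      s3:
        aws_key_id:
          valueFrom:
            secretKeyRef:
              name: s3-credentials     # hypothetical Kubernetes secret
              key: awsAccessKeyId
        aws_sec_key:
          valueFrom:
            secretKeyRef:
              name: s3-credentials
              key: awsSecretAccessKey
        s3_bucket: example-log-bucket  # hypothetical bucket
        s3_region: us-east-1
        buffer:
          timekey: 1m                  # required by the buffer schema
          timekey_wait: 30s
          timekey_use_utc: true
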
diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml
new file mode 100644
index 000000000..b68f6efec
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml
@@ -0,0 +1,142 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: flows.logging.banzaicloud.com
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.loggingRef
+ name: Logging
+ type: string
+ group: logging.banzaicloud.com
+ names:
+ kind: Flow
+ listKind: FlowList
+ plural: flows
+ singular: flow
+ scope: ""
+ subresources: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ filters:
+ items:
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+                        description: 'Emit invalid records to the @ERROR label. Invalid
+                          cases are: key does not exist, format is not matched, unexpected
+                          error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+                              description: Specify null value pattern.
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+                      description: Remove key_name field when parsing succeeds
+ type: boolean
+ replace_invalid_sequence:
+                      description: If true, invalid strings are replaced with safe
+                        characters and re-parsed.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+                      description: Re-tag log messages. More info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
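
As a reading aid for the Flow schema above, the following is a minimal sketch of a Flow resource. The name, namespace, pod label and referenced output name are hypothetical; outputRefs and selectors are shown because the schema marks them as required, and the parser and stdout filters use only fields defined above.

    apiVersion: logging.banzaicloud.com/v1alpha2
    kind: Flow
    metadata:
      name: nginx-flow            # hypothetical name
      namespace: default          # hypothetical namespace
    spec:
      selectors:
        app: nginx                # hypothetical pod label
      filters:
        - parser:
            key_name: log
            remove_key_name_field: true
            parsers:
              - type: nginx
        - stdout: {}
      outputRefs:
        - s3-archive              # hypothetical output name
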
diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml
new file mode 100644
index 000000000..adcf63897
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml
@@ -0,0 +1,332 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: loggings.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: Logging
+ listKind: LoggingList
+ plural: loggings
+ singular: logging
+ scope: Cluster
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: Logging is the Schema for the loggings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LoggingSpec defines the desired state of Logging
+ properties:
+ controlNamespace:
+ type: string
+ flowConfigCheckDisabled:
+ type: boolean
+ flowConfigOverride:
+ type: string
+ fluentbit:
+ description: FluentbitSpec defines the desired state of Fluentbit
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ image:
+                  description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ targetHost:
+ type: string
+ targetPort:
+ format: int32
+ type: integer
+ tls:
+ description: FluentbitTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ type: object
+ fluentd:
+ description: FluentdSpec defines the desired state of Fluentd
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ configReloaderImage:
+                  description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ disablePvc:
+ type: boolean
+ fluentdPvcSpec:
+ description: PersistentVolumeClaimSpec describes the common attributes
+ of storage devices and allows a Source for provider-specific attributes
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: This field requires the VolumeSnapshotDataSource
+ alpha feature gate to be enabled and currently VolumeSnapshot
+ is the only supported data source. If the provisioner can
+ support VolumeSnapshot data source, it will create a new volume
+ and data will be restored to the volume at the same time.
+ If the provisioner does not support VolumeSnapshot data source,
+ volume will not be created and the failure will be reported
+ as an event. In the future, we plan to support more data source
+ types and the behavior of the provisioner may change.
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being
+ referenced. If APIGroup is not specified, the specified
+ Kind must be in the core API group. For any other third-party
+ types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the
+ volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not included
+ in claim spec. This is a beta feature.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ image:
+                  description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ port:
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ tls:
+ description: FluentdTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ volumeModImage:
+                  description: ImageSpec struct holds information about the image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ type: object
+ loggingRef:
+ type: string
+ watchNamespaces:
+ items:
+ type: string
+ type: array
+ required:
+ - controlNamespace
+ type: object
+ status:
+ description: LoggingStatus defines the observed state of Logging
+ properties:
+ configCheckResults:
+ additionalProperties:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
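
To illustrate the Logging schema above, a minimal sketch of a Logging resource follows. The resource name and the controlNamespace value are hypothetical; controlNamespace is the only field the schema marks as required, and the empty fluentd and fluentbit blocks are valid because the schema defines no required sub-fields for them.

    apiVersion: logging.banzaicloud.com/v1alpha2
    kind: Logging
    metadata:
      name: default-logging       # hypothetical name
    spec:
      controlNamespace: logging   # hypothetical namespace; the only required field
      fluentd: {}
      fluentbit: {}
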
diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml
new file mode 100644
index 000000000..dff3427aa
--- /dev/null
+++ b/charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml
@@ -0,0 +1,1790 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: outputs.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: Output
+ listKind: OutputList
+ plural: outputs
+ singular: output
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: Output is the Schema for the outputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OutputSpec defines the desired state of Output
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+                  description: 'Automatically create the container if it does not exist (default: true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+                  description: 'Azure storage type; currently only "blob" is supported
+                    (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+                      type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+                    timekey_wait:
+                      description: Output plugin writes chunks timekey_wait seconds
+                        after timekey expiration
+                      type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+                    chunk_limit_records:
+                      description: The max number of events that each chunk can store
+                      type: integer
+                    chunk_limit_size:
+                      description: 'The max size of each chunk: events will be written
+                        into chunks until the size of chunks becomes this size'
+                      type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+ description: Specify the string and its value to be replaced in
+ form of hash. Can contain multiple key value pair that would be
+ replaced in the specified template_file. This setting only creates
+ template and to add rollover index please check the rollover_index
+ configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+                description: 'Indicates whether to fail when max_retry_putting_template
+                  is exceeded. If you have multiple output plugins, you could use
+                  this property to avoid failing on fluentd startup.(default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+                description: With this option set to true, Fluentd manifests the
+                  index name in the request URL (rather than in the request body).
+                  You can use this option to enforce URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+                description: 'Set the Logstash prefix.(default: logstash)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+ description: 'You can specify times of retry obtaining Elasticsearch
+ version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+ description: 'You can specify times of retry putting template.(default:
+ 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: string
+ prefer_oj_serializer:
+                description: 'With default behavior, the Elasticsearch client uses Yajl
+                  as the JSON encoder/decoder. Oj is an alternative high-performance
+                  JSON encoder/decoder. When this parameter is set to true, the Elasticsearch
+                  client uses Oj as the JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+                description: 'Indicates that the plugin should reset the connection
+                  on any error (reconnect on next send). By default it will reconnect
+                  only on "host unreachable exceptions". We recommend setting this to
+                  true in the presence of elasticsearch shield.(default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Scheme for HTTP Basic authentication.(default: true)'
+ type: boolean
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+                description: 'By default, records are inserted into index logstash-YYMMDD
+                  in UTC (Coordinated Universal Time). This option allows the use of
+                  local time if you set utc_index to false.(default: true)'
+ type: boolean
+ with_transporter_log:
+                description: 'This is a debugging-purpose option for obtaining the
+                  transporter layer log. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ type: object
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+                description: 'Create GCS bucket if it does not exist (default:
+                  true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+                description: User-provided web-safe keys and arbitrary string values
+                  that will be returned with requests for the file as "x-goog-meta-"
+                  response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+                description: The URL of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+                description: 'Create OSS bucket if it does not exist (default:
+                  false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+                description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+                description: Given a threshold to treat events as delayed, output
+                  warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+                description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+ description: Profile name. Default to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+ description: Specifies the algorithm to use to when encrypting the
+ object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+ warn_for_delay:
+                description: Given a threshold to treat events as delayed, output
+                  warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
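The generated schema above is hard to read in unified-diff form, so here is a minimal sketch of an `Output` resource that exercises the Elasticsearch section of it. The apiVersion, kind, and field names (`host`, `port`, `logstash_format`, `buffer.timekey`) come from the CRD above; the resource name, namespace, and concrete values are purely illustrative.

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Output
metadata:
  name: es-output        # illustrative name
  namespace: logging     # illustrative namespace
spec:
  elasticsearch:
    host: elasticsearch-master   # schema default is localhost
    port: "9200"                 # declared as a string in the schema above
    logstash_format: true
    buffer:
      timekey: 1m                # the only field the buffer schema marks as required
      timekey_wait: 30s
      timekey_use_utc: true
```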
diff --git a/charts/logging-operator/templates/psp.yaml b/charts/logging-operator/templates/psp.yaml
index 25be0f127..515d29448 100644
--- a/charts/logging-operator/templates/psp.yaml
+++ b/charts/logging-operator/templates/psp.yaml
@@ -26,6 +26,6 @@ spec:
seLinux:
rule: RunAsAny
volumes:
- - secret
- - configMap
-{{ end }}
+ - secret
+ - configMap
+{{ end }}
\ No newline at end of file
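Credential fields throughout the output schema above (for example the Loki `username`/`password` and the S3 `aws_key_id`/`aws_sec_key` pair) share the same `valueFrom.secretKeyRef` shape, so secrets never have to be inlined in the resource. A sketch under that assumption; the secret name, keys, bucket, and region are illustrative.

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Output
metadata:
  name: s3-output                  # illustrative
spec:
  s3:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: s3-credentials     # illustrative Kubernetes secret
          key: awsAccessKeyId
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: s3-credentials
          key: awsSecretAccessKey
    s3_bucket: my-log-bucket       # the only field the s3 schema marks as required
    s3_region: eu-west-1
    path: logs/
    buffer:
      timekey: 10m
```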
diff --git a/charts/logging-operator/templates/rbac.yaml b/charts/logging-operator/templates/rbac.yaml
index b49e8f3ae..6167ae844 100644
--- a/charts/logging-operator/templates/rbac.yaml
+++ b/charts/logging-operator/templates/rbac.yaml
@@ -20,90 +20,73 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
+ - apiGroups:
+ - logging.banzaicloud.com
+ resources:
+ - loggings
+ - flows
+ - clusterflows
+ - outputs
+ - clusteroutputs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - logging.banzaicloud.com
+ resources:
+ - loggings/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ - apps
+ - batch
+ - extensions
+ - policy
+ - rbac.authorization.k8s.io
+ resources:
+ - namespaces
+ - nodes
+ - persistentvolumeclaims
+ - pods
+ - services
+ - resourcequotas
+ - replicationcontrollers
+ - limitranges
+ - persistentvolumes
+ - endpoints
+ - secrets
+ - configmaps
+ - serviceaccounts
+ - roles
+ - rolebindings
+ - clusterroles
+ - clusterrolebindings
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ - jobs
+ verbs:
+ - "*"
+ {{- if .Values.rbac.psp.enabled }}
- apiGroups:
- - logging.banzaicloud.com
+ - extensions
resources:
- - plugins
- - fluentds
- - fluentbits
- verbs:
- - "*"
-- apiGroups:
- - ""
- - apps
- - autoscaling
- - batch
- - extensions
- - policy
- - rbac.authorization.k8s.io
- resources:
- - namespaces
- - nodes
- - persistentvolumeclaims
- - pods
- - services
- - resourcequotas
- - replicationcontrollers
- - limitranges
- - persistentvolumeclaims
- - persistentvolumes
- - endpoints
- - secrets
- - configmaps
- - serviceaccounts
- - clusterroles
- - clusterrolebindings
- verbs:
- - "*"
-- apiGroups:
- - apps
- resources:
- - daemonsets
- - deployments
- - replicasets
- verbs:
- - "*"
-- apiGroups:
- - extensions
- resources:
- - daemonsets
- - deployments
- - replicasets
- verbs:
- - "*"
-- apiGroups:
- - apps
- resources:
- - statefulsets
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - batch
- resources:
- - cronjobs
- - jobs
- verbs:
- - list
- - watch
-- apiGroups:
- - autoscaling
- resources:
- - horizontalpodautoscalers
- verbs:
- - list
- - watch
-{{- if .Values.rbac.psp.enabled }}
-- apiGroups:
- - extensions
- resources:
- - podsecuritypolicies
+ - podsecuritypolicies
resourceNames:
- - psp.logging-operator
+ - psp.logging-operator
verbs:
- - use
-{{- end }}
+ - use
+ {{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
@@ -115,12 +98,12 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
subjects:
-- kind: ServiceAccount
- name: {{ template "logging-operator.fullname" . }}
- namespace: {{ .Release.Namespace }}
+ - kind: ServiceAccount
+ name: {{ template "logging-operator.fullname" . }}
+ namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "logging-operator.fullname" . }}
-{{- end }}
+ {{- end }}
\ No newline at end of file
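For clusters where RBAC is managed outside the chart, the rules added above for the new CRD groups translate into a standalone ClusterRole roughly like the following; the role name is illustrative, and the resources and verbs are copied from the template.

```yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: logging-operator-crds      # illustrative name
rules:
  - apiGroups:
      - logging.banzaicloud.com
    resources:
      - loggings
      - flows
      - clusterflows
      - outputs
      - clusteroutputs
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups:
      - logging.banzaicloud.com
    resources:
      - loggings/status
    verbs: ["get", "patch", "update"]
```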
diff --git a/charts/logging-operator/templates/service.yaml b/charts/logging-operator/templates/service.yaml
new file mode 100644
index 000000000..b1aee6fca
--- /dev/null
+++ b/charts/logging-operator/templates/service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "logging-operator.fullname" . }}
+ labels:
+{{ include "logging-operator.labels" . | indent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - port: {{ .Values.http.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ app.kubernetes.io/name: {{ include "logging-operator.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: query
\ No newline at end of file
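With the default values introduced below (`http.port: 8080`), the new template renders to a headless Service along these lines; the release-derived names are illustrative.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: logging-operator            # illustrative fullname
  labels:
    app.kubernetes.io/name: logging-operator
spec:
  type: ClusterIP
  clusterIP: None                   # headless: clients resolve the pod IPs directly
  ports:
    - port: 8080                    # .Values.http.port
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: logging-operator
    app.kubernetes.io/instance: logging-operator   # illustrative release name
    app.kubernetes.io/component: query
```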
diff --git a/charts/logging-operator/values.yaml b/charts/logging-operator/values.yaml
index cb60d8e0c..e4fcbb248 100644
--- a/charts/logging-operator/values.yaml
+++ b/charts/logging-operator/values.yaml
@@ -6,39 +6,18 @@ replicaCount: 1
image:
repository: banzaicloud/logging-operator
- tag: 0.2.2
+ tag: 2.0.0
pullPolicy: IfNotPresent
+imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
-# Namespace to watch fot LoggingOperator CRD
-watchNamespace: ""
-
-grafana:
- dashboard:
- enabled: true
-
-
-## Role Based Access
-## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
-##
-rbac:
- enabled: true
- ## Pod Security Policy
- ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
- ##
- psp:
- enabled: false
-
-## Define resources requests and limits for single Pods.
-## ref: https://kubernetes.io/docs/user-guide/compute-resources/
-## We usually recommend not to specify default resources and to leave this as a conscious
-## choice for the user. This also increases chances charts run on environments with little
-## resources, such as Minikube. If you do want to specify resources, uncomment the following
-## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-##
resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
@@ -46,46 +25,24 @@ resources: {}
# cpu: 100m
# memory: 128Mi
-
-## Define which Nodes the Pods are scheduled on.
-## ref: https://kubernetes.io/docs/user-guide/node-selection/
-##
nodeSelector: {}
-
-## If specified, the pod's tolerations.
-## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
tolerations: []
-# - key: "key"
-# operator: "Equal"
-# value: "value"
-# effect: "NoSchedule"
-## Assign the Logging operator to run on specific nodes
-## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-##
affinity: {}
-# requiredDuringSchedulingIgnoredDuringExecution:
-# nodeSelectorTerms:
-# - matchExpressions:
-# - key: kubernetes.io/e2e-az-name
-# operator: In
-# values:
-# - e2e-az1
-# - e2e-az2
+http:
+  # HTTP listen port number
+ port: 8080
+  # Service definition for the query HTTP service
+  service:
+    type: ClusterIP
+    # Annotations for the query HTTP service
+    annotations: {}
+    # Labels for the query HTTP service
+ labels: {}
-## SecurityContext holds pod-level security attributes and common container settings.
-## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-##
-podSecurityContext:
- runAsNonRoot: true
- runAsUser: 1000
- fsGroup: 2000
-securityContext:
- allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
- # capabilities:
- # drop: ["ALL"]
+rbac:
+ enabled: true
+ psp:
+ enabled: false
\ No newline at end of file
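A minimal sketch of a user-supplied override file for the reworked values; every key shown exists in the values.yaml above, while the file name and the specific resource figures are illustrative.

```yaml
# my-values.yaml (illustrative) -- pass with -f when installing the chart
image:
  tag: 2.0.0
http:
  port: 8080
rbac:
  enabled: true
  psp:
    enabled: true        # also activates the podsecuritypolicy rule shown in rbac.yaml
resources:
  limits:
    cpu: 100m
    memory: 128Mi
  requests:
    cpu: 100m
    memory: 128Mi
```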
diff --git a/charts/nginx-logging-demo/Chart.yaml b/charts/nginx-logging-demo/Chart.yaml
deleted file mode 100644
index 92515dcaa..000000000
--- a/charts/nginx-logging-demo/Chart.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-appVersion: "1.0"
-description: A Demo application for the logging-operator
-name: nginx-logging-demo
-version: 0.1.2
-maintainers:
-- name: Banzai Cloud
- email: info@banzaicloud.com
diff --git a/charts/nginx-logging-demo/README.md b/charts/nginx-logging-demo/README.md
deleted file mode 100644
index a40dc5cf8..000000000
--- a/charts/nginx-logging-demo/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# Logging Operator Nginx demonstration Chart
-
-[Logging Operator](https://github.com/banzaicloud/logging-operator) is a managed centralized logging component based on fluentd and fluent-bit.
-## tl;dr:
-
-```bash
-$ helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
-$ helm repo update
-$ helm install banzaicloud-stable/nginx-logging-demo
-```
-
-## Introduction
-
-This chart demonstrates the use of the [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) with an nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
-## Prerequisites
-
-- [Logging Operator](https://github.com/banzaicloud/logging-operator) available on the cluster
-
-
-## Installing the Chart
-
-To install the chart with the release name `log-test-nginx`:
-
-```bash
-$ helm install --name log-test-nginx banzaicloud-stable/nginx-logging-demo
-```
-## Uninstalling the Chart
-
-To uninstall/delete the `log-test-nginx` deployment:
-
-```bash
-$ helm delete log-test-nginx
-```
-
-The command removes all the Kubernetes components associated with the chart and deletes the release.
-
-## Configuration
-
-The following tables lists the configurable parameters of the nginx-logging-demo chart and their default values.
-
-| Parameter | Description | Default |
-| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
-| `image.repository` | Container image repository | `nginx` |
-| `image.tag` | Container image tag | `stable` |
-| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
-| `nameOverride` | Override name of app | `` |
-| `fullnameOverride` | Override full name of app | `` |
-| `affinity` | Node Affinity | `{}` |
-| `resources` | CPU/Memory resource requests/limits | `{}` |
-| `tolerations` | Node Tolerations | `[]` |
-| `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` |
-
-
-Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example:
-
-```bash
-$ helm install --name my-release -f values.yaml banzaicloud-stable/nginx-logging-demo
-```
-
-> **Tip**: You can use the default [values.yaml](values.yaml)
-
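
Such a values file for this demo chart might look like the following sketch; the selector and resource figures are illustrative only, echoing the parameters in the table and the commented defaults in the chart's values.yaml:

```yaml
# custom-values.yaml (hypothetical)
image:
  repository: nginx
  tag: stable
  pullPolicy: IfNotPresent
nodeSelector:
  kubernetes.io/os: linux   # example node selector
resources:
  limits:
    cpu: 100m
    memory: 128Mi
```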
diff --git a/charts/nginx-logging-demo/templates/NOTES.txt b/charts/nginx-logging-demo/templates/NOTES.txt
deleted file mode 100644
index 46c4f729a..000000000
--- a/charts/nginx-logging-demo/templates/NOTES.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
- {{- range .paths }}
- http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
- {{- end }}
-{{- end }}
-{{- else if contains "NodePort" .Values.service.type }}
- export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-logging-demo.fullname" . }})
- export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
- echo http://$NODE_IP:$NODE_PORT
-{{- else if contains "LoadBalancer" .Values.service.type }}
- NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-logging-demo.fullname" . }}'
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-logging-demo.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
- echo http://$SERVICE_IP:{{ .Values.service.port }}
-{{- else if contains "ClusterIP" .Values.service.type }}
- export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "nginx-logging-demo.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
- echo "Visit http://127.0.0.1:8080 to use your application"
- kubectl port-forward $POD_NAME 8080:80
-{{- end }}
-
-
-
diff --git a/charts/nginx-logging-demo/templates/deployment.yaml b/charts/nginx-logging-demo/templates/deployment.yaml
deleted file mode 100644
index bf70c6fba..000000000
--- a/charts/nginx-logging-demo/templates/deployment.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "nginx-logging-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- replicas: {{ .Values.replicaCount }}
- selector:
- matchLabels:
- app: {{ include "nginx-logging-demo.name" . }}
- release: {{ .Release.Name }}
- template:
- metadata:
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- release: {{ .Release.Name }}
- spec:
- containers:
- - name: {{ .Chart.Name }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: {{ .Values.image.pullPolicy }}
- ports:
- - name: http
- containerPort: 80
- protocol: TCP
- livenessProbe:
- httpGet:
- path: /
- port: http
- readinessProbe:
- httpGet:
- path: /
- port: http
- resources:
- {{- toYaml .Values.resources | nindent 12 }}
- {{- with .Values.nodeSelector }}
- nodeSelector:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.affinity }}
- affinity:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.tolerations }}
- tolerations:
- {{- toYaml . | nindent 8 }}
- {{- end }}
diff --git a/charts/nginx-logging-demo/templates/ingress.yaml b/charts/nginx-logging-demo/templates/ingress.yaml
deleted file mode 100644
index b64548268..000000000
--- a/charts/nginx-logging-demo/templates/ingress.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-{{- if .Values.ingress.enabled -}}
-{{- $fullName := include "nginx-logging-demo.fullname" . -}}
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
- name: {{ $fullName }}
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- {{- with .Values.ingress.annotations }}
- annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
-spec:
-{{- if .Values.ingress.tls }}
- tls:
- {{- range .Values.ingress.tls }}
- - hosts:
- {{- range .hosts }}
- - {{ . | quote }}
- {{- end }}
- secretName: {{ .secretName }}
- {{- end }}
-{{- end }}
- rules:
- {{- range .Values.ingress.hosts }}
- - host: {{ .host | quote }}
- http:
- paths:
- {{- range .paths }}
- - path: {{ . }}
- backend:
- serviceName: {{ $fullName }}
- servicePort: http
- {{- end }}
- {{- end }}
-{{- end }}
diff --git a/charts/nginx-logging-demo/templates/logging.yaml b/charts/nginx-logging-demo/templates/logging.yaml
deleted file mode 100644
index a0bfb0837..000000000
--- a/charts/nginx-logging-demo/templates/logging.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: {{ include "nginx-logging-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- input:
- label:
- app: {{ include "nginx-logging-demo.name" . }}
-{{- if .Values.forwarding.enabled }}
- output:
- - type: forward
- name: forward
- parameters:
- - name: host
- value: {{ .Values.forwarding.targetHost | quote }}
- - name: port
- value: {{ .Values.forwarding.targetPort | quote }}
- - name: name
- value: {{ .Values.forwarding.targetHost | quote }}
-{{- if .Values.forwarding.tlsSharedKey }}
- - name: tlsSharedKey
- value: {{ .Values.forwarding.tlsSharedKey | b64enc | quote }}
-{{- end }}
-{{- end }}
- filter:
- - type: parser
- name: parser-nginx
- parameters:
- - name: format
- value: '/^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$/'
- - name: timeFormat
- value: "%d/%b/%Y:%H:%M:%S %z"
\ No newline at end of file
diff --git a/charts/nginx-logging-demo/templates/service.yaml b/charts/nginx-logging-demo/templates/service.yaml
deleted file mode 100644
index f07f68c51..000000000
--- a/charts/nginx-logging-demo/templates/service.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "nginx-logging-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- type: {{ .Values.service.type }}
- ports:
- - port: {{ .Values.service.port }}
- targetPort: http
- protocol: TCP
- name: http
- selector:
- app: {{ include "nginx-logging-demo.name" . }}
- release: {{ .Release.Name }}
diff --git a/charts/nginx-logging-demo/templates/tests/test-connection.yaml b/charts/nginx-logging-demo/templates/tests/test-connection.yaml
deleted file mode 100644
index 026771b04..000000000
--- a/charts/nginx-logging-demo/templates/tests/test-connection.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: "{{ include "nginx-logging-demo.fullname" . }}-test-connection"
- labels:
- app: {{ include "nginx-logging-demo.name" . }}
- chart: {{ include "nginx-logging-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": test-success
-spec:
- containers:
- - name: wget
- image: busybox
- command: ['wget']
- args: ['{{ include "nginx-logging-demo.fullname" . }}:{{ .Values.service.port }}']
- restartPolicy: Never
diff --git a/charts/nginx-logging-demo/values.yaml b/charts/nginx-logging-demo/values.yaml
deleted file mode 100644
index 2bdee916f..000000000
--- a/charts/nginx-logging-demo/values.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Default values for nginx-logging-demo.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-replicaCount: 1
-
-image:
- repository: nginx
- tag: stable
- pullPolicy: IfNotPresent
-
-nameOverride: ""
-fullnameOverride: ""
-
-service:
- type: ClusterIP
- port: 80
-
-ingress:
- enabled: false
- annotations: {}
- # kubernetes.io/ingress.class: nginx
- # kubernetes.io/tls-acme: "true"
- hosts:
- - host: chart-example.local
- paths: []
-
- tls: []
- # - secretName: chart-example-tls
- # hosts:
- # - chart-example.local
-
-forwarding:
- enabled: false
- # will use the existing tls secret used by the fluentd input
- #tlsSharedKey: example
- #targetHost: fluentd.target.svc
- #targetPort: 24240
-
-resources: {}
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
diff --git a/charts/nginx-logging-es-demo/.helmignore b/charts/nginx-logging-es-demo/.helmignore
deleted file mode 100644
index 50af03172..000000000
--- a/charts/nginx-logging-es-demo/.helmignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/charts/nginx-logging-es-demo/Chart.yaml b/charts/nginx-logging-es-demo/Chart.yaml
deleted file mode 100644
index dd088068d..000000000
--- a/charts/nginx-logging-es-demo/Chart.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-appVersion: "1.0"
-description: A Demo application for the logging-operator
-name: nginx-logging-es-demo
-version: 0.1.1
-maintainers:
-- name: Banzai Cloud
- email: info@banzaicloud.com
diff --git a/charts/nginx-logging-es-demo/README.md b/charts/nginx-logging-es-demo/README.md
deleted file mode 100644
index d7ea449a2..000000000
--- a/charts/nginx-logging-es-demo/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# Logging Operator Nginx & Elasticsearch output demonstration Chart
-
-[Logging Operator](https://github.com/banzaicloud/logging-operator) is a managed centralized logging component based on fluentd and fluent-bit.
-## tl;dr:
-
-```bash
-$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com/
-$ helm repo update
-$ helm install banzaicloud-stable/nginx-logging-es-demo
-```
-
-## Introduction
-
-This chart demonstrates the use of the [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) with an Nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
-## Prerequisites
-
-- [Logging Operator](https://github.com/banzaicloud/logging-operator) available on the cluster
-
-
-## Installing the Chart
-
-To install the chart with the release name `log-test-nginx`:
-
-```bash
-$ helm install --name log-test-nginx banzaicloud-stable/nginx-logging-es-demo
-```
-## Uninstalling the Chart
-
-To uninstall/delete the `log-test-nginx` deployment:
-
-```bash
-$ helm delete log-test-nginx
-```
-
-The command removes all the Kubernetes components associated with the chart and deletes the release.
-
-## Configuration
-
-The following table lists the configurable parameters of the nginx-logging-es-demo chart and their default values.
-
-| Parameter | Description | Default |
-| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
-| `image.repository` | Container image repository | `nginx` |
-| `image.tag` | Container image tag | `stable` |
-| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
-| `nameOverride` | Override name of app | `` |
-| `fullnameOverride` | Override full name of app | `` |
-| `affinity` | Node Affinity | `{}` |
-| `resources` | CPU/Memory resource requests/limits | `{}` |
-| `tolerations` | Node Tolerations | `[]` |
-| `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` |
-
-
-Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example:
-
-```bash
-$ helm install --name my-release -f values.yaml banzaicloud-stable/nginx-logging-es-demo
-```
-
-> **Tip**: You can use the default [values.yaml](values.yaml)
-
diff --git a/charts/nginx-logging-es-demo/templates/NOTES.txt b/charts/nginx-logging-es-demo/templates/NOTES.txt
deleted file mode 100644
index 9768319ad..000000000
--- a/charts/nginx-logging-es-demo/templates/NOTES.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
- {{- range .paths }}
- http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
- {{- end }}
-{{- end }}
-{{- else if contains "NodePort" .Values.service.type }}
- export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-logging-es-demo.fullname" . }})
- export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
- echo http://$NODE_IP:$NODE_PORT
-{{- else if contains "LoadBalancer" .Values.service.type }}
- NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-logging-es-demo.fullname" . }}'
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-logging-es-demo.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
- echo http://$SERVICE_IP:{{ .Values.service.port }}
-{{- else if contains "ClusterIP" .Values.service.type }}
- export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "nginx-logging-es-demo.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
- echo "Visit http://127.0.0.1:8080 to use your application"
- kubectl port-forward $POD_NAME 8080:80
-{{- end }}
-
-
-
diff --git a/charts/nginx-logging-es-demo/templates/_helpers.tpl b/charts/nginx-logging-es-demo/templates/_helpers.tpl
deleted file mode 100644
index ee718207a..000000000
--- a/charts/nginx-logging-es-demo/templates/_helpers.tpl
+++ /dev/null
@@ -1,32 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "nginx-logging-es-demo.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "nginx-logging-es-demo.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "nginx-logging-es-demo.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
diff --git a/charts/nginx-logging-es-demo/templates/deployment.yaml b/charts/nginx-logging-es-demo/templates/deployment.yaml
deleted file mode 100644
index 6b0416299..000000000
--- a/charts/nginx-logging-es-demo/templates/deployment.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "nginx-logging-es-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- chart: {{ include "nginx-logging-es-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- replicas: {{ .Values.replicaCount }}
- selector:
- matchLabels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- release: {{ .Release.Name }}
- template:
- metadata:
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- release: {{ .Release.Name }}
- spec:
- containers:
- - name: {{ .Chart.Name }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: {{ .Values.image.pullPolicy }}
- ports:
- - name: http
- containerPort: 80
- protocol: TCP
- livenessProbe:
- httpGet:
- path: /
- port: http
- readinessProbe:
- httpGet:
- path: /
- port: http
- resources:
- {{- toYaml .Values.resources | nindent 12 }}
- {{- with .Values.nodeSelector }}
- nodeSelector:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.affinity }}
- affinity:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.tolerations }}
- tolerations:
- {{- toYaml . | nindent 8 }}
- {{- end }}
diff --git a/charts/nginx-logging-es-demo/templates/ingress.yaml b/charts/nginx-logging-es-demo/templates/ingress.yaml
deleted file mode 100644
index 6e62c9765..000000000
--- a/charts/nginx-logging-es-demo/templates/ingress.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-{{- if .Values.ingress.enabled -}}
-{{- $fullName := include "nginx-logging-es-demo.fullname" . -}}
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
- name: {{ $fullName }}
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- chart: {{ include "nginx-logging-es-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- {{- with .Values.ingress.annotations }}
- annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
-spec:
-{{- if .Values.ingress.tls }}
- tls:
- {{- range .Values.ingress.tls }}
- - hosts:
- {{- range .hosts }}
- - {{ . | quote }}
- {{- end }}
- secretName: {{ .secretName }}
- {{- end }}
-{{- end }}
- rules:
- {{- range .Values.ingress.hosts }}
- - host: {{ .host | quote }}
- http:
- paths:
- {{- range .paths }}
- - path: {{ . }}
- backend:
- serviceName: {{ $fullName }}
- servicePort: http
- {{- end }}
- {{- end }}
-{{- end }}
diff --git a/charts/nginx-logging-es-demo/templates/logging.yaml b/charts/nginx-logging-es-demo/templates/logging.yaml
deleted file mode 100644
index 8040b9b5c..000000000
--- a/charts/nginx-logging-es-demo/templates/logging.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: {{ include "nginx-logging-es-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- chart: {{ include "nginx-logging-es-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- input:
- label:
- app: {{ include "nginx-logging-es-demo.name" . }}
-
- output:
- - type: "elasticsearch"
- name: {{ include "nginx-logging-es-demo.name" . }}
- parameters:
- - name: host
- value: "elasticsearch-elasticsearch-cluster"
- - name: port
- value: "9200"
- - name: scheme
- value: "https"
- - name: sslVerify
- value: "false"
- - name: sslVersion
- value: "TLSv1_2"
-
- filter:
- - type: parser
- name: parser-nginx
- parameters:
- - name: format
- value: '/^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$/'
- - name: timeFormat
- value: "%d/%b/%Y:%H:%M:%S %z"
\ No newline at end of file
diff --git a/charts/nginx-logging-es-demo/templates/service.yaml b/charts/nginx-logging-es-demo/templates/service.yaml
deleted file mode 100644
index e6ffc3854..000000000
--- a/charts/nginx-logging-es-demo/templates/service.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "nginx-logging-es-demo.fullname" . }}
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- chart: {{ include "nginx-logging-es-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- type: {{ .Values.service.type }}
- ports:
- - port: {{ .Values.service.port }}
- targetPort: http
- protocol: TCP
- name: http
- selector:
- app: {{ include "nginx-logging-es-demo.name" . }}
- release: {{ .Release.Name }}
diff --git a/charts/nginx-logging-es-demo/templates/tests/test-connection.yaml b/charts/nginx-logging-es-demo/templates/tests/test-connection.yaml
deleted file mode 100644
index ea3bb4ca6..000000000
--- a/charts/nginx-logging-es-demo/templates/tests/test-connection.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: "{{ include "nginx-logging-es-demo.fullname" . }}-test-connection"
- labels:
- app: {{ include "nginx-logging-es-demo.name" . }}
- chart: {{ include "nginx-logging-es-demo.chart" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": test-success
-spec:
- containers:
- - name: wget
- image: busybox
- command: ['wget']
- args: ['{{ include "nginx-logging-es-demo.fullname" . }}:{{ .Values.service.port }}']
- restartPolicy: Never
diff --git a/charts/nginx-logging-es-demo/values.yaml b/charts/nginx-logging-es-demo/values.yaml
deleted file mode 100644
index a171a692f..000000000
--- a/charts/nginx-logging-es-demo/values.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Default values for nginx-logging-es-demo.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-replicaCount: 1
-
-image:
- repository: nginx
- tag: stable
- pullPolicy: IfNotPresent
-
-nameOverride: ""
-fullnameOverride: ""
-
-service:
- type: ClusterIP
- port: 80
-
-ingress:
- enabled: false
- annotations: {}
- # kubernetes.io/ingress.class: nginx
- # kubernetes.io/tls-acme: "true"
- hosts:
- - host: chart-example.local
- paths: []
-
- tls: []
- # - secretName: chart-example-tls
- # hosts:
- # - chart-example.local
-
-resources: {}
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
diff --git a/cmd/docgen/docgen.go b/cmd/docgen/docgen.go
deleted file mode 100644
index cec38d83e..000000000
--- a/cmd/docgen/docgen.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "regexp"
- "text/template"
- "text/template/parse"
-
- "github.com/Masterminds/sprig"
- "github.com/banzaicloud/logging-operator/pkg/resources/plugins"
-)
-
-//TODO handle parameters
-func main() {
- pluginMap := plugins.GetAll()
- var indexPage bytes.Buffer
- indexPage.WriteString("# List of ")
- for name, plugin := range pluginMap {
- var data bytes.Buffer
- data.WriteString(fmt.Sprintf("# Plugin %s\n", name))
- t := template.New("PluginTemplate").Funcs(sprig.TxtFuncMap())
- t, err := t.Parse(plugin.Template)
- if err != nil {
- panic(err)
- }
- data.WriteString("## Variables\n")
- data.WriteString("| Variable name | Default | Applied function |\n")
- data.WriteString(fmt.Sprintf("|---|---|---|\n"))
- for _, item := range listTemplateFields(t) {
- regExp, err := regexp.Compile(`{{(?P<function>\w*)?\s*.(?P<variable>.*)}}`)
- if err != nil {
- panic(err)
- }
- matches := regExp.FindStringSubmatch(item)
- vairableName := matches[2]
- variableFunc := matches[1]
- defaultValue, ok := plugin.DefaultValues[matches[2]]
- if !ok {
- defaultValue = "-"
- }
- data.WriteString(fmt.Sprintf("| %s | %s | %s |\n", vairableName, defaultValue, variableFunc))
-
- }
- data.WriteString("## Plugin template\n")
- data.WriteString("```" + plugin.Template + "\n```")
- err = ioutil.WriteFile("docs/plugins/"+name+".md", data.Bytes(), 0644)
- if err != nil {
- panic(err)
- }
- }
-}
-
-func listTemplateFields(t *template.Template) []string {
- return listNodeFields(t.Tree.Root, nil)
-}
-
-func listNodeFields(node parse.Node, res []string) []string {
- if node.Type() == parse.NodeAction {
- if !contains(node.String(), res) {
- res = append(res, node.String())
- }
- }
-
- if ifn, ok := node.(*parse.IfNode); ok {
- for _, n := range ifn.List.Nodes {
- res = listNodeFields(n, res)
- }
- if ifn.ElseList != nil {
- for _, n := range ifn.ElseList.Nodes {
- res = listNodeFields(n, res)
- }
- }
- }
-
- if ln, ok := node.(*parse.ListNode); ok {
- for _, n := range ln.Nodes {
- res = listNodeFields(n, res)
- }
- }
- return res
-}
-
-func contains(s string, sl []string) bool {
- for _, i := range sl {
- if i == s {
- return true
- }
- }
- return false
-}
diff --git a/cmd/docs.go b/cmd/docs.go
new file mode 100644
index 000000000..3e36b7b92
--- /dev/null
+++ b/cmd/docs.go
@@ -0,0 +1,300 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/go-logr/logr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var log logr.Logger
+
+type Doc struct {
+ Name string
+ Content string
+ Type string
+ RootNode *ast.File
+}
+
+func (d *Doc) Append(line string) {
+ d.Content = d.Content + line + "\n"
+}
+
+func (d *Doc) CheckNodes(n ast.Node) bool {
+ generic, ok := n.(*ast.GenDecl)
+ if ok {
+ typeName, ok := generic.Specs[0].(*ast.TypeSpec)
+ if ok {
+ _, ok := typeName.Type.(*ast.InterfaceType)
+ if ok && typeName.Name.Name == "_doc" {
+ d.Append(fmt.Sprintf("# %s", getTypeName(generic, d.Name)))
+ d.Append("## Overview")
+ d.Append(getTypeDocs(generic))
+ d.Append("## Configuration")
+ }
+ structure, ok := typeName.Type.(*ast.StructType)
+ if ok {
+ d.Append(fmt.Sprintf("### %s", getTypeName(generic, typeName.Name.Name)))
+ if getTypeDocs(generic) != "" {
+ d.Append(fmt.Sprintf("#### %s", getTypeDocs(generic)))
+ }
+ d.Append("| Variable Name | Type | Required | Default | Description |")
+ d.Append("|---|---|---|---|---|")
+ for _, item := range structure.Fields.List {
+ name, com, def, required := getValuesFromItem(item)
+ d.Append(fmt.Sprintf("| %s | %s | %s | %s | %s |", name, normaliseType(item.Type), required, def, com))
+ }
+ }
+
+ }
+ }
+
+ return true
+}
+
+func normaliseType(fieldType ast.Expr) string {
+ fset := token.NewFileSet()
+ var typeNameBuf bytes.Buffer
+ err := printer.Fprint(&typeNameBuf, fset, fieldType)
+ if err != nil {
+ log.Error(err, "error getting type")
+ }
+ return typeNameBuf.String()
+}
+
+func (d *Doc) Generate() {
+ if d.RootNode != nil {
+ ast.Inspect(d.RootNode, d.CheckNodes)
+ } else {
+ log.Info("DocumentRoot not present, skipping parse")
+ }
+ directory := fmt.Sprintf("./%s/%s/", DocsPath, d.Type)
+ err := os.MkdirAll(directory, os.ModePerm)
+ if err != nil {
+ log.Error(err, "Md file create error %s", err.Error())
+ }
+ filepath := fmt.Sprintf("./%s/%s/%s.md", DocsPath, d.Type, d.Name)
+ f, err := os.Create(filepath)
+ if err != nil {
+ log.Error(err, "Md file create error %s", err.Error())
+ }
+ defer closeFile(f)
+
+ _, err = f.WriteString(d.Content)
+ if err != nil {
+ log.Error(err, "Md file write error %s", err.Error())
+ }
+}
+
+var PluginDirs = map[string]string{
+ "filters": "./pkg/model/filter/",
+ "outputs": "./pkg/model/output/",
+}
+
+var DocsPath = "docs/plugins"
+
+type Plugin struct {
+ Name string
+ Type string
+ SourcePath string
+ DocumentationPath string
+}
+
+type Plugins []Plugin
+
+var ignoredPluginsList = []string{
+ "null",
+ ".*.deepcopy",
+}
+
+func main() {
+ verboseLogging := true
+ ctrl.SetLogger(zap.Logger(verboseLogging))
+ log = ctrl.Log.WithName("docs").WithName("main")
+ //log.Info("Plugin Directories:", "packageDir", packageDir)
+
+ fileList, err := GetPlugins(PluginDirs)
+ if err != nil {
+ log.Error(err, "Directory check error.")
+ }
+ for _, file := range fileList {
+ log.Info("Plugin", "Name", file.SourcePath)
+ document := GetDocumentParser(file)
+ document.Generate()
+ }
+
+ index := Doc{
+ Name: "index",
+ }
+ index.Append("## Table of Contents\n\n")
+ for pluginType := range PluginDirs {
+ index.Append(fmt.Sprintf("### %s\n", pluginType))
+ for _, plugin := range fileList {
+ if plugin.Type == pluginType {
+ index.Append(fmt.Sprintf("- [%s](%s)", plugin.Name, plugin.DocumentationPath))
+ }
+ }
+ index.Append("\n")
+ }
+
+ index.Generate()
+
+}
+
+func getPrefixedLine(origin, expression string) string {
+ r := regexp.MustCompile(expression)
+ result := r.FindStringSubmatch(origin)
+ if len(result) > 1 {
+ return fmt.Sprintf("%s", result[1])
+ }
+ return ""
+}
+
+func getTypeName(generic *ast.GenDecl, defaultName string) string {
+ structName := generic.Doc.Text()
+ result := getPrefixedLine(structName, `\+docName:\"(.*)\"`)
+ if result != "" {
+ return result
+ }
+ return defaultName
+}
+
+func getTypeDocs(generic *ast.GenDecl) string {
+ comment := ""
+ if generic.Doc != nil {
+ for _, line := range generic.Doc.List {
+ newLine := strings.TrimPrefix(line.Text, "//")
+ newLine = strings.TrimSpace(newLine)
+ if !strings.HasPrefix(newLine, "+kubebuilder") &&
+ !strings.HasPrefix(newLine, "+docName") {
+ comment += newLine + "\n"
+ }
+ }
+ }
+ return comment
+}
+
+func getLink(def string) string {
+ result := getPrefixedLine(def, `\+docLink:\"(.*)\"`)
+ if result != "" {
+ url := strings.Split(result, ",")
+ def = strings.Replace(def, fmt.Sprintf("+docLink:\"%s\"", result), fmt.Sprintf("[%s](%s)", url[0], url[1]), 1)
+ }
+ return def
+}
+
+func formatRequired(r bool) string {
+ if r {
+ return "Yes"
+ }
+ return "No"
+}
+
+func getValuesFromItem(item *ast.Field) (name, comment, def, required string) {
+ commentWithDefault := ""
+ if item.Doc != nil {
+ for _, line := range item.Doc.List {
+ newLine := strings.TrimPrefix(line.Text, "//")
+ newLine = strings.TrimSpace(newLine)
+ if !strings.HasPrefix(newLine, "+kubebuilder") {
+ commentWithDefault += newLine + " "
+ }
+ }
+ }
+ tag := item.Tag.Value
+ tagResult := getPrefixedLine(tag, `plugin:\"default:(.*)\"`)
+ nameResult := getPrefixedLine(tag, `json:\"([^,\"]*).*\"`)
+ required = formatRequired(!strings.Contains(getPrefixedLine(tag, `json:\"(.*)\"`), "omitempty"))
+ if tagResult != "" {
+ return nameResult, getLink(commentWithDefault), tagResult, required
+ } else {
+ result := getPrefixedLine(commentWithDefault, `\(default:(.*)\)`)
+ if result != "" {
+ ignore := fmt.Sprintf("(default:%s)", result)
+ comment = strings.Replace(commentWithDefault, ignore, "", 1)
+ return nameResult, comment, getLink(result), required
+ }
+
+ return nameResult, getLink(commentWithDefault), "-", required
+ }
+}
+
+func GetDocumentParser(file Plugin) *Doc {
+ fileSet := token.NewFileSet()
+ node, err := parser.ParseFile(fileSet, file.SourcePath, nil, parser.ParseComments)
+ if err != nil {
+ log.Error(err, "Error!")
+ }
+ newDoc := &Doc{
+ Name: file.Name,
+ RootNode: node,
+ Type: file.Type,
+ }
+ return newDoc
+}
+
+func GetPlugins(PluginDirs map[string]string) (Plugins, error) {
+ var PluginList Plugins
+ for pluginType, path := range PluginDirs {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ log.Error(err, err.Error())
+ return nil, err
+ }
+
+ for _, file := range files {
+ log.V(2).Info("fileListGenerator", "filename", file.Name())
+ fname := strings.Replace(file.Name(), ".go", "", 1)
+ if filepath.Ext(file.Name()) == ".go" && getPluginWhiteList(fname) {
+ fullPath := path + file.Name()
+ filepath := fmt.Sprintf("./%s/%s.md", pluginType, fname)
+ PluginList = append(PluginList, Plugin{Name: fname, SourcePath: fullPath, DocumentationPath: filepath, Type: pluginType})
+ }
+ }
+ }
+
+ return PluginList, nil
+}
+
+func closeFile(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Error(err, "File Close Error: %s", err.Error())
+ }
+}
+
+func getPluginWhiteList(pluginName string) bool {
+ for _, p := range ignoredPluginsList {
+ r := regexp.MustCompile(p)
+ if r.MatchString(pluginName) {
+ log.Info("fileListGenerator", "ignored plugin", pluginName)
+ return false
+ }
+ }
+ return true
+}
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
deleted file mode 100644
index 6c8b93829..000000000
--- a/cmd/manager/main.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright © 2019 Banzai Cloud
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "os"
- "runtime"
-
- "github.com/banzaicloud/logging-operator/pkg/apis"
- "github.com/banzaicloud/logging-operator/pkg/controller"
-
- "github.com/operator-framework/operator-sdk/pkg/k8sutil"
- "github.com/operator-framework/operator-sdk/pkg/leader"
- "github.com/operator-framework/operator-sdk/pkg/log/zap"
- "github.com/operator-framework/operator-sdk/pkg/metrics"
- sdkVersion "github.com/operator-framework/operator-sdk/version"
- "github.com/spf13/pflag"
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
- "sigs.k8s.io/controller-runtime/pkg/client/config"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- "sigs.k8s.io/controller-runtime/pkg/runtime/signals"
-)
-
-// Change below variables to serve metrics on different host or port.
-var (
- metricsHost = "0.0.0.0"
- metricsPort int32 = 8383
-)
-var log = logf.Log.WithName("cmd")
-
-func printVersion() {
- log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
- log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
- log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
-}
-
-func main() {
- // Add the zap logger flag set to the CLI. The flag set must
- // be added before calling pflag.Parse().
- pflag.CommandLine.AddFlagSet(zap.FlagSet())
-
- // Add flags registered by imported packages (e.g. glog and
- // controller-runtime)
- pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
-
- pflag.Parse()
-
- // Use a zap logr.Logger implementation. If none of the zap
- // flags are configured (or if the zap flag set is not being
- // used), this defaults to a production zap logger.
- //
- // The logger instantiated here can be changed to any logger
- // implementing the logr.Logger interface. This logger will
- // be propagated through the whole operator, generating
- // uniform and structured logs.
- logf.SetLogger(zap.Logger())
-
- printVersion()
-
- namespace, err := k8sutil.GetWatchNamespace()
- if err != nil {
- log.Error(err, "Failed to get watch namespace")
- os.Exit(1)
- }
-
- // Get a config to talk to the apiserver
- cfg, err := config.GetConfig()
- if err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- ctx := context.TODO()
-
- // Become the leader before proceeding
- err = leader.Become(ctx, "logging-operator-lock")
- if err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- // Create a new Cmd to provide shared dependencies and start components
- mgr, err := manager.New(cfg, manager.Options{
- Namespace: namespace,
- MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
- })
- if err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- log.Info("Registering Components.")
-
- // Setup Scheme for all resources
- if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- // Setup all Controllers
- if err := controller.AddToManager(mgr); err != nil {
- log.Error(err, "")
- os.Exit(1)
- }
-
- // Create Service object to expose the metrics port.
- _, err = metrics.ExposeMetricsPort(ctx, metricsPort)
- if err != nil {
- log.Info(err.Error())
- }
-
- log.Info("Starting the Cmd.")
-
- // Start the Cmd
- if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
- log.Error(err, "Manager exited non-zero")
- os.Exit(1)
- }
-}
diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml
new file mode 100644
index 000000000..9d6bad1e1
--- /dev/null
+++ b/config/certmanager/certificate.yaml
@@ -0,0 +1,24 @@
+# The following manifests contain a self-signed issuer CR and a certificate CR.
+# More documentation can be found at https://docs.cert-manager.io
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Issuer
+metadata:
+ name: selfsigned-issuer
+ namespace: system
+spec:
+ selfSigned: {}
+---
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
+ namespace: system
+spec:
+ # $(SERVICENAME) and $(NAMESPACE) will be substituted by kustomize
+ commonName: $(SERVICENAME).$(NAMESPACE).svc
+ dnsNames:
+ - $(SERVICENAME).$(NAMESPACE).svc.cluster.local
+ issuerRef:
+ kind: Issuer
+ name: selfsigned-issuer
+ secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize
diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml
new file mode 100644
index 000000000..8181bc3a2
--- /dev/null
+++ b/config/certmanager/kustomization.yaml
@@ -0,0 +1,26 @@
+resources:
+- certificate.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+- name: NAMESPACE # namespace of the service and the certificate CR
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldref:
+ fieldpath: metadata.namespace
+- name: CERTIFICATENAME
+ objref:
+ kind: Certificate
+ group: certmanager.k8s.io
+ version: v1alpha1
+ name: serving-cert # this name should match the one in certificate.yaml
+- name: SERVICENAME
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml
new file mode 100644
index 000000000..49e0b1e7a
--- /dev/null
+++ b/config/certmanager/kustomizeconfig.yaml
@@ -0,0 +1,16 @@
+# This configuration is for teaching kustomize how to update name ref and var substitution
+nameReference:
+- kind: Issuer
+ group: certmanager.k8s.io
+ fieldSpecs:
+ - kind: Certificate
+ group: certmanager.k8s.io
+ path: spec/issuerRef/name
+
+varReference:
+- kind: Certificate
+ group: certmanager.k8s.io
+ path: spec/commonName
+- kind: Certificate
+ group: certmanager.k8s.io
+ path: spec/dnsNames
diff --git a/config/crd/bases/logging.banzaicloud.com_clusterflows.yaml b/config/crd/bases/logging.banzaicloud.com_clusterflows.yaml
new file mode 100644
index 000000000..ac7040257
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.com_clusterflows.yaml
@@ -0,0 +1,139 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusterflows.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: ClusterFlow
+ listKind: ClusterFlowList
+ plural: clusterflows
+ singular: clusterflow
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterFlow is the Schema for the clusterflows API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Name of the logging cluster to be attached
+ properties:
+ filters:
+ items:
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid record to @ERROR label. Invalid
+ cases are: key not exist, format is not matched, unexpected
+ error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+ description: ' Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+ description: Remove key_name field when parsing is succeeded
+ type: boolean
+ replace_invalid_sequence:
+ description: If true, invalid string is replaced with safe
+ characters and re-parse it.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+ description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
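
For context, a resource conforming to the ClusterFlow schema above might look like this sketch; all names and label values are placeholders, and the referenced ClusterOutput is assumed to exist elsewhere:

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: ClusterFlow
metadata:
  name: example-clusterflow        # placeholder name
spec:
  selectors:
    app: example-app               # placeholder pod label selector
  filters:
    - parser:
        key_name: log              # parse the "log" field of each record
    - stdout: {}
  outputRefs:
    - example-clusteroutput        # name of an assumed ClusterOutput
```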
diff --git a/config/crd/bases/logging.banzaicloud.com_clusteroutputs.yaml b/config/crd/bases/logging.banzaicloud.com_clusteroutputs.yaml
new file mode 100644
index 000000000..15001eed6
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.com_clusteroutputs.yaml
@@ -0,0 +1,1881 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: clusteroutputs.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: ClusterOutput
+ listKind: ClusterOutputList
+ plural: clusteroutputs
+ singular: clusteroutput
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: ClusterOutput is the Schema for the clusteroutputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+ description: 'Automatically create container if not exists(default:
+ true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+ description: 'Azure storage type currently only "blob" supported
+ (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+ description: Specify the string and its value to be replaced in
+              form of hash. Can contain multiple key-value pairs that would be
+ replaced in the specified template_file. This setting only creates
+ template and to add rollover index please check the rollover_index
+ configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+ description: 'Indicates whether to fail when max_retry_putting_template
+              is exceeded. If you have multiple output plugins, you can use
+              this property to avoid failing on fluentd startup. (default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+              You can use this option to enforce a URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+              description: 'Set the Logstash prefix. (default: logstash)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+ description: 'You can specify times of retry obtaining Elasticsearch
+ version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+ description: 'You can specify times of retry putting template.(default:
+ 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: string
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+              JSON encoder/decoder. When this parameter is set to true, the Elasticsearch
+              client uses Oj as JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+              only on "host unreachable exceptions". We recommend setting this to
+ true in the presence of elasticsearch shield.(default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Scheme for HTTP Basic authentication.(default: true)'
+ type: boolean
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+              description: 'By default, records are inserted into the index logstash-YYMMDD
+              in UTC (Coordinated Universal Time). This option allows the use of
+              local time if you set utc_index to false. (default: true)'
+ type: boolean
+ with_transporter_log:
+ description: 'This is debugging purpose option to enable to obtain
+ transporter layer log. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ type: object
+ enabledNamespaces:
+ items:
+ type: string
+ type: array
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+              description: 'Create GCS bucket if it does not exist (default:
+ true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+ description: User provided web-safe keys and arbitrary string values
+              that will be returned with requests for the file as "x-goog-meta-"
+ response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+ description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+              description: 'Create OSS bucket if it does not exist (default:
+ false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+              description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+              description: Given a threshold to treat events as delayed, output
+ warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+              description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check bucket if exists or not
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of `%{hex_random}` placeholder(4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default:169.254.169.254)
+ type: string
+ port:
+ description: Port number (default:80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+ description: Profile name. Default to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API Request (s3,v4)
+ type: string
+ sse_customer_algorithm:
+ description: Specifies the algorithm to use to when encrypting the
+ object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+              description: Given a threshold to treat events as delayed, output
+ description: Given a threshold to treat events as delay, output
+ warning logs if delayed events were put into s3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ sumologic:
+ properties:
+ add_timestamp:
+ description: 'Add timestamp (or timestamp_key) field to logs before
+ sending to sumologic (default: true)'
+ type: boolean
+ data_type:
+ description: 'The type of data that will be sent to Sumo Logic,
+ either logs or metrics (default: logs)'
+ type: string
+ disable_cookies:
+ description: 'Option to disable cookies on the HTTP Client. (default:
+ false)'
+ type: boolean
+ endpoint:
+ description: SumoLogic HTTP Collector URL
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ log_format:
+ description: 'Format to post logs into Sumo. (default: json)'
+ type: string
+ log_key:
+ description: 'Used to specify the key when merging json or sending
+ logs in text format (default: message)'
+ type: string
+ metric_data_format:
+ description: 'The format of metrics you will be sending, either
+ graphite or carbon2 or prometheus (default: graphite)'
+ type: string
+ open_timeout:
+ description: 'Set timeout seconds to wait until connection is opened.
+ (default: 60)'
+ type: integer
+ proxy_uri:
+ description: Add the uri of the proxy environment if present.
+ type: string
+ source_category:
+ description: 'Set _sourceCategory metadata field within SumoLogic
+ (default: nil)'
+ type: string
+ source_host:
+ description: 'Set _sourceHost metadata field within SumoLogic (default:
+ nil)'
+ type: string
+ source_name:
+ description: Set _sourceName metadata field within SumoLogic - overrides
+ source_name_key (default is nil)
+ type: string
+ source_name_key:
+ description: 'Set as source::path_key''s value so that the source_name
+ can be extracted from Fluentd''s buffer (default: source_name)'
+ type: string
+ timestamp_key:
+ description: 'Field name when add_timestamp is on (default: timestamp)'
+ type: string
+ verify_ssl:
+ description: 'Verify ssl certificate. (default: true)'
+ type: boolean
+ required:
+ - endpoint
+ - source_name
+ type: object
+ required:
+ - sumologic
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ required:
+ - spec
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
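
For orientation, the Output schema above can be exercised with a custom resource like the following minimal sketch. It shows an S3 output that pulls AWS credentials from a Kubernetes secret via the valueFrom/secretKeyRef structure defined in the CRD; the namespace, secret name, secret keys, bucket, region and path are illustrative placeholders, not values taken from this patch.

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Output
metadata:
  name: s3-output              # illustrative name
  namespace: default           # illustrative namespace
spec:
  s3:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: s3-credentials     # assumed secret name
          key: awsAccessKeyId      # assumed secret key
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: s3-credentials
          key: awsSecretAccessKey
    s3_bucket: example-log-bucket  # assumed bucket name
    s3_region: eu-west-1           # assumed region
    path: logs/
    buffer:
      timekey: 1m                  # buffer.timekey is required by the schema above
      timekey_wait: 30s
      timekey_use_utc: true
```
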
diff --git a/config/crd/bases/logging.banzaicloud.com_flows.yaml b/config/crd/bases/logging.banzaicloud.com_flows.yaml
new file mode 100644
index 000000000..b68f6efec
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.com_flows.yaml
@@ -0,0 +1,142 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: flows.logging.banzaicloud.com
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.loggingRef
+ name: Logging
+ type: string
+ group: logging.banzaicloud.com
+ names:
+ kind: Flow
+ listKind: FlowList
+ plural: flows
+ singular: flow
+ scope: ""
+ subresources: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ filters:
+ items:
+ properties:
+ parser:
+ description: https://docs.fluentd.org/filter/parser
+ properties:
+ emit_invalid_record_to_error:
+ description: 'Emit invalid record to @ERROR label. Invalid
+ cases are: key not exist, format is not matched, unexpected
+ error'
+ type: boolean
+ hash_value_fiel:
+ description: Store parsed values as a hash value in a field.
+ type: string
+ inject_key_prefix:
+ description: Store parsed values with specified key name prefix.
+ type: string
+ key_name:
+ description: Specify field name in the record to parse.
+ type: string
+ parsers:
+ items:
+ properties:
+ estimate_current_event:
+ description: If true, use Fluent::EventTime.now(current
+ time) as a timestamp when time_key is specified.
+ type: boolean
+ expression:
+ description: Regexp expression to evaluate
+ type: string
+ keep_time_key:
+ description: If true, keep time field in the record.
+ type: boolean
+ null_empty_string:
+ description: If true, empty string field is replaced
+ with nil
+ type: boolean
+ null_value_pattern:
+ description: ' Specify null value pattern.'
+ type: string
+ time_key:
+ description: Specify time field for event time. If the
+ event doesn't have this field, current time is used.
+ type: string
+ type:
+ description: 'Parse type: apache2, apache_error, nginx,
+ syslog, csv, tsv, ltsv, json, multiline, none'
+ type: string
+ type: object
+ type: array
+ remove_key_name_field:
+                    description: Remove key_name field when parsing succeeds
+ type: boolean
+ replace_invalid_sequence:
+                    description: If true, invalid strings are replaced with safe
+                      characters and re-parsed.
+ type: boolean
+ reserve_data:
+ description: Keep original key-value pair in parsed result.
+ type: boolean
+ reserve_time:
+ description: Keep original event time in parsed result.
+ type: boolean
+ required:
+ - key_name
+ type: object
+ stdout:
+ type: object
+ tag_normaliser:
+ properties:
+ format:
+                      description: Re-tag log messages. More info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser)
+ type: string
+ type: object
+ type: object
+ type: array
+ loggingRef:
+ type: string
+ outputRefs:
+ items:
+ type: string
+ type: array
+ selectors:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - outputRefs
+ - selectors
+ type: object
+ status:
+ description: FlowStatus defines the observed state of Flow
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
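
Analogously, a Flow instantiating the schema above could look like this hedged sketch: it selects pods by label, applies the tag_normaliser and parser filters, and routes the matched logs to an Output by name. The namespace, label, parsed field name and output name are assumptions made for the example.

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Flow
metadata:
  name: nginx-flow            # illustrative name
  namespace: default          # illustrative namespace
spec:
  selectors:
    app: nginx                # assumed pod label to match
  filters:
    - tag_normaliser: {}
    - parser:
        key_name: log         # assumed record field to parse; key_name is required
        remove_key_name_field: true
        reserve_data: true
        parsers:
          - type: nginx       # one of the parse types listed in the schema
  outputRefs:
    - s3-output               # name of an Output resource (assumed)
```
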
diff --git a/config/crd/bases/logging.banzaicloud.com_loggings.yaml b/config/crd/bases/logging.banzaicloud.com_loggings.yaml
new file mode 100644
index 000000000..adcf63897
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.com_loggings.yaml
@@ -0,0 +1,332 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: loggings.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: Logging
+ listKind: LoggingList
+ plural: loggings
+ singular: logging
+ scope: Cluster
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: Logging is the Schema for the loggings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LoggingSpec defines the desired state of Logging
+ properties:
+ controlNamespace:
+ type: string
+ flowConfigCheckDisabled:
+ type: boolean
+ flowConfigOverride:
+ type: string
+ fluentbit:
+ description: FluentbitSpec defines the desired state of Fluentbit
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ image:
+ description: ImageSpec struct hold information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ targetHost:
+ type: string
+ targetPort:
+ format: int32
+ type: integer
+ tls:
+ description: FluentbitTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ type: object
+ fluentd:
+ description: FluentdSpec defines the desired state of Fluentd
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ configReloaderImage:
+ description: ImageSpec struct hold information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ disablePvc:
+ type: boolean
+ fluentdPvcSpec:
+ description: PersistentVolumeClaimSpec describes the common attributes
+ of storage devices and allows a Source for provider-specific attributes
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: This field requires the VolumeSnapshotDataSource
+ alpha feature gate to be enabled and currently VolumeSnapshot
+ is the only supported data source. If the provisioner can
+ support VolumeSnapshot data source, it will create a new volume
+ and data will be restored to the volume at the same time.
+ If the provisioner does not support VolumeSnapshot data source,
+ volume will not be created and the failure will be reported
+ as an event. In the future, we plan to support more data source
+ types and the behavior of the provisioner may change.
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being
+ referenced. If APIGroup is not specified, the specified
+ Kind must be in the core API group. For any other third-party
+ types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the
+ volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not included
+ in claim spec. This is a beta feature.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ image:
+ description: ImageSpec struct hold information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ port:
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ additionalProperties:
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ tls:
+ description: FluentdTLS defines the TLS configs
+ properties:
+ enabled:
+ type: boolean
+ secretName:
+ type: string
+ sharedKey:
+ type: string
+ required:
+ - enabled
+ - secretName
+ type: object
+ volumeModImage:
+ description: ImageSpec struct hold information about image specification
+ properties:
+ pullPolicy:
+ type: string
+ repository:
+ type: string
+ tag:
+ type: string
+ required:
+ - pullPolicy
+ - repository
+ - tag
+ type: object
+ type: object
+ loggingRef:
+ type: string
+ watchNamespaces:
+ items:
+ type: string
+ type: array
+ required:
+ - controlNamespace
+ type: object
+ status:
+ description: LoggingStatus defines the observed state of Logging
+ properties:
+ configCheckResults:
+ additionalProperties:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
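+# Illustrative only: a minimal Logging custom resource exercising the schema
+# above (a sketch, not generated output; metadata names, namespace, storage
+# class and TLS secret names are assumptions).
+#
+# apiVersion: logging.banzaicloud.com/v1alpha2
+# kind: Logging
+# metadata:
+#   name: default-logging
+# spec:
+#   controlNamespace: logging          # required by the schema above
+#   fluentd:
+#     fluentdPvcSpec:
+#       accessModes:
+#         - ReadWriteOnce
+#       resources:
+#         requests:
+#           storage: 20Gi
+#       storageClassName: standard
+#     tls:
+#       enabled: true
+#       secretName: fluentd-tls        # secretName is required when tls is set
+#   fluentbit:
+#     tls:
+#       enabled: true
+#       secretName: fluentbit-tls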
diff --git a/config/crd/bases/logging.banzaicloud.com_outputs.yaml b/config/crd/bases/logging.banzaicloud.com_outputs.yaml
new file mode 100644
index 000000000..20626f888
--- /dev/null
+++ b/config/crd/bases/logging.banzaicloud.com_outputs.yaml
@@ -0,0 +1,1876 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: outputs.logging.banzaicloud.com
+spec:
+ group: logging.banzaicloud.com
+ names:
+ kind: Output
+ listKind: OutputList
+ plural: outputs
+ singular: output
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: Output is the Schema for the outputs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OutputSpec defines the desired state of Output
+ properties:
+ azurestorage:
+ properties:
+ auto_create_container:
+                  description: 'Automatically create the container if it does not exist (default: true)'
+ type: boolean
+ azure_container:
+ description: Your azure storage container
+ type: string
+ azure_object_key_format:
+ description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ azure_storage_access_key:
+ description: Your azure storage access key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_account:
+ description: Your azure storage account
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ azure_storage_type:
+                  description: 'Azure storage type. Currently only "blob" is supported (default: blob)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ format:
+ description: 'Compat format type: out_file, json, ltsv (default:
+ out_file)'
+ type: string
+ path:
+ description: Path prefix of the files on Azure
+ type: string
+ store_as:
+ description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
+ type: string
+ required:
+ - azure_container
+ - azure_storage_access_key
+ - azure_storage_account
+ type: object
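+            # Illustrative only: an Output using the azurestorage plugin above (a
+            # sketch; the container and secret names are assumptions). It shows the
+            # valueFrom/secretKeyRef pattern used by the secret-typed fields and a
+            # buffer block, whose timekey field is required.
+            #
+            # apiVersion: logging.banzaicloud.com/v1alpha2
+            # kind: Output
+            # metadata:
+            #   name: azure-output
+            # spec:
+            #   azurestorage:
+            #     azure_container: example-logs
+            #     azure_storage_account:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: azure-credentials
+            #           key: storage-account
+            #     azure_storage_access_key:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: azure-credentials
+            #           key: access-key
+            #     buffer:
+            #       timekey: 1m
+            #       timekey_wait: 30s
+            #       timekey_use_utc: true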
+ elasticsearch:
+ description: Send your logs to Elasticsearch
+ properties:
+ application_name:
+ description: 'Specify the application name for the rollover index
+ to be created.(default: default)'
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ content_type:
+ description: 'With content_type application/x-ndjson, elasticsearch
+ plugin adds application/x-ndjson as Content-Type in payload. (default:
+ application/json)'
+ type: string
+ customize_template:
+ description: Specify the string and its value to be replaced in
+ form of hash. Can contain multiple key value pair that would be
+ replaced in the specified template_file. This setting only creates
+ template and to add rollover index please check the rollover_index
+ configuration.
+ type: string
+ deflector_alias:
+ description: Specify the deflector alias which would be assigned
+ to the rollover index created. This is useful in case of using
+ the Elasticsearch rollover API
+ type: string
+ fail_on_putting_template_retry_exceed:
+ description: 'Indicates whether to fail when max_retry_putting_template
+                    is exceeded. If you have multiple output plugins, you can use
+                    this property so that fluentd does not fail on startup. (default: true)'
+ type: boolean
+ host:
+ description: You can specify Elasticsearch host by this parameter.
+ (default:localhost)
+ type: string
+ hosts:
+ description: You can specify multiple Elasticsearch hosts with separator
+ ",". If you specify hosts option, host and port options are ignored.
+ type: string
+ http_backend:
+ description: 'With http_backend typhoeus, elasticsearch plugin uses
+ typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
+ (default: excon)'
+ type: string
+ id_key:
+ description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
+ type: string
+ include_index_in_url:
+ description: With this option set to true, Fluentd manifests the
+ index name in the request URL (rather than in the request body).
+                    You can use this option to enforce URL-based access control.
+ type: boolean
+ include_tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ false)'
+ type: boolean
+ include_timestamp:
+ description: Adds a @timestamp field to the log, following all settings
+ logstash_format does, except without the restrictions on index_name.
+ This allows one to log to an alias in Elasticsearch and utilize
+ the rollover API.
+ type: boolean
+ index_date_pattern:
+ description: 'Specify this to override the index date pattern for
+ creating a rollover index.(default: now/d)'
+ type: string
+ index_prefix:
+ description: Specify the index prefix for the rollover index to
+ be created.
+ type: string
+ logstash_dateformat:
+ description: 'Set the Logstash date format.(default: %Y.%m.%d)'
+ type: string
+ logstash_format:
+ description: 'Enable Logstash log format.(default: false)'
+ type: boolean
+ logstash_prefix:
+                  description: 'Set the Logstash prefix. (default: logstash)'
+ type: string
+ logstash_prefix_separator:
+ description: 'Set the Logstash prefix separator.(default: -)'
+ type: string
+ max_retry_get_es_version:
+ description: 'You can specify times of retry obtaining Elasticsearch
+ version.(default: 15)'
+ type: string
+ max_retry_putting_template:
+ description: 'You can specify times of retry putting template.(default:
+ 10)'
+ type: string
+ password:
+ description: Password for HTTP Basic authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ path:
+ description: Path for HTTP Basic authentication.
+ type: string
+ pipeline:
+ description: This param is to set a pipeline id of your elasticsearch
+ to be added into the request, you can configure ingest node.
+ type: string
+ port:
+ description: 'You can specify Elasticsearch port by this parameter.(default:
+ 9200)'
+ type: string
+ prefer_oj_serializer:
+ description: 'With default behavior, Elasticsearch client uses Yajl
+ as JSON encoder/decoder. Oj is the alternative high performance
+                    JSON encoder/decoder. When this parameter is set to true, Elasticsearch
+                    client uses Oj as JSON encoder/decoder. (default: false)'
+ type: boolean
+ reconnect_on_error:
+ description: 'Indicates that the plugin should reset connection
+ on any error (reconnect on next send). By default it will reconnect
+                    only on "host unreachable exceptions". We recommend setting this
+                    to true in the presence of elasticsearch shield. (default: false)'
+ type: boolean
+ reload_connections:
+ description: 'You can tune how the elasticsearch-transport host
+ reloading feature works.(default: true)'
+ type: boolean
+ reload_on_failure:
+ description: 'Indicates that the elasticsearch-transport will try
+ to reload the nodes addresses if there is a failure while making
+ the request, this can be useful to quickly remove a dead node
+ from the list of addresses.(default: false)'
+ type: boolean
+ remove_keys_on_update:
+ description: Remove keys on update will not update the configured
+ keys in elasticsearch when a record is being updated. This setting
+ only has any effect if the write operation is update or upsert.
+ type: string
+ remove_keys_on_update_key:
+ description: This setting allows remove_keys_on_update to be configured
+ with a key in each record, in much the same way as target_index_key
+ works.
+ type: string
+ request_timeout:
+ description: 'You can specify HTTP request timeout.(default: 5s)'
+ type: string
+ resurrect_after:
+ description: 'You can set in the elasticsearch-transport how often
+ dead connections from the elasticsearch-transport''s pool will
+ be resurrected.(default: 60s)'
+ type: string
+ retry_tag:
+ description: This setting allows custom routing of messages in response
+ to bulk request failures. The default behavior is to emit failed
+ records using the same tag that was provided.
+ type: string
+ rollover_index:
+ description: 'Specify this as true when an index with rollover capability
+ needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index'
+ type: boolean
+ routing_key:
+ description: Similar to parent_key config, will add _routing into
+ elasticsearch command if routing_key is set and the field does
+ exist in input event.
+ type: string
+ scheme:
+ description: 'Scheme for HTTP Basic authentication.(default: true)'
+ type: boolean
+ tag_key:
+ description: 'This will add the Fluentd tag in the JSON record.(default:
+ tag)'
+ type: string
+ target_index_key:
+ description: Tell this plugin to find the index name to write to
+ in the record under this key in preference to other mechanisms.
+ Key can be specified as path to nested record using dot ('.')
+ as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key
+ type: string
+ target_type_key:
+ description: 'Similar to target_index_key config, find the type
+ name to write to in the record under this key (or nested record).
+ If key not found in record - fallback to type_name.(default: true)'
+ type: string
+ template_file:
+ description: The path to the file containing the template to install.
+ type: string
+ template_name:
+ description: The name of the template to define. If a template by
+ the name given is already present, it will be left unchanged,
+ unless template_overwrite is set, in which case the template will
+ be updated.
+ type: string
+ template_overwrite:
+ description: 'Always update the template, even if it already exists.(default:
+ false)'
+ type: boolean
+ templates:
+ description: Specify index templates in form of hash. Can contain
+ multiple templates.
+ type: string
+ time_key:
+ description: By default, when inserting records in Logstash format,
+ @timestamp is dynamically created with the time at log ingestion.
+ If you'd like to use a custom time, include an @timestamp with
+ your record.
+ type: string
+ time_key_format:
+ description: The format of the time stamp field (@timestamp or what
+ you specify with time_key). This parameter only has an effect
+ when logstash_format is true as it only affects the name of the
+ index we write to.
+ type: string
+ time_parse_error_tag:
+ description: With logstash_format true, elasticsearch plugin parses
+ timestamp field for generating index name. If the record has invalid
+ timestamp value, this plugin emits an error event to @ERROR label
+ with time_parse_error_tag configured tag.
+ type: string
+ time_precision:
+ description: Should the record not include a time_key, define the
+ degree of sub-second time precision to preserve from the time
+ portion of the routed event.
+ type: string
+ user:
+ description: User for HTTP Basic authentication. This plugin will
+ escape required URL encoded characters within %{} placeholders.
+ e.g. %{demo+}
+ type: string
+ utc_index:
+ description: 'By default, the records inserted into index logstash-YYMMDD
+                    with UTC (Coordinated Universal Time). This option allows using
+                    local time if you set utc_index to false. (default: true)'
+ type: boolean
+ with_transporter_log:
+ description: 'This is debugging purpose option to enable to obtain
+ transporter layer log. (default: false)'
+ type: boolean
+ write_operation:
+ description: 'The write_operation can be any of: (index,create,update,upsert)(default:
+ index)'
+ type: string
+ type: object
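+            # Illustrative only: an Output using the elasticsearch plugin above (a
+            # sketch; host, port and credential names are assumptions).
+            #
+            # apiVersion: logging.banzaicloud.com/v1alpha2
+            # kind: Output
+            # metadata:
+            #   name: es-output
+            # spec:
+            #   elasticsearch:
+            #     host: elasticsearch.logging.svc
+            #     port: "9200"
+            #     logstash_format: true
+            #     user: elastic
+            #     password:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: es-credentials
+            #           key: password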
+ gcs:
+ properties:
+ acl:
+ description: 'Permission for the object in GCS: auth_read owner_full
+ owner_read private project_private public_read'
+ type: string
+ auto_create_bucket:
+                  description: 'Create GCS bucket if it does not exist (default: true)'
+ type: boolean
+ bucket:
+ description: Name of a GCS bucket
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ client_retries:
+ description: Number of times to retry requests on server error
+ type: integer
+ client_timeout:
+ description: Default timeout to use in requests
+ type: integer
+ credentials_json:
+ description: GCS service account credentials in JSON format
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ encryption_key:
+ description: Customer-supplied, AES-256 encryption key
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'Max length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ keyfile:
+ description: Path of GCS service account credentials JSON file
+ type: string
+ object_key_format:
+ description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ object_metadata:
+ description: User provided web-safe keys and arbitrary string values
+                    that will be returned with requests for the file as "x-goog-meta-"
+ response headers.
+ items:
+ properties:
+ key:
+ description: Key
+ type: string
+ value:
+ description: Value
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: Path prefix of the files on GCS
+ type: string
+ project:
+ description: Project identifier for GCS
+ type: string
+ storage_class:
+ description: 'Storage class of the file: dra nearline coldline multi_regional
+ regional standard'
+ type: string
+ store_as:
+ description: 'Archive format on GCS: gzip json text (default: gzip)'
+ type: string
+ transcoding:
+ description: Enable the decompressive form of transcoding
+ type: boolean
+ required:
+ - bucket
+ - project
+ type: object
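+            # Illustrative only: an Output using the gcs plugin above (a sketch; the
+            # project, bucket and secret names are assumptions). bucket and project
+            # are the only required fields.
+            #
+            # apiVersion: logging.banzaicloud.com/v1alpha2
+            # kind: Output
+            # metadata:
+            #   name: gcs-output
+            # spec:
+            #   gcs:
+            #     project: example-project
+            #     bucket: example-log-bucket
+            #     credentials_json:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: gcs-credentials
+            #           key: credentials.json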
+ loggingRef:
+ type: string
+ loki:
+ description: Fluentd output plugin to ship logs to a Loki server.
+ properties:
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ extra_labels:
+ description: 'Set of labels to include with every Loki stream.(default:
+ nil)'
+ type: boolean
+ password:
+ description: Specify password if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ tenant:
+ description: Loki is a multi-tenant log storage platform and all
+ requests sent must include a tenant.
+ type: string
+ url:
+ description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net)
+ type: string
+ username:
+ description: Specify a username if the Loki server requires authentication.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ type: object
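+            # Illustrative only: an Output using the loki plugin above (a sketch;
+            # the URL and secret names are assumptions).
+            #
+            # apiVersion: logging.banzaicloud.com/v1alpha2
+            # kind: Output
+            # metadata:
+            #   name: loki-output
+            # spec:
+            #   loki:
+            #     url: http://loki.logging.svc:3100
+            #     username:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: loki-credentials
+            #           key: username
+            #     password:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: loki-credentials
+            #           key: password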
+ nullout:
+ type: object
+ oss:
+ properties:
+ aaccess_key_secret:
+ description: Your access secret key
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ access_key_id:
+ description: Your access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ auto_create_bucket:
+                  description: 'Create OSS bucket if it does not exist (default: false)'
+ type: boolean
+ bucket:
+ description: Your bucket name
+ type: string
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of stored buffer reached this threshold,
+ all append operations will fail with error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_bucket:
+ description: 'Check bucket if exists or not (default: true)'
+ type: boolean
+ check_object:
+ description: 'Check object before creation (default: true)'
+ type: boolean
+ download_crc_enable:
+ description: 'Download crc enabled (default: true)'
+ type: boolean
+ endpoint:
+                  description: OSS endpoint to connect to
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ hex_random_length:
+ description: 'The length of `%{hex_random}` placeholder(4-16) (default:
+ 4)'
+ type: integer
+ index_format:
+ description: '`sprintf` format for `%{index}` (default: %d)'
+ type: string
+ key_format:
+ description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})'
+ type: string
+ open_timeout:
+ description: 'Timeout for open connections (default: 10)'
+ type: integer
+ oss_sdk_log_dir:
+ description: 'OSS SDK log directory (default: /var/log/td-agent)'
+ type: string
+ overwrite:
+ description: 'Overwrite already existing path (default: false)'
+ type: boolean
+ path:
+ description: 'Path prefix of the files on OSS (default: fluent/logs)'
+ type: string
+ read_timeout:
+ description: 'Timeout for read response (default: 120)'
+ type: integer
+ store_as:
+ description: 'Archive format on OSS: gzip, json, text, lzo, lzma2
+ (default: gzip)'
+ type: string
+ upload_crc_enable:
+ description: 'Upload crc enabled (default: true)'
+ type: boolean
+ warn_for_delay:
+ description: Given a threshold to treat events as delay, output
+ warning logs if delayed events were put into OSS
+ type: string
+ required:
+ - aaccess_key_secret
+ - access_key_id
+ - bucket
+ - endpoint
+ type: object
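+            # Illustrative only: an Output using the oss plugin above (a sketch; the
+            # endpoint, bucket and secret names are assumptions). Note that the secret
+            # field is spelled aaccess_key_secret in this schema.
+            #
+            # apiVersion: logging.banzaicloud.com/v1alpha2
+            # kind: Output
+            # metadata:
+            #   name: oss-output
+            # spec:
+            #   oss:
+            #     endpoint: oss-cn-hangzhou.aliyuncs.com
+            #     bucket: example-log-bucket
+            #     access_key_id:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: oss-credentials
+            #           key: access-key-id
+            #     aaccess_key_secret:
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: oss-credentials
+            #           key: access-key-secret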
+ s3:
+ properties:
+ acl:
+ description: Permission for the object in S3
+ type: string
+ assume_role_credentials:
+ description: assume_role_credentials
+ properties:
+ duration_seconds:
+ description: The duration, in seconds, of the role session (900-3600)
+ type: string
+ external_id:
+ description: A unique identifier that is used by third parties
+ when assuming roles in their customers' accounts.
+ type: string
+ policy:
+ description: An IAM policy in JSON format
+ type: string
+ role_arn:
+ description: The Amazon Resource Name (ARN) of the role to assume
+ type: string
+ role_session_name:
+ description: An identifier for the assumed role session
+ type: string
+ required:
+ - role_arn
+ - role_session_name
+ type: object
+ auto_create_bucket:
+                  description: Create S3 bucket if it does not exist
+ type: string
+ aws_iam_retries:
+ description: The number of attempts to load instance profile credentials
+ from the EC2 metadata service using IAM role
+ type: string
+ aws_key_id:
+ description: AWS access key id
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ aws_sec_key:
+ description: AWS secret key.
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ buffer:
+ properties:
+ chunk_full_threshold:
+ description: The percentage of chunk size threshold for flushing.
+ output plugin will flush the chunk when actual size reaches
+ chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
+ default)
+ type: string
+ chunk_limit_records:
+ description: The max number of events that each chunks can store
+ in it
+ type: integer
+ chunk_limit_size:
+ description: 'The max size of each chunks: events will be written
+ into chunks until the size of chunks become this size'
+ type: string
+ compress:
+ description: If you set this option to gzip, you can get Fluentd
+ to compress data records before writing to buffer chunks.
+ type: string
+ delayed_commit_timeout:
+ description: The timeout seconds until output plugin decides
+ that async write operation fails
+ type: string
+ disable_chunk_backup:
+ description: Instead of storing unrecoverable chunks in the
+ backup directory, just discard them. This option is new in
+ Fluentd v1.2.6.
+ type: boolean
+ flush_at_shutdown:
+ description: The value to specify to flush/write all buffer
+ chunks at shutdown, or not
+ type: boolean
+ flush_interval:
+ description: 'Default: 60s'
+ type: string
+ flush_mode:
+ description: 'Default: default (equals to lazy if time is specified
+ as chunk key, interval otherwise) lazy: flush/write chunks
+ once per timekey interval: flush/write chunks per specified
+ time via flush_interval immediate: flush/write chunks immediately
+ after events are appended into chunks'
+ type: string
+ flush_thread_burst_interval:
+ description: The sleep interval seconds of threads between flushes
+ when output plugin flushes waiting chunks next to next
+ type: string
+ flush_thread_count:
+ description: The number of threads of output plugins, which
+ is used to write chunks in parallel
+ type: integer
+ flush_thread_interval:
+ description: The sleep interval seconds of threads to wait next
+ flush trial (when no chunks are waiting)
+ type: string
+ overflow_action:
+ description: 'How output plugin behaves when its buffer queue
+ is full throw_exception: raise exception to show this error
+ in log block: block processing of input plugin to emit events
+ into that buffer drop_oldest_chunk: drop/purge oldest chunk
+ to accept newly incoming chunk'
+ type: string
+ path:
+ description: The path where buffer chunks are stored. The '*'
+ is replaced with random characters. This parameter is required.
+ type: string
+ queue_limit_length:
+ description: The queue length limitation of this buffer plugin
+ instance
+ type: integer
+ queued_chunks_limit_size:
+ description: Limit the number of queued chunks. If you set smaller
+ flush_interval, e.g. 1s, there are lots of small queued chunks
+ in buffer. This is not good with file buffer because it consumes
+ lots of fd resources when output destination has a problem.
+ This parameter mitigates such situations.
+ type: integer
+ retry_exponential_backoff_base:
+ description: The base number of exponential backoff for retries
+ type: string
+ retry_forever:
+ description: If true, plugin will ignore retry_timeout and retry_max_times
+ options and retry flushing forever
+ type: boolean
+ retry_max_interval:
+ description: The maximum interval seconds for exponential backoff
+ between retries while failing
+ type: string
+ retry_max_times:
+ description: The maximum number of times to retry to flush while
+ failing
+ type: integer
+ retry_randomize:
+ description: If true, output plugin will retry after randomized
+ interval not to do burst retries
+ type: boolean
+ retry_secondary_threshold:
+ description: The ratio of retry_timeout to switch to use secondary
+ while failing (Maximum valid value is 1.0)
+ type: string
+ retry_timeout:
+ description: The maximum seconds to retry to flush while failing,
+ until plugin discards buffer chunks
+ type: string
+ retry_type:
+ description: 'exponential_backoff: wait seconds will become
+ large exponentially per failures periodic: output plugin will
+ retry periodically with fixed intervals (configured via retry_wait)'
+ type: string
+ retry_wait:
+ description: Seconds to wait before next retry to flush, or
+ constant factor of exponential backoff
+ type: string
+ tags:
+ description: 'When tag is specified as buffer chunk key, output
+ plugin writes events into chunks separately per tags. (default:
+ tag,time)'
+ type: string
+ timekey:
+ description: Output plugin will flush chunks per specified time
+ (enabled when time is specified in chunk keys)
+ type: string
+ timekey_use_utc:
+ description: Output plugin decides to use UTC or not to format
+ placeholders using timekey
+ type: boolean
+ timekey_wait:
+ description: Output plugin writes chunks after timekey_wait
+ seconds later after timekey expiration
+ type: string
+ timekey_zone:
+ description: The timezone (-0700 or Asia/Tokyo) string for formatting
+ timekey placeholders
+ type: string
+ total_limit_size:
+ description: The size limitation of this buffer plugin instance.
+ Once the total size of the stored buffer reaches this threshold,
+ all append operations will fail with an error (and data will
+ be lost)
+ type: string
+ type:
+ description: Fluentd core bundles memory and file plugins. 3rd
+ party plugins are also available when installed.
+ type: string
+ required:
+ - timekey
+ type: object
+ check_apikey_on_start:
+ description: Check AWS key on start
+ type: string
+ check_bucket:
+ description: Check whether the bucket exists
+ type: string
+ check_object:
+ description: Check object before creation
+ type: string
+ compute_checksums:
+ description: AWS SDK uses MD5 for API request/response by default
+ type: string
+ enable_transfer_acceleration:
+ description: 'If true, S3 Transfer Acceleration will be enabled
+ for uploads. IMPORTANT: You must first enable this feature on
+ your destination S3 bucket'
+ type: string
+ force_path_style:
+ description: If true, the bucket name is always left in the request
+ URI and never moved to the host as a sub-domain
+ type: string
+ format:
+ properties:
+ type:
+ description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
+ (default: json)'
+ enum:
+ - out_file
+ - json
+ - ltsv
+ - csv
+ - msgpack
+ - hash
+ - single_value
+ type: string
+ type: object
+ grant_full_control:
+ description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions
+ on the object
+ type: string
+ grant_read:
+ description: Allows grantee to read the object data and its metadata
+ type: string
+ grant_read_acp:
+ description: Allows grantee to read the object ACL
+ type: string
+ grant_write_acp:
+ description: Allows grantee to write the ACL for the applicable
+ object
+ type: string
+ hex_random_length:
+ description: The length of the `%{hex_random}` placeholder (4-16)
+ type: string
+ index_format:
+ description: '`sprintf` format for `%{index}`'
+ type: string
+ instance_profile_credentials:
+ description: instance_profile_credentials
+ properties:
+ http_open_timeout:
+ description: Number of seconds to wait for the connection to
+ open
+ type: string
+ http_read_timeout:
+ description: Number of seconds to wait for one block to be read
+ type: string
+ ip_address:
+ description: IP address (default: 169.254.169.254)
+ type: string
+ port:
+ description: Port number (default: 80)
+ type: string
+ retries:
+ description: Number of times to retry when retrieving credentials
+ type: string
+ type: object
+ overwrite:
+ description: Overwrite already existing path
+ type: string
+ path:
+ description: Path prefix of the files on S3
+ type: string
+ proxy_uri:
+ description: URI of proxy environment
+ type: string
+ s3_bucket:
+ description: S3 bucket name
+ type: string
+ s3_endpoint:
+ description: Custom S3 endpoint (like minio)
+ type: string
+ s3_metadata:
+ description: Arbitrary S3 metadata headers to set for the object
+ type: string
+ s3_object_key_format:
+ description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})'
+ type: string
+ s3_region:
+ description: S3 region name
+ type: string
+ shared_credentials:
+ description: shared_credentials
+ properties:
+ path:
+ description: 'Path to the shared file. (default: $HOME/.aws/credentials)'
+ type: string
+ profile_name:
+ description: Profile name. Defaults to 'default' or ENV['AWS_PROFILE']
+ type: string
+ type: object
+ signature_version:
+ description: Signature version for API requests (s3, v4)
+ type: string
+ sse_customer_algorithm:
+ description: Specifies the algorithm to use when encrypting the
+ object
+ type: string
+ sse_customer_key:
+ description: Specifies the customer-provided encryption key for
+ Amazon S3 to use in encrypting data
+ type: string
+ sse_customer_key_md5:
+ description: Specifies the 128-bit MD5 digest of the encryption
+ key according to RFC 1321
+ type: string
+ ssekms_key_id:
+ description: Specifies the AWS KMS key ID to use for object encryption
+ type: string
+ ssl_verify_peer:
+ description: If false, the certificate of endpoint will not be verified
+ type: string
+ storage_class:
+ description: The type of storage to use for the object (STANDARD, REDUCED_REDUNDANCY, STANDARD_IA)
+ type: string
+ store_as:
+ description: Archive format on S3
+ type: string
+ use_bundled_cert:
+ description: Use aws-sdk-ruby bundled cert
+ type: string
+ use_server_side_encryption:
+ description: The Server-side encryption algorithm used when storing
+ this object in S3 (AES256, aws:kms)
+ type: string
+ warn_for_delay:
+ description: Given a threshold to treat events as delayed, output
+ warning logs if delayed events were put into S3
+ type: string
+ required:
+ - s3_bucket
+ type: object
+ sumologic:
+ properties:
+ add_timestamp:
+ description: 'Add timestamp (or timestamp_key) field to logs before
+ sending to sumologic (default: true)'
+ type: boolean
+ data_type:
+ description: 'The type of data that will be sent to Sumo Logic,
+ either logs or metrics (default: logs)'
+ type: string
+ disable_cookies:
+ description: 'Option to disable cookies on the HTTP Client. (default:
+ false)'
+ type: boolean
+ endpoint:
+ description: SumoLogic HTTP Collector URL
+ properties:
+ value:
+ type: string
+ valueFrom:
+ properties:
+ secretKeyRef:
+ properties:
+ key:
+ description: Secret key for the value
+ type: string
+ name:
+ description: Name of the kubernetes secret
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ required:
+ - valueFrom
+ type: object
+ log_format:
+ description: 'Format to post logs into Sumo. (default: json)'
+ type: string
+ log_key:
+ description: 'Used to specify the key when merging json or sending
+ logs in text format (default: message)'
+ type: string
+ metric_data_format:
+ description: 'The format of metrics you will be sending, either
+ graphite or carbon2 or prometheus (default: graphite)'
+ type: string
+ open_timeout:
+ description: 'Set timeout seconds to wait until connection is opened.
+ (default: 60)'
+ type: integer
+ proxy_uri:
+ description: Add the URI of the proxy environment if present.
+ type: string
+ source_category:
+ description: 'Set _sourceCategory metadata field within SumoLogic
+ (default: nil)'
+ type: string
+ source_host:
+ description: 'Set _sourceHost metadata field within SumoLogic (default:
+ nil)'
+ type: string
+ source_name:
+ description: Set _sourceName metadata field within SumoLogic - overrides
+ source_name_key (default is nil)
+ type: string
+ source_name_key:
+ description: 'Set as source::path_key''s value so that the source_name
+ can be extracted from Fluentd''s buffer (default: source_name)'
+ type: string
+ timestamp_key:
+ description: 'Field name when add_timestamp is on (default: timestamp)'
+ type: string
+ verify_ssl:
+ description: 'Verify ssl certificate. (default: true)'
+ type: boolean
+ required:
+ - endpoint
+ - source_name
+ type: object
+ required:
+ - sumologic
+ type: object
+ status:
+ description: OutputStatus defines the observed state of Output
+ type: object
+ type: object
+ version: v1alpha2
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
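For reference, a minimal Output manifest satisfying the schema above might look like the sketch below. Per the generated schema, `sumologic` is listed as required at the spec level, and within it `endpoint` (supplied via `valueFrom`) and `source_name` are required; the secret name `sumo-collector` and its `url` key are hypothetical placeholders.

```yaml
# Hypothetical sample Output; resource and secret names are placeholders.
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Output
metadata:
  name: output-sumologic-sample
spec:
  sumologic:
    source_name: my-source            # required by the schema above
    endpoint:                         # required; must be set via valueFrom
      valueFrom:
        secretKeyRef:
          name: sumo-collector        # hypothetical Kubernetes secret
          key: url
```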
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
new file mode 100644
index 000000000..e0d001dd1
--- /dev/null
+++ b/config/crd/kustomization.yaml
@@ -0,0 +1,37 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/logging.banzaicloud.com_fluentbits.yaml
+- bases/logging.banzaicloud.com_flows.yaml
+- bases/logging.banzaicloud.com_clusterflows.yaml
+- bases/logging.banzaicloud.com_outputs.yaml
+- bases/logging.banzaicloud.com_clusteroutputs.yaml
+- bases/logging.banzaicloud.com_fluentds.yaml
+- bases/logging.banzaicloud.com_loggings.yaml
+# +kubebuilder:scaffold:crdkustomizeresource
+
+patches:
+# [WEBHOOK] patches here are for enabling the conversion webhook for each CRD
+#- patches/webhook_in_fluentbits.yaml
+#- patches/webhook_in_flows.yaml
+#- patches/webhook_in_clusterflows.yaml
+#- patches/webhook_in_outputs.yaml
+#- patches/webhook_in_clusteroutputs.yaml
+#- patches/webhook_in_fluentds.yaml
+#- patches/webhook_in_loggings.yaml
+# +kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CAINJECTION] patches here are for enabling the CA injection for each CRD
+#- patches/cainjection_in_fluentbits.yaml
+#- patches/cainjection_in_flows.yaml
+#- patches/cainjection_in_clusterflows.yaml
+#- patches/cainjection_in_outputs.yaml
+#- patches/cainjection_in_clusteroutputs.yaml
+#- patches/cainjection_in_fluentds.yaml
+#- patches/cainjection_in_loggings.yaml
+# +kubebuilder:scaffold:crdkustomizecainjectionpatch
+
+# The following config teaches kustomize how to apply kustomizations to CRDs.
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml
new file mode 100644
index 000000000..6f83d9a94
--- /dev/null
+++ b/config/crd/kustomizeconfig.yaml
@@ -0,0 +1,17 @@
+# This file teaches kustomize how to substitute name and namespace references in CRDs
+nameReference:
+- kind: Service
+ version: v1
+ fieldSpecs:
+ - kind: CustomResourceDefinition
+ group: apiextensions.k8s.io
+ path: spec/conversion/webhookClientConfig/service/name
+
+namespace:
+- kind: CustomResourceDefinition
+ group: apiextensions.k8s.io
+ path: spec/conversion/webhookClientConfig/service/namespace
+ create: false
+
+varReference:
+- path: metadata/annotations
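To illustrate what this substitution targets: if the conversion webhook patches were enabled, kustomize would rewrite the service reference embedded in each CRD using the name prefix and namespace configured in config/default, roughly as sketched below (a sketch under those assumptions, not output committed in this patch).

```yaml
# Approximate conversion stanza after `kustomize build config/default`,
# assuming namePrefix "logging-operator-ws-" and namespace "logging-operator-ws-system".
spec:
  conversion:
    strategy: Webhook
    webhookClientConfig:
      service:
        name: logging-operator-ws-webhook-service
        namespace: logging-operator-ws-system
        path: /convert
```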
diff --git a/config/crd/patches/cainjection_in_clusterflows.yaml b/config/crd/patches/cainjection_in_clusterflows.yaml
new file mode 100644
index 000000000..85f4630bb
--- /dev/null
+++ b/config/crd/patches/cainjection_in_clusterflows.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: clusterflows.logging.banzaicloud.com
diff --git a/config/crd/patches/cainjection_in_clusteroutputs.yaml b/config/crd/patches/cainjection_in_clusteroutputs.yaml
new file mode 100644
index 000000000..ac861e7a3
--- /dev/null
+++ b/config/crd/patches/cainjection_in_clusteroutputs.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: clusteroutputs.logging.banzaicloud.com
diff --git a/config/crd/patches/cainjection_in_flows.yaml b/config/crd/patches/cainjection_in_flows.yaml
new file mode 100644
index 000000000..1987bf665
--- /dev/null
+++ b/config/crd/patches/cainjection_in_flows.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: flows.logging.banzaicloud.com
diff --git a/config/crd/patches/cainjection_in_fluentbits.yaml b/config/crd/patches/cainjection_in_fluentbits.yaml
new file mode 100644
index 000000000..58413f77e
--- /dev/null
+++ b/config/crd/patches/cainjection_in_fluentbits.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: fluentbits.logging.banzaicloud.com
diff --git a/config/crd/patches/cainjection_in_fluentds.yaml b/config/crd/patches/cainjection_in_fluentds.yaml
new file mode 100644
index 000000000..f8267440c
--- /dev/null
+++ b/config/crd/patches/cainjection_in_fluentds.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: fluentds.logging.banzaicloud.com
diff --git a/config/crd/patches/cainjection_in_loggings.yaml b/config/crd/patches/cainjection_in_loggings.yaml
new file mode 100644
index 000000000..dfa435d65
--- /dev/null
+++ b/config/crd/patches/cainjection_in_loggings.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: loggings.logging.banzaicloud.com
diff --git a/config/crd/patches/cainjection_in_outputs.yaml b/config/crd/patches/cainjection_in_outputs.yaml
new file mode 100644
index 000000000..331f970c0
--- /dev/null
+++ b/config/crd/patches/cainjection_in_outputs.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+ name: outputs.logging.banzaicloud.com
diff --git a/config/crd/patches/webhook_in_clusterflows.yaml b/config/crd/patches/webhook_in_clusterflows.yaml
new file mode 100644
index 000000000..d23a4f293
--- /dev/null
+++ b/config/crd/patches/webhook_in_clusterflows.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterflows.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_clusteroutputs.yaml b/config/crd/patches/webhook_in_clusteroutputs.yaml
new file mode 100644
index 000000000..fdcc7e7f5
--- /dev/null
+++ b/config/crd/patches/webhook_in_clusteroutputs.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusteroutputs.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_flows.yaml b/config/crd/patches/webhook_in_flows.yaml
new file mode 100644
index 000000000..72512a3c7
--- /dev/null
+++ b/config/crd/patches/webhook_in_flows.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: flows.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_fluentbits.yaml b/config/crd/patches/webhook_in_fluentbits.yaml
new file mode 100644
index 000000000..ae64a1ab5
--- /dev/null
+++ b/config/crd/patches/webhook_in_fluentbits.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: fluentbits.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_fluentds.yaml b/config/crd/patches/webhook_in_fluentds.yaml
new file mode 100644
index 000000000..a6de204b4
--- /dev/null
+++ b/config/crd/patches/webhook_in_fluentds.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: fluentds.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_loggings.yaml b/config/crd/patches/webhook_in_loggings.yaml
new file mode 100644
index 000000000..8ec98a06e
--- /dev/null
+++ b/config/crd/patches/webhook_in_loggings.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: loggings.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/crd/patches/webhook_in_outputs.yaml b/config/crd/patches/webhook_in_outputs.yaml
new file mode 100644
index 000000000..140e7c7bb
--- /dev/null
+++ b/config/crd/patches/webhook_in_outputs.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: outputs.logging.banzaicloud.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
new file mode 100644
index 000000000..072ac400d
--- /dev/null
+++ b/config/default/kustomization.yaml
@@ -0,0 +1,43 @@
+# Adds namespace to all resources.
+namespace: logging-operator-ws-system
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match the prefix (text before '-') of the namespace
+# field above.
+namePrefix: logging-operator-ws-
+
+# Labels to add to all resources and selectors.
+#commonLabels:
+# someName: someValue
+
+bases:
+- ../crd
+- ../rbac
+- ../manager
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
+#- ../webhook
+# [CERTMANAGER] To enable cert-manager, uncomment next line. 'WEBHOOK' components are required.
+#- ../certmanager
+
+patches:
+- manager_image_patch.yaml
+ # Protect the /metrics endpoint by putting it behind auth.
+ # Only one of manager_auth_proxy_patch.yaml and
+ # manager_prometheus_metrics_patch.yaml should be enabled.
+- manager_auth_proxy_patch.yaml
+ # If you want your controller-manager to expose the /metrics
+ # endpoint w/o any authn/z, uncomment the following line and
+ # comment manager_auth_proxy_patch.yaml.
+ # Only one of manager_auth_proxy_patch.yaml and
+ # manager_prometheus_metrics_patch.yaml should be enabled.
+#- manager_prometheus_metrics_patch.yaml
+
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
+#- manager_webhook_patch.yaml
+
+# [CAINJECTION] Uncomment next line to enable the CA injection in the admission webhooks.
+# Uncomment 'CAINJECTION' in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
+# 'CERTMANAGER' needs to be enabled to use ca injection
+#- webhookcainjection_patch.yaml
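As a quick illustration of the namespace and namePrefix settings above: the controller-manager Deployment defined in config/manager comes out of `kustomize build config/default` with the prefix applied, roughly as follows (metadata only, a sketch rather than committed output).

```yaml
# Approximate rendered metadata for the manager Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logging-operator-ws-controller-manager
  namespace: logging-operator-ws-system
```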
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
new file mode 100644
index 000000000..d3994fb91
--- /dev/null
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -0,0 +1,24 @@
+# This patch injects a sidecar container that acts as an HTTP proxy for the controller manager;
+# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: kube-rbac-proxy
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
+ args:
+ - "--secure-listen-address=0.0.0.0:8443"
+ - "--upstream=http://127.0.0.1:8080/"
+ - "--logtostderr=true"
+ - "--v=10"
+ ports:
+ - containerPort: 8443
+ name: https
+ - name: manager
+ args:
+ - "--metrics-addr=127.0.0.1:8080"
diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml
new file mode 100644
index 000000000..eb909570e
--- /dev/null
+++ b/config/default/manager_image_patch.yaml
@@ -0,0 +1,12 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ # Change the value of image field below to your controller image URL
+ - image: IMAGE_URL
+ name: manager
diff --git a/config/default/manager_prometheus_metrics_patch.yaml b/config/default/manager_prometheus_metrics_patch.yaml
new file mode 100644
index 000000000..0b96c6813
--- /dev/null
+++ b/config/default/manager_prometheus_metrics_patch.yaml
@@ -0,0 +1,19 @@
+# This patch enables Prometheus scraping for the manager pod.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ metadata:
+ annotations:
+ prometheus.io/scrape: 'true'
+ spec:
+ containers:
+ # Expose the Prometheus metrics on the default port
+ - name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml
new file mode 100644
index 000000000..f2f7157b4
--- /dev/null
+++ b/config/default/manager_webhook_patch.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: manager
+ ports:
+ - containerPort: 443
+ name: webhook-server
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: webhook-server-cert
diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml
new file mode 100644
index 000000000..f6d71cb76
--- /dev/null
+++ b/config/default/webhookcainjection_patch.yaml
@@ -0,0 +1,15 @@
+# This patch adds an annotation to the admission webhook configs;
+# the variables $(NAMESPACE) and $(CERTIFICATENAME) will be substituted by kustomize.
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: mutating-webhook-configuration
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
+---
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: validating-webhook-configuration
+ annotations:
+ certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME)
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
new file mode 100644
index 000000000..5c5f0b84c
--- /dev/null
+++ b/config/manager/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- manager.yaml
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
new file mode 100644
index 000000000..b6c85a52d
--- /dev/null
+++ b/config/manager/manager.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ control-plane: controller-manager
+ name: system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+ labels:
+ control-plane: controller-manager
+spec:
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ control-plane: controller-manager
+ spec:
+ containers:
+ - command:
+ - /manager
+ args:
+ - --enable-leader-election
+ image: controller:latest
+ name: manager
+ resources:
+ limits:
+ cpu: 100m
+ memory: 30Mi
+ requests:
+ cpu: 100m
+ memory: 20Mi
+ terminationGracePeriodSeconds: 10
diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml
new file mode 100644
index 000000000..618f5e417
--- /dev/null
+++ b/config/rbac/auth_proxy_role.yaml
@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: proxy-role
+rules:
+- apiGroups: ["authentication.k8s.io"]
+ resources:
+ - tokenreviews
+ verbs: ["create"]
+- apiGroups: ["authorization.k8s.io"]
+ resources:
+ - subjectaccessreviews
+ verbs: ["create"]
diff --git a/deploy/clusterrole_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml
similarity index 64%
rename from deploy/clusterrole_binding.yaml
rename to config/rbac/auth_proxy_role_binding.yaml
index d4cca39a2..48ed1e4b8 100644
--- a/deploy/clusterrole_binding.yaml
+++ b/config/rbac/auth_proxy_role_binding.yaml
@@ -1,12 +1,12 @@
-kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
metadata:
- name: logging-operator
-subjects:
-- kind: ServiceAccount
- name: logging-operator
- namespace: default
+ name: proxy-rolebinding
roleRef:
- kind: ClusterRole
- name: logging-operator
apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: proxy-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: system
diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml
new file mode 100644
index 000000000..d61e5469f
--- /dev/null
+++ b/config/rbac/auth_proxy_service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "8443"
+ prometheus.io/scheme: https
+ prometheus.io/scrape: "true"
+ labels:
+ control-plane: controller-manager
+ name: controller-manager-metrics-service
+ namespace: system
+spec:
+ ports:
+ - name: https
+ port: 8443
+ targetPort: https
+ selector:
+ control-plane: controller-manager
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
new file mode 100644
index 000000000..817f1fe61
--- /dev/null
+++ b/config/rbac/kustomization.yaml
@@ -0,0 +1,11 @@
+resources:
+- role.yaml
+- role_binding.yaml
+- leader_election_role.yaml
+- leader_election_role_binding.yaml
+# Comment out the following 3 lines if you want to disable
+# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
+# which protects your /metrics endpoint.
+- auth_proxy_service.yaml
+- auth_proxy_role.yaml
+- auth_proxy_role_binding.yaml
diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml
new file mode 100644
index 000000000..85093a8c2
--- /dev/null
+++ b/config/rbac/leader_election_role.yaml
@@ -0,0 +1,26 @@
+# permissions to do leader election.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: leader-election-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - update
+ - patch
diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml
new file mode 100644
index 000000000..eed16906f
--- /dev/null
+++ b/config/rbac/leader_election_role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: leader-election-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: system
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
new file mode 100644
index 000000000..3843ff6ac
--- /dev/null
+++ b/config/rbac/role.yaml
@@ -0,0 +1,28 @@
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: manager-role
+rules:
+- apiGroups:
+ - logging.banzaicloud.com
+ resources:
+ - loggings
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - logging.banzaicloud.com
+ resources:
+ - loggings/status
+ verbs:
+ - get
+ - patch
+ - update
diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml
new file mode 100644
index 000000000..8f2658702
--- /dev/null
+++ b/config/rbac/role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: manager-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: system
diff --git a/config/samples/logging_v1alpha2_cluster_output_custom.yaml b/config/samples/logging_v1alpha2_cluster_output_custom.yaml
new file mode 100644
index 000000000..f74e562b3
--- /dev/null
+++ b/config/samples/logging_v1alpha2_cluster_output_custom.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: control
+---
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: ClusterOutput
+metadata:
+ name: output-custom-cluster
+ namespace: control
+spec:
+ loggingRef: customref
+ nullout: {}
\ No newline at end of file
diff --git a/config/samples/logging_v1alpha2_flow.yaml b/config/samples/logging_v1alpha2_flow.yaml
new file mode 100644
index 000000000..00c4657f2
--- /dev/null
+++ b/config/samples/logging_v1alpha2_flow.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: custom
+---
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+ name: flow-sample
+spec:
+ selectors:
+ sampleKey: sampleValue
+ filters:
+ - stdout: {}
+ outputRefs:
+ - "output-sample"
diff --git a/config/samples/logging_v1alpha2_flow_custom.yaml b/config/samples/logging_v1alpha2_flow_custom.yaml
new file mode 100644
index 000000000..07cdde076
--- /dev/null
+++ b/config/samples/logging_v1alpha2_flow_custom.yaml
@@ -0,0 +1,14 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+ name: flow-custom
+ namespace: custom
+spec:
+ loggingRef: customref
+ selectors:
+ sampleKey: customValue
+ filters:
+ - stdout: {}
+ outputRefs:
+ - "output-custom"
+ - "output-custom-cluster"
diff --git a/config/samples/logging_v1alpha2_logging_custom.yaml b/config/samples/logging_v1alpha2_logging_custom.yaml
new file mode 100644
index 000000000..b1af230d1
--- /dev/null
+++ b/config/samples/logging_v1alpha2_logging_custom.yaml
@@ -0,0 +1,12 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: customlogging
+spec:
+ loggingRef: customref
+ fluentd: {
+ disablePvc: true
+ }
+ watchNamespaces: ["custom"]
+ controlNamespace: control
+
diff --git a/config/samples/logging_v1alpha2_logging_default.yaml b/config/samples/logging_v1alpha2_logging_default.yaml
new file mode 100644
index 000000000..6e242e798
--- /dev/null
+++ b/config/samples/logging_v1alpha2_logging_default.yaml
@@ -0,0 +1,11 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: defaultlogging
+spec:
+ fluentd: {
+ disablePvc: true
+ }
+ fluentbit: {}
+ controlNamespace: default
+
diff --git a/config/samples/logging_v1alpha2_output.yaml b/config/samples/logging_v1alpha2_output.yaml
new file mode 100644
index 000000000..e36b91f2c
--- /dev/null
+++ b/config/samples/logging_v1alpha2_output.yaml
@@ -0,0 +1,6 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: output-sample
+spec:
+ nullout: {}
\ No newline at end of file
diff --git a/config/samples/logging_v1alpha2_output_custom.yaml b/config/samples/logging_v1alpha2_output_custom.yaml
new file mode 100644
index 000000000..8cb8a0710
--- /dev/null
+++ b/config/samples/logging_v1alpha2_output_custom.yaml
@@ -0,0 +1,8 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: output-custom
+ namespace: custom
+spec:
+ loggingRef: customref
+ nullout: {}
\ No newline at end of file
diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml
new file mode 100644
index 000000000..9cf26134e
--- /dev/null
+++ b/config/webhook/kustomization.yaml
@@ -0,0 +1,6 @@
+resources:
+- manifests.yaml
+- service.yaml
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml
new file mode 100644
index 000000000..25e21e3c9
--- /dev/null
+++ b/config/webhook/kustomizeconfig.yaml
@@ -0,0 +1,25 @@
+# The following config teaches kustomize where to look when substituting vars.
+# It requires kustomize v2.1.0 or newer to work properly.
+nameReference:
+- kind: Service
+ version: v1
+ fieldSpecs:
+ - kind: MutatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/name
+ - kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/name
+
+namespace:
+- kind: MutatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/namespace
+ create: true
+- kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ path: webhooks/clientConfig/service/namespace
+ create: true
+
+varReference:
+- path: metadata/annotations
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml
new file mode 100644
index 000000000..b4861025a
--- /dev/null
+++ b/config/webhook/service.yaml
@@ -0,0 +1,12 @@
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: webhook-service
+ namespace: system
+spec:
+ ports:
+ - port: 443
+ targetPort: 443
+ selector:
+ control-plane: controller-manager
diff --git a/controllers/logging_controller.go b/controllers/logging_controller.go
new file mode 100644
index 000000000..a3bb8534c
--- /dev/null
+++ b/controllers/logging_controller.go
@@ -0,0 +1,283 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers
+
+import (
+ "bytes"
+ "context"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/pkg/model/render"
+ "github.com/banzaicloud/logging-operator/pkg/resources"
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentbit"
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentd"
+ "github.com/banzaicloud/logging-operator/pkg/resources/model"
+ "github.com/go-logr/logr"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+
+ loggingv1alpha2 "github.com/banzaicloud/logging-operator/api/v1alpha2"
+)
+
+// LoggingReconciler reconciles a Logging object
+type LoggingReconciler struct {
+ client.Client
+ Log logr.Logger
+}
+
+// +kubebuilder:rbac:groups=logging.banzaicloud.com,resources=loggings,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=logging.banzaicloud.com,resources=loggings/status,verbs=get;update;patch
+
+func (r *LoggingReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
+ _ = context.Background()
+ log := r.Log.WithValues("logging", req.NamespacedName)
+
+ logging := &loggingv1alpha2.Logging{}
+ err := r.Client.Get(context.TODO(), req.NamespacedName, logging)
+ if err != nil {
+ // Object not found, return. Created objects are automatically garbage collected.
+ // For additional cleanup logic use finalizers.
+ if apierrors.IsNotFound(err) {
+ return reconcile.Result{}, nil
+ }
+ return reconcile.Result{}, err
+ }
+
+ logging = logging.SetDefaults()
+
+ fluentdConfig, err := r.clusterConfiguration(logging)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ log.V(1).Info("flow configuration", "config", fluentdConfig)
+
+ reconcilers := make([]resources.ComponentReconciler, 0)
+
+ if logging.Spec.FluentdSpec != nil {
+ reconcilers = append(reconcilers, fluentd.New(r.Client, r.Log, logging, &fluentdConfig).Reconcile)
+ }
+
+ if logging.Spec.FluentbitSpec != nil {
+ reconcilers = append(reconcilers, fluentbit.New(r.Client, r.Log, logging).Reconcile)
+ }
+
+ for _, rec := range reconcilers {
+ result, err := rec()
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+ if result != nil {
+ // short circuit if requested explicitly
+ return *result, err
+ }
+ }
+
+ return ctrl.Result{}, nil
+}
+
+func (r *LoggingReconciler) clusterConfiguration(logging *loggingv1alpha2.Logging) (string, error) {
+ if logging.Spec.FlowConfigOverride != "" {
+ return logging.Spec.FlowConfigOverride, nil
+ }
+ loggingResources, err := r.GetResources(logging)
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to get logging resources", "logging", logging)
+ }
+ builder, err := loggingResources.CreateModel()
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to create model", "logging", logging)
+ }
+ fluentConfig, err := builder.Build()
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to build model", "logging", logging)
+ }
+ output := &bytes.Buffer{}
+ renderer := render.FluentRender{
+ Out: output,
+ Indent: 2,
+ }
+ err = renderer.Render(fluentConfig)
+ if err != nil {
+ return "", errors.WrapIfWithDetails(err, "failed to render fluentd config", "logging", logging)
+ }
+ return output.String(), nil
+}
+
+func SetupLoggingWithManager(mgr ctrl.Manager, logger logr.Logger) *ctrl.Builder {
+ clusterOutputSource := &source.Kind{Type: &loggingv1alpha2.ClusterOutput{}}
+ clusterFlowSource := &source.Kind{Type: &loggingv1alpha2.ClusterFlow{}}
+ outputSource := &source.Kind{Type: &loggingv1alpha2.Output{}}
+ flowSource := &source.Kind{Type: &loggingv1alpha2.Flow{}}
+
+ requestMapper := &handler.EnqueueRequestsFromMapFunc{
+ ToRequests: handler.ToRequestsFunc(func(mapObject handler.MapObject) []reconcile.Request {
+ object, err := meta.Accessor(mapObject.Object)
+ if err != nil {
+ return nil
+ }
+ // get all the logging resources from the cache
+ loggingList := &loggingv1alpha2.LoggingList{}
+ err = mgr.GetCache().List(context.TODO(), loggingList)
+ if err != nil {
+ logger.Error(err, "failed to list logging resources")
+ return nil
+ }
+ if o, ok := object.(*loggingv1alpha2.ClusterOutput); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ if o, ok := object.(*loggingv1alpha2.Output); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ if o, ok := object.(*loggingv1alpha2.Flow); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ if o, ok := object.(*loggingv1alpha2.ClusterFlow); ok {
+ return reconcileRequestsForLoggingRef(loggingList, o.Spec.LoggingRef)
+ }
+ return nil
+ }),
+ }
+
+ builder := ctrl.NewControllerManagedBy(mgr).
+ For(&loggingv1alpha2.Logging{}).
+ Owns(&corev1.Pod{}).
+ Watches(clusterOutputSource, requestMapper).
+ Watches(clusterFlowSource, requestMapper).
+ Watches(outputSource, requestMapper).
+ Watches(flowSource, requestMapper)
+
+ FluentdWatches(builder)
+ FluentbitWatches(builder)
+
+ return builder
+}
+
+func reconcileRequestsForLoggingRef(loggingList *loggingv1alpha2.LoggingList, loggingRef string) []reconcile.Request {
+ filtered := make([]reconcile.Request, 0)
+ for _, l := range loggingList.Items {
+ if l.Spec.LoggingRef == loggingRef {
+ filtered = append(filtered, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ // this happens to be empty as long as Logging is cluster scoped
+ Namespace: l.Namespace,
+ Name: l.Name,
+ },
+ })
+ }
+ }
+ return filtered
+}
+
+func FluentdWatches(builder *ctrl.Builder) *ctrl.Builder {
+ return builder.
+ Owns(&corev1.ConfigMap{}).
+ Owns(&corev1.Service{}).
+ Owns(&appsv1.Deployment{}).
+ Owns(&rbacv1.ClusterRole{}).
+ Owns(&rbacv1.ClusterRoleBinding{}).
+ Owns(&corev1.ServiceAccount{})
+}
+
+func FluentbitWatches(builder *ctrl.Builder) *ctrl.Builder {
+ return builder.
+ Owns(&corev1.ConfigMap{}).
+ Owns(&appsv1.DaemonSet{}).
+ Owns(&rbacv1.ClusterRole{}).
+ Owns(&rbacv1.ClusterRoleBinding{}).
+ Owns(&corev1.ServiceAccount{})
+}
+
+func (r *LoggingReconciler) GetResources(logging *loggingv1alpha2.Logging) (*model.LoggingResources, error) {
+ loggingResources := model.NewLoggingResources(logging, r.Client, r.Log)
+ var err error
+
+ clusterFlows := &loggingv1alpha2.ClusterFlowList{}
+ err = r.List(context.TODO(), clusterFlows, client.InNamespace(logging.Spec.ControlNamespace))
+ if err != nil {
+ return nil, err
+ }
+ if len(clusterFlows.Items) > 0 {
+ for _, i := range clusterFlows.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.ClusterFlows = append(loggingResources.ClusterFlows, i)
+ }
+ }
+ }
+
+ clusterOutputs := &loggingv1alpha2.ClusterOutputList{}
+ err = r.List(context.TODO(), clusterOutputs, client.InNamespace(logging.Spec.ControlNamespace))
+ if err != nil {
+ return nil, err
+ }
+ if len(clusterOutputs.Items) > 0 {
+ for _, i := range clusterOutputs.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.ClusterOutputs = append(loggingResources.ClusterOutputs, i)
+ }
+ }
+ }
+
+ watchNamespaces := logging.Spec.WatchNamespaces
+
+ if len(watchNamespaces) == 0 {
+ nsList := &corev1.NamespaceList{}
+ err = r.List(context.TODO(), nsList)
+ if err != nil {
+ return nil, errors.WrapIf(err, "failed to list all namespaces")
+ }
+ for _, ns := range nsList.Items {
+ watchNamespaces = append(watchNamespaces, ns.Name)
+ }
+ }
+
+ for _, ns := range watchNamespaces {
+ flows := &loggingv1alpha2.FlowList{}
+ err = r.List(context.TODO(), flows, client.InNamespace(ns))
+ if err != nil {
+ return nil, err
+ }
+ if len(flows.Items) > 0 {
+ for _, i := range flows.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.Flows = append(loggingResources.Flows, i)
+ }
+ }
+ }
+ outputs := &loggingv1alpha2.OutputList{}
+ err = r.List(context.TODO(), outputs, client.InNamespace(ns))
+ if err != nil {
+ return nil, err
+ }
+ if len(outputs.Items) > 0 {
+ for _, i := range outputs.Items {
+ if i.Spec.LoggingRef == logging.Spec.LoggingRef {
+ loggingResources.Outputs = append(loggingResources.Outputs, i)
+ }
+ }
+ }
+ }
+
+ return loggingResources, nil
+}
diff --git a/controllers/logging_controller_test.go b/controllers/logging_controller_test.go
new file mode 100644
index 000000000..069d114ca
--- /dev/null
+++ b/controllers/logging_controller_test.go
@@ -0,0 +1,552 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers_test
+
+import (
+ "context"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "emperror.dev/errors"
+ "github.com/banzaicloud/logging-operator/api/v1alpha2"
+ "github.com/banzaicloud/logging-operator/controllers"
+ "github.com/banzaicloud/logging-operator/pkg/model/output"
+ "github.com/banzaicloud/logging-operator/pkg/model/secret"
+ "github.com/banzaicloud/logging-operator/pkg/resources/fluentd"
+ "github.com/onsi/gomega"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/kubernetes/scheme"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var (
+ err error
+ mgr ctrl.Manager
+ requests chan reconcile.Request
+ stopMgr chan struct{}
+ mgrStopped *sync.WaitGroup
+ reconcilerErrors chan error
+ g gomega.GomegaWithT
+)
+
+func TestFluentdResourcesCreatedAndRemoved(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+
+ cm := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.SecretConfigName), cm)()
+
+ g.Expect(cm.Data["fluent.conf"]).Should(gomega.And(
+ gomega.ContainSubstring("@include /fluentd/etc/input.conf"),
+ gomega.ContainSubstring("@include /fluentd/app-config/*"),
+ gomega.ContainSubstring("@include /fluentd/etc/devnull.conf"),
+ ))
+
+ deployment := &appsv1.StatefulSet{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.StatefulSetName), deployment)()
+}
+
+func TestSingleFlowWithoutOutputRefs(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestSingleFlowWithoutExistingLoggingRef(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ LoggingRef: "nonexistent",
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).ShouldNot(gomega.ContainSubstring("namespace " + testNamespace))
+}
+
+func TestSingleFlowWithOutputRefDefaultLoggingRef(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1alpha2.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.OutputSpec{
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ }
+
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestSingleFlowWithClusterOutput(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1alpha2.ClusterOutput{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-cluster-output",
+ Namespace: controlNamespace,
+ },
+ Spec: v1alpha2.ClusterOutputSpec{
+ OutputSpec: v1alpha2.OutputSpec{
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ },
+ }
+
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-cluster-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestClusterFlowWithNamespacedOutput(t *testing.T) {
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1alpha2.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.OutputSpec{
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ }
+
+ flow := &v1alpha2.ClusterFlow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: controlNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ err := wait.Poll(time.Second, time.Second*3, func() (bool, error) {
+ select {
+ case err := <-reconcilerErrors:
+ expected := "referenced output not found: test-output"
+ if !strings.Contains(err.Error(), expected) {
+ return false, errors.Errorf("expected `%s` but received `%s`", expected, err.Error())
+ } else {
+ return true, nil
+ }
+ case <-time.After(100 * time.Millisecond):
+ return false, nil
+ }
+ })
+
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+}
+
+func TestSingleFlowWithOutputRef(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ LoggingRef: "someloggingref",
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1alpha2.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.OutputSpec{
+ LoggingRef: "someloggingref",
+ NullOutputConfig: output.NewNullOutputConfig(),
+ },
+ }
+
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ LoggingRef: "someloggingref",
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("a:b"))
+}
+
+func TestSingleFlowDefaultLoggingRefInvalidOutputRef(t *testing.T) {
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ WatchNamespaces: []string{testNamespace},
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{"test-output-nonexistent"},
+ },
+ }
+
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, flow)()
+
+ err := wait.Poll(time.Second, time.Second*3, func() (bool, error) {
+ select {
+ case err := <-reconcilerErrors:
+ expected := "referenced output not found: test-output-nonexistent"
+ if !strings.Contains(err.Error(), expected) {
+ return false, errors.Errorf("expected `%s` but received `%s`", expected, err.Error())
+ } else {
+ return true, nil
+ }
+ case <-time.After(100 * time.Millisecond):
+ return false, nil
+ }
+ })
+
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+}
+
+func TestSingleFlowWithSecretInOutput(t *testing.T) {
+ g := gomega.NewGomegaWithT(t)
+ defer beforeEach(t)()
+
+ logging := &v1alpha2.Logging{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: v1alpha2.LoggingSpec{
+ FluentdSpec: &v1alpha2.FluentdSpec{},
+ FlowConfigCheckDisabled: true,
+ WatchNamespaces: []string{testNamespace},
+ ControlNamespace: controlNamespace,
+ },
+ }
+
+ output := &v1alpha2.Output{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-output",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.OutputSpec{
+ S3OutputConfig: &output.S3OutputConfig{
+ AwsAccessKey: &secret.Secret{
+ ValueFrom: &secret.ValueFrom{
+ SecretKeyRef: &secret.KubernetesSecret{
+ Name: "topsecret",
+ Key: "key",
+ },
+ },
+ },
+ SharedCredentials: &output.S3SharedCredentials{},
+ },
+ },
+ }
+ flow := &v1alpha2.Flow{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "test-flow",
+ Namespace: testNamespace,
+ },
+ Spec: v1alpha2.FlowSpec{
+ Selectors: map[string]string{
+ "a": "b",
+ },
+ OutputRefs: []string{
+ "test-output",
+ },
+ },
+ }
+ topsecret := &corev1.Secret{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "topsecret",
+ Namespace: testNamespace,
+ },
+ StringData: map[string]string{
+ "key": "topsecretdata",
+ },
+ }
+ defer ensureCreated(t, logging)()
+ defer ensureCreated(t, topsecret)()
+ defer ensureCreated(t, output)()
+ defer ensureCreated(t, flow)()
+
+ secret := &corev1.Secret{}
+
+ defer ensureCreatedEventually(t, controlNamespace, logging.QualifiedName(fluentd.AppSecretConfigName), secret)()
+ g.Expect(string(secret.Data[fluentd.AppConfigKey])).Should(gomega.ContainSubstring("topsecretdata"))
+}
+
+// TODO add the following tests:
+// - resources from non-watched namespaces are not incorporated
+// - namespaced flow cannot use an output not enabled for the given namespace
+
+func beforeEach(t *testing.T) func() {
+ mgr, err = ctrl.NewManager(cfg, ctrl.Options{
+ Scheme: scheme.Scheme,
+ })
+ g.Expect(err).NotTo(gomega.HaveOccurred())
+
+ flowReconciler := &controllers.LoggingReconciler{
+ Client: mgr.GetClient(),
+ Log: ctrl.Log.WithName("controllers").WithName("Flow"),
+ }
+
+ var wrappedReconciler reconcile.Reconciler
+ wrappedReconciler, requests, _, reconcilerErrors = duplicateRequest(t, flowReconciler)
+
+ err := controllers.SetupLoggingWithManager(mgr, ctrl.Log.WithName("manager").WithName("Setup")).Complete(wrappedReconciler)
+ g.Expect(err).NotTo(gomega.HaveOccurred())
+
+ stopMgr, mgrStopped = startTestManager(t, mgr)
+
+ return func() {
+ close(stopMgr)
+ mgrStopped.Wait()
+ }
+}
+
+func ensureCreated(t *testing.T, object runtime.Object) func() {
+ err := mgr.GetClient().Create(context.TODO(), object)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ return func() {
+ mgr.GetClient().Delete(context.TODO(), object)
+ }
+}
+
+func ensureCreatedEventually(t *testing.T, ns, name string, object runtime.Object) func() {
+ err := wait.Poll(time.Second, time.Second*3, func() (bool, error) {
+ err := mgr.GetClient().Get(context.TODO(), types.NamespacedName{
+ Name: name, Namespace: ns,
+ }, object)
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ return true, err
+ })
+ if err != nil {
+ t.Fatalf("%+v", errors.WithStack(err))
+ }
+ return func() {
+ mgr.GetClient().Delete(context.TODO(), object)
+ }
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
new file mode 100644
index 000000000..f2e04e482
--- /dev/null
+++ b/controllers/suite_test.go
@@ -0,0 +1,142 @@
+// Copyright © 2019 Banzai Cloud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ "github.com/banzaicloud/logging-operator/api/v1alpha2"
+ "github.com/pborman/uuid"
+ v12 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/envtest"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ // +kubebuilder:scaffold:imports
+)
+
+// These tests use the controller-runtime envtest package together with Gomega assertions.
+// Refer to http://onsi.github.io/gomega/ to learn more about Gomega.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var testNamespace = "test-" + uuid.New()[:8]
+var controlNamespace = "control"
+
+func TestMain(m *testing.M) {
+ err := beforeSuite()
+ if err != nil {
+ fmt.Printf("%+v", err)
+ os.Exit(1)
+ }
+ code := m.Run()
+ err = afterSuite()
+ if err != nil {
+ fmt.Printf("%+v", err)
+ os.Exit(1)
+ }
+ os.Exit(code)
+}
+
+func beforeSuite() error {
+ logf.SetLogger(zap.LoggerTo(os.Stdout, true))
+
+ testEnv = &envtest.Environment{
+ CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
+ }
+
+ var err error
+
+ cfg, err = testEnv.Start()
+ if err != nil {
+ return err
+ }
+ if cfg == nil {
+ return fmt.Errorf("failed to start testenv, config is nil")
+ }
+
+ err = v1alpha2.AddToScheme(scheme.Scheme)
+ if err != nil {
+ return err
+ }
+
+ k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+ if err != nil {
+ return err
+ }
+ if k8sClient == nil {
+ return fmt.Errorf("failed to create k8s config")
+ }
+
+ for _, ns := range []string{controlNamespace, testNamespace} {
+ err := k8sClient.Create(context.TODO(), &v12.Namespace{
+ ObjectMeta: v1.ObjectMeta{
+ Name: ns,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func afterSuite() error {
+ return testEnv.Stop()
+}
+
+// duplicateRequest returns a reconcile.Reconciler implementation that delegates to inner and
+// writes the request, result, and error to the returned channels after Reconcile has finished.
+func duplicateRequest(t *testing.T, inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request, chan reconcile.Result, chan error) {
+ requests := make(chan reconcile.Request)
+ results := make(chan reconcile.Result)
+ errors := make(chan error)
+ fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
+ result, err := inner.Reconcile(req)
+ if err != nil {
+ t.Logf("reconcile failure err: %+v req: %+v, result: %+v", err, req, result)
+ errors <- err
+ }
+ requests <- req
+ results <- result
+ return result, err
+ })
+ return fn, requests, results, errors
+}
+
+// startTestManager starts the given manager in a separate goroutine and returns a stop channel
+// and a WaitGroup that can be used to shut it down and wait for it to finish.
+func startTestManager(t *testing.T, mgr manager.Manager) (chan struct{}, *sync.WaitGroup) {
+ stop := make(chan struct{})
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := mgr.Start(stop); err != nil {
+ t.Fatalf("%+v", err)
+ }
+ }()
+ return stop, wg
+}
diff --git a/deploy/clusterrole.yaml b/deploy/clusterrole.yaml
deleted file mode 100644
index 85a4d051b..000000000
--- a/deploy/clusterrole.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- creationTimestamp: null
- name: logging-operator
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - services
- - endpoints
- - persistentvolumeclaims
- - events
- - configmaps
- - secrets
- - serviceaccounts
- verbs:
- - '*'
-- apiGroups:
- - rbac.authorization.k8s.io
- resources:
- - clusterroles
- - clusterrolebindings
- verbs:
- - list
- - get
- - create
- - update
- - watch
-- apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
-- apiGroups:
- - apps
- resources:
- - deployments
- - daemonsets
- - replicasets
- - statefulsets
- verbs:
- - '*'
-- apiGroups:
- - monitoring.coreos.com
- resources:
- - servicemonitors
- verbs:
- - get
- - create
-- apiGroups:
- - logging.banzaicloud.com
- resources:
- - '*'
- - fluentbits
- - fluentds
- verbs:
- - '*'
diff --git a/deploy/crds/logging_v1alpha1_fluentbit_cr.yaml b/deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
deleted file mode 100644
index 575f8ed2a..000000000
--- a/deploy/crds/logging_v1alpha1_fluentbit_cr.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentbit
-metadata:
- name: example-fluentbit
- labels:
- release: test
-spec:
- namespace: default
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/api/v1/metrics/prometheus"
- prometheus.io/port: "2020"
- tls:
- enabled: false
- image:
- tag: "1.1.3"
- repository: "fluent/fluent-bit"
- pullPolicy: "IfNotPresent"
- resources: {}
diff --git a/deploy/crds/logging_v1alpha1_fluentbit_crd.yaml b/deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
deleted file mode 100644
index e2c9264f3..000000000
--- a/deploy/crds/logging_v1alpha1_fluentbit_crd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: fluentbits.logging.banzaicloud.com
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentbit
- listKind: FluentbitList
- plural: fluentbits
- singular: fluentbit
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
diff --git a/deploy/crds/logging_v1alpha1_fluentd_cr.yaml b/deploy/crds/logging_v1alpha1_fluentd_cr.yaml
deleted file mode 100644
index aa149e59c..000000000
--- a/deploy/crds/logging_v1alpha1_fluentd_cr.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd
-metadata:
- name: example-fluentd
- labels:
- release: test
-spec:
- namespace: default
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/path: "/metrics"
- prometheus.io/port: "25000"
- tls:
- enabled: false
- image:
- tag: "v1.5.0"
- repository: "banzaicloud/fluentd"
- pullPolicy: "IfNotPresent"
- volumeModImage:
- tag: "latest"
- repository: "busybox"
- pullPolicy: "IfNotPresent"
- configReloaderImage:
- tag: "v0.2.2"
- repository: "jimmidyson/configmap-reload"
- pullPolicy: "IfNotPresent"
- resources: {}
- fluentdPvcSpec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 21Gi
\ No newline at end of file
diff --git a/deploy/crds/logging_v1alpha1_fluentd_crd.yaml b/deploy/crds/logging_v1alpha1_fluentd_crd.yaml
deleted file mode 100644
index 93148ffde..000000000
--- a/deploy/crds/logging_v1alpha1_fluentd_crd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: fluentds.logging.banzaicloud.com
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Fluentd
- listKind: FluentdList
- plural: fluentds
- singular: fluentd
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
diff --git a/deploy/crds/logging_v1alpha1_plugin_cr.yaml b/deploy/crds/logging_v1alpha1_plugin_cr.yaml
deleted file mode 100644
index 026250e7a..000000000
--- a/deploy/crds/logging_v1alpha1_plugin_cr.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Plugin
-metadata:
- name: example-plugin
- labels:
- release: test
-spec:
- input:
- label:
- app: "*"
- output:
- - type: gcs
- name: outputGCS
- parameters:
- - name: project
- valueFrom:
- secretKeyRef:
- name: ""
- key: ""
- - name: client_email
- valueFrom:
- secretKeyRef:
- name: ""
- key: ""
- - name: private_key
- valueFrom:
- secretKeyRef:
- name: ""
- key: ""
- - name: bucket
- value: ""
\ No newline at end of file
diff --git a/deploy/crds/logging_v1alpha1_plugin_crd.yaml b/deploy/crds/logging_v1alpha1_plugin_crd.yaml
deleted file mode 100644
index b798db0d9..000000000
--- a/deploy/crds/logging_v1alpha1_plugin_crd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: plugins.logging.banzaicloud.com
-spec:
- group: logging.banzaicloud.com
- names:
- kind: Plugin
- listKind: PluginList
- plural: plugins
- singular: plugin
- scope: Namespaced
- subresources:
- status: {}
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- type: object
- status:
- type: object
- version: v1alpha1
- versions:
- - name: v1alpha1
- served: true
- storage: true
diff --git a/deploy/operator.yaml b/deploy/operator.yaml
deleted file mode 100644
index 1ac252764..000000000
--- a/deploy/operator.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: logging-operator
-spec:
- replicas: 1
- selector:
- matchLabels:
- name: logging-operator
- template:
- metadata:
- labels:
- name: logging-operator
- spec:
- serviceAccountName: logging-operator
- containers:
- - name: logging-operator
- # Replace this with the built image name
- image: banzaicloud/logging-operator:0.2.2
- command:
- - logging-operator
- imagePullPolicy: IfNotPresent
- env:
- - name: WATCH_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: OPERATOR_NAME
- value: "logging-operator"
diff --git a/deploy/service_account.yaml b/deploy/service_account.yaml
deleted file mode 100644
index 1a684cff5..000000000
--- a/deploy/service_account.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: logging-operator
diff --git a/developer.md b/developer.md
deleted file mode 100644
index 0a57334e7..000000000
--- a/developer.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Developer's Guide
-
-## Setting up local development environment
-
-
-### Prerequisites
-
-These steps are required to build the logging-operator and run on your computer.
-
-### Install operator-sdk
-
-Please follow the official guide for the **operator-sdk**:
-https://github.com/operator-framework/operator-sdk#quick-start
-
-### Set-up the `kubernetes` context
-
-Set up the kubernetes environment where you want create resources
-
-#### Docker-for-mac
-
-```
-kubectl config use-context docker-for-desktop
-```
-
-#### Minikube
-
-```
-kubectl config use-context minikiube
-```
-
-### Install using operator-sdk local
-
-```
-operator-sdk up local
-```
-
-## Building docker image from the operator
-
-```
-$ docker build -t banzaicloud/logging-operator:local
-```
-
-### Using Helm to install logging-operator (with custom image)
-
-Add banzaicloud-stable repo (or download the chart)
-
-```
-helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master
-helm repo update
-```
-
-Install the Helm deployment with custom (local) image
-
-```
-helm install banzaicloud-stable/logging-operator --set image.tag="local"
-```
-
-Verify installation
-
-```
-helm list
-```
-
-### Contribution
-
-1. When contributing please check the issues and pull-requests weather your problem has been already addressed.
-2. Open an issue and/or pull request describing your contribution
-3. Please follow the issue and pull-request templates instructions
diff --git a/docs/crds.md b/docs/crds.md
new file mode 100644
index 000000000..b1872c3a4
--- /dev/null
+++ b/docs/crds.md
@@ -0,0 +1,276 @@
+# Custom Resource Definitions
+
+This document contains detailed information about the CRDs the logging-operator uses.
+
+Available CRDs:
+- [loggings.logging.banzaicloud.com](/config/crd/bases/logging.banzaicloud.com_loggings.yaml)
+- [outputs.logging.banzaicloud.com](/config/crd/bases/logging.banzaicloud.com_outputs.yaml)
+- [flows.logging.banzaicloud.com](/config/crd/bases/logging.banzaicloud.com_flows.yaml)
+- [clusteroutputs.logging.banzaicloud.com](/config/crd/bases/logging.banzaicloud.com_clusteroutputs.yaml)
+- [clusterflows.logging.banzaicloud.com](/config/crd/bases/logging.banzaicloud.com_clusterflows.yaml)
+
+> You can find example yamls [here](/docs/examples)
+
+## loggings
+
+The Logging resource defines the logging infrastructure for your cluster. You can define **one** or **more** `logging` resources. This resource holds a `logging pipeline` together and is responsible for deploying `fluentd` and `fluent-bit` on the cluster. It also declares the `controlNamespace` and `watchNamespaces`, if applicable.
+
+> Note: `logging` resources are referenced by `loggingRef`. If you set up multiple `logging` flows, you have to reference the other objects through this field. This is useful, for example, when you want to run multiple fluentd instances with separate configurations.
+
+You can install the `logging` resource via a [Helm chart](/charts/logging-operator-logging) with built-in TLS generation.
+
+### Namespace separation
+A `logging pipeline` consists of two types of resources.
+- `Namespaced` resources: `Flow`, `Output`
+- `Global` resources: `ClusterFlow`, `ClusterOutput`
+
+The `namespaced` resources are only effective in their **own** namespace. `Global` resources operate **cluster wide**.
+
+> You can only create `ClusterFlow` and `ClusterOutput` resources in the `controlNamespace`. This **MUST** be a **protected** namespace to which only **administrators** have access.
+
+Create a namespace for logging
+```bash
+kubectl create ns logging
+```
+
+**`logging` plain example**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging
+```
+
+**`logging` with filtered namespaces**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-namespaced
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging
+ watchNamespaces: ["prod", "test"]
+```
+
+### Logging parameters
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------------------------------------------------------------------|
+| loggingRef | string | "" | Reference name of the logging deployment |
+| flowConfigCheckDisabled | bool | False | Disable configuration check before deploy |
+| flowConfigOverride | string | "" | Use static configuration instead of generated config. |
+| fluentbit | [FluentbitSpec](#Fluent-bit-Spec) | {} | Fluent-bit configurations |
+| fluentd | [FluentdSpec](#Fluentd-Spec) | {} | Fluentd configurations |
+| watchNamespaces | []string | "" | Limit namespaces from where to read Flow and Output specs |
+| controlNamespace | string | "" | Control namespace that contains ClusterOutput and ClusterFlow resources |
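+
+For instance, a sketch of a `logging` resource that also sets `loggingRef` and `flowConfigCheckDisabled` from the table above (the name, reference, and namespaces are placeholders):
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+  name: infra-logging
+  namespace: logging
+spec:
+  loggingRef: infra
+  flowConfigCheckDisabled: true
+  fluentd: {}
+  fluentbit: {}
+  watchNamespaces: ["infra"]
+  controlNamespace: logging
+```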
+
+#### Fluentd Spec
+
+You can customize the `fluentd` statefulset with the following parameters.
+
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------------------------------------------------------------------|
+| annotations | map[string]string | {} | Extra annotations to Kubernetes resource|
+| tls | [TLS](#TLS-Spec) | {} | Configure TLS settings|
+| image | [ImageSpec](#Image-Spec) | {} | Fluentd image override |
+| fluentdPvcSpec | [PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#persistentvolumeclaimspec-v1-core) | {} | Fluentd PVC spec to mount a persistent volume for the buffer |
+| disablePvc | bool | false | Disable PVC binding |
+| volumeModImage | [ImageSpec](#Image-Spec) | {} | Volume modifier image override |
+| configReloaderImage | [ImageSpec](#Image-Spec) | {} | Config reloader image override |
+| resources | [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#resourcerequirements-v1-core) | {} | Resource requirements and limits |
+
+**`logging` with custom fluentd pvc**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd:
+ fluentdPvcSpec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 40Gi
+ storageClassName: fast
+ fluentbit: {}
+ controlNamespace: logging
+```
+
+#### Fluent-bit Spec
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------------------------------------------------------------------|
+| annotations | map[string]string | {} | Extra annotations to Kubernetes resource|
+| tls | [TLS](#TLS-Spec) | {} | Configure TLS settings|
+| image | [ImageSpec](#Image-Spec) | {} | Fluent-bit image override |
+| resources | [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#resourcerequirements-v1-core) | {} | Resource requirements and limits |
+| targetHost | string | *Fluentd host* | Hostname to forward the logs to |
+| targetPort | int | *Fluentd port* | Port to forward the logs to |
+
+**`logging` with custom fluent-bit annotations**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit:
+ annotations:
+      my-annotations/enable: "true"
+ controlNamespace: logging
+```
+
+#### Image Spec
+
+Override default images
+
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------|
+| repository | string | "" | Image repository |
+| tag | string | "" | Image tag |
+| pullPolicy | string | "" | Always, IfNotPresent, Never |
+
+**`logging` with custom fluentd image**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd:
+ image:
+ repository: banzaicloud/fluentd
+ tag: v1.6.3-alpine
+ pullPolicy: IfNotPresent
+ fluentbit: {}
+ controlNamespace: logging
+```
+
+#### TLS Spec
+
+Define TLS certificate secret
+
+| Name | Type | Default | Description |
+|-------------------------|----------------|---------|-------------|
+| enabled | bool | false | Enable TLS |
+| secretName | string | "" | Kubernetes secret that contains: **tls.crt, tls.key, ca.crt** |
+| sharedKey | string | "" | Shared secret for fluentd authentication |
+
+
+**`logging` setup with TLS**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-tls
+ namespace: logging
+spec:
+ fluentd:
+ disablePvc: true
+ tls:
+ enabled: true
+ secretName: fluentd-tls
+ sharedKey: asdadas
+ fluentbit:
+ tls:
+ enabled: true
+ secretName: fluentbit-tls
+ sharedKey: asdadas
+ controlNamespace: logging
+
+```
+
+## outputs, clusteroutputs
+
+Outputs are the final stage for a `logging flow`. You can define multiple `outputs` and attach them to multiple `flows`.
+
+> Note: A `Flow` can be connected to an `Output` or a `ClusterOutput`, but a `ClusterFlow` can only be attached to a `ClusterOutput`.
+
+### Defining outputs
+
+The supported `Output` plugins are documented [here](./plugins/outputs)
+
+| Name | Type | Default | Description |
+|-------------------------|-------------------|---------|-------------|
+| **Output Definitions** | [Output](./plugins/outputs) | nil | Named output definitions |
+| loggingRef | string | "" | Reference of the `logging` resource to connect the `Output` or `ClusterOutput` to |
+
+
+**`output` s3 example**
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: s3-output-sample
+spec:
+ s3:
+ aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsAccessKeyId
+ namespace: default
+ aws_sec_key:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsSecretAccesKey
+ namespace: default
+ s3_bucket: example-logging-bucket
+ s3_region: eu-west-1
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
+```
+
+## flows, clusterflows
+
+Flows define a `logging flow`, that is, the `filters` to apply and the `outputs` to route the logs to.
+
+> `Flow` resources are `namespaced`; the `selector` only selects `Pod` logs within the flow's own namespace.
+> `ClusterFlow` selects logs from **ALL** namespaces.
+
+### Parameters
+| Name | Type | Default | Description |
+|-------------------------|-------------------|---------|-------------|
+| selectors | map[string]string | {} | Kubernetes label selectors for the logs. |
+| filters | [][Filter](./plugins/filters) | [] | List of applied [filters](./plugins/filters). |
+| loggingRef | string | "" | Reference of the `logging` resource to connect the `Flow` or `ClusterFlow` to |
+| outputRefs | []string | [] | List of [Output](#Defining-outputs) or [ClusterOutput](#Defining-outputs) names |
+
+*`flow` example with filters and output in the `default` namespace*
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ filters:
+ - parse:
+ key_name: log
+ remove_key_name_field: true
+ parsers:
+ - type: nginx
+ - tag_normaliser:
+ format: ${namespace_name}.${pod_name}.${container_name}
+ outputRefs:
+ - s3-output
+ selectors:
+ app: nginx
+```
\ No newline at end of file
diff --git a/docs/developers.md b/docs/developers.md
new file mode 100644
index 000000000..c6f351b6a
--- /dev/null
+++ b/docs/developers.md
@@ -0,0 +1,154 @@
+# Developers documentation
+
+This documentation helps you set up a developer environment and write plugins for the operator.
+
+## Setting up Kind
+
+Install Kind on your computer
+```
+go get sigs.k8s.io/kind@v0.5.1
+```
+
+Create cluster
+```
+kind create cluster --name logging
+```
+
+Install the prerequisites (this is a Kubebuilder Makefile target that will generate and install the CRDs)
+```
+make install
+```
+
+Run the Operator
+```
+go run main.go
+```
+
+## Writing a plugin
+
+To add a `plugin` to the logging operator, you need to define the plugin struct.
+
+> Note: Place your plugin in the corresponding directory `pkg/model/filter` or `pkg/model/output`
+
+```go
+type MyExampleOutput struct {
+ // Path that is required for the plugin
+ Path string `json:"path,omitempty"`
+}
+```
+
+The plugin uses the **JSON** tags to parse and validate the configuration. Without tags the configuration is not valid. The `fluent` parameter name must match the JSON tag. Don't forget to use `omitempty` for non-required parameters.
+
+### Implement `ToDirective`
+
+To render the configuration you have to implement the `ToDirective` function.
+```go
+func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ ...
+}
+```
+For simple Plugins you can use the `NewFlatDirective` function.
+```go
+func (c *ExampleOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+ return types.NewFlatDirective(types.PluginMeta{
+ Type: "example",
+ Directive: "output",
+ Tags: "**",
+ }, c, secretLoader)
+}
+```
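+
+For instance, a plugin like the `MyExampleOutput` struct above, rendered through `NewFlatDirective` with a meta similar to the one shown here, might produce a fluentd directive roughly like the following (a sketch of the generated configuration; the exact tag and formatting depend on the renderer):
+```
+<match **>
+  @type example
+  path /tmp/example
+</match>
+```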
+For more examples, please check the available plugins.
+
+### Reuse existing Plugin sections
+
+You can embed existing configuration sections in your plugins. For example, modern `Output` plugins have a `Buffer` section.
+
+```go
+// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+```
+
+If you are using an embedded section, you must call its `ToDirective` method manually and append the result as a `SubDirective`.
+
+```go
+if c.Buffer != nil {
+ if buffer, err := c.Buffer.ToDirective(secretLoader); err != nil {
+ return nil, err
+ } else {
+ s3.SubDirectives = append(s3.SubDirectives, buffer)
+ }
+}
+```
+
+### Special plugin tags
+To document the plugins, the logging-operator uses Go `tags` (similar to the JSON tags). The operator uses tags named `plugin` for special instructions.
+
+Special tag `default`:
+The `default` tag assigns `default` values to parameters. These parameters are explicitly set in the generated fluentd configuration.
+```go
+RetryForever bool `json:"retry_forever" plugin:"default:true"`
+```
+Special tag `required`:
+The `required` tag ensures that the attribute can **not** be empty.
+```go
+RetryForever bool `json:"retry_forever" plugin:"required"`
+```
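+
+Putting these together, a hypothetical plugin struct could combine the JSON and `plugin` tags like this (a sketch, not an existing plugin in this repository):
+```go
+type HypotheticalOutput struct {
+	// Target path for the plugin; may not be empty
+	Path string `json:"path" plugin:"required"`
+	// Retry forever; explicitly set to true in the generated config unless overridden
+	RetryForever bool `json:"retry_forever" plugin:"default:true"`
+	// Optional compression, omitted from the config when empty
+	Compress string `json:"compress,omitempty"`
+}
+```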
+
+## Generate documentation for Plugin
+
+The operator parses the `docstrings` to generate the documentation.
+
+```go
+...
+// AWS access key id
+AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"`
+...
+```
+
+Will generate the following Markdown
+
+| Variable Name | Default | Applied function |
+|---|---|---|
+|AwsAccessKey| | AWS access key id|
+
+You can *hint* default values in the docstring via `(default: value)`. This is useful if you don't want to set the default explicitly with a `tag`. However, during rendering, defaults set in `tags` take priority over the docstring.
+```go
+...
+// The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})
+S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty"`
+...
+```
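+
+Following the table format above, the docstring hint for `S3ObjectKeyFormat` would be rendered roughly as:
+
+| Variable Name | Default | Applied function |
+|---|---|---|
+|S3ObjectKeyFormat| %{path}%{time_slice}_%{index}.%{file_extension} | The format of S3 object keys|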
+
+### Special docstrings
+
+- `+docName:"Title for the plugin section"`
+- `+docLink:"Buffer,./buffer.md"`
+
+You can declare the document **title** and **description** above the `type _doc interface{}` declaration.
+
+Example Document headings:
+```go
+// +docName:"Amazon S3 plugin for Fluentd"
+// **s3** output plugin buffers event logs in local file and upload it to S3 periodically. This plugin splits files exactly by using the time of event logs (not the time when the logs are received). For example, a log '2011-01-02 message B' is reached, and then another log '2011-01-03 message B' is reached in this order, the former one is stored in "20110102.gz" file, and latter one in "20110103.gz" file.
+type _doc interface{}
+```
+
+Example Plugin headings:
+```go
+// +kubebuilder:object:generate=true
+// +docName:"Shared Credentials"
+type S3SharedCredentials struct {
+...
+```
+
+Example linking embedded sections
+```go
+// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+```
+
+### Generate docs for your Plugin
+
+```
+make docs
+```
diff --git a/docs/example-s3.md b/docs/example-s3.md
new file mode 100644
index 000000000..11678a7a3
--- /dev/null
+++ b/docs/example-s3.md
@@ -0,0 +1,114 @@
+# Save all logs to S3
+
+Before you start, [install the logging-operator](/README.md#deploying-with-helm-chart).
+
+### Create default logging
+
+Create a namespace for logging
+```bash
+kubectl create ns logging-system
+```
+> You can install the `logging` resource via a [Helm chart](/charts/logging-operator-logging) with built-in TLS generation.
+
+Create `logging` resource
+```bash
+kubectl apply -f logging.yaml
+```
+*logging.yaml*
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-simple
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging-system
+```
+
+> Note: `ClusterOutput` and `ClusterFlow` resources will only be accepted in the `controlNamespace`.
+
+### Create AWS secret
+
+If you have your `$AWS_ACCESS_KEY_ID` and `$AWS_SECRET_ACCESS_KEY` set, you can use the following snippet.
+```bash
+kubectl create secret generic logging-s3 --namespace logging-system --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccesKey=$AWS_SECRET_ACCESS_KEY"
+```
+Or set up the secret manually.
+```bash
+kubectl apply -f secret.yaml
+```
+*secret.yaml*
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: logging-s3
+ namespace: logging-system
+type: Opaque
+data:
+ awsAccessKeyId:
+ awsSecretAccesKey:
+```
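+
+The values under `data` must be base64-encoded. Assuming the same environment variables as above, you can produce them like this:
+```bash
+echo -n "$AWS_ACCESS_KEY_ID" | base64
+echo -n "$AWS_SECRET_ACCESS_KEY" | base64
+```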
+
+> You **MUST** install the `secret` and the `output` definition in the **SAME** namespace
+
+Create an S3 output definition
+
+```bash
+kubectl apply -f clusteroutput.yaml
+```
+*clusteroutput.yaml*
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: ClusterOutput
+metadata:
+ name: s3-output
+ namespace: logging-system
+spec:
+ s3:
+ aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name: logging-s3
+ key: awsAccessKeyId
+ aws_sec_key:
+ valueFrom:
+ secretKeyRef:
+ name: logging-s3
+ key: awsSecretAccesKey
+ s3_bucket: logging-amazon-s3
+ s3_region: eu-central-1
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 10m
+ timekey_wait: 30s
+ timekey_use_utc: true
+```
+
+> Note: For production set-ups we recommend using a longer `timekey` interval to avoid generating too many objects.
+
+The following snippet uses [tag_normaliser](./plugins/filters/tagnormaliser.md) to re-tag the logs and then pushes them to S3.
+
+```bash
+kubectl apply -f clusterflow.yaml
+```
+*clusterflow.yaml*
+```yaml
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: ClusterFlow
+metadata:
+ name: all-log-to-s3
+ namespace: logging-system
+spec:
+ filters:
+ - tag_normaliser: {}
+ selectors: {}
+ outputRefs:
+ - s3-output
+```
+
+The logs will be available in the bucket on a `path` like:
+
+```/logs/default.default-logging-simple-fluentbit-lsdp5.fluent-bit/2019/09/11/201909111432_0.gz```
\ No newline at end of file
diff --git a/docs/examples/es.md b/docs/examples/es.md
deleted file mode 100644
index a1c95bbbf..000000000
--- a/docs/examples/es.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-## Example Logging-operator with Elasticsearch Operator
-
-
-
-#### Add operator chart repository:
-```bash
-$ helm repo add es-operator https://raw.githubusercontent.com/upmc-enterprises/elasticsearch-operator/master/charts/
-$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
-$ helm repo update
-```
-
-#### Install operators
-```bash
-$ helm install --name elasticsearch-operator es-operator/elasticsearch-operator --set rbac.enabled=True
-$ helm install --name elasticsearch es-operator/elasticsearch --set kibana.enabled=True --set cerebro.enabled=True
-$ helm install --name logging banzaicloud-stable/logging-operator
-$ helm install --name fluent banzaicloud-stable/logging-operator-fluent
-```
-> [Elasticsearch Operator Documentation](https://github.com/upmc-enterprises/elasticsearch-operator)
-
-#### Install Nginx Demo chart
-```bash
-$ helm install banzaicloud-stable/nginx-logging-es-demo
-```
-
-#### Forward cerebro & kibana dashboards
-```bash
-$ kubectl port-forward svc/cerebro-elasticsearch-cluster 9001:80
-$ kubectl port-forward svc/kibana-elasticsearch-cluster 5601:80
-```
-
-[![asciicast](https://asciinema.org/a/9EcfIzlUQJSjJdopEh5HCU7OT.svg)](https://asciinema.org/a/9EcfIzlUQJSjJdopEh5HCU7OT)
-
-## License
-
-Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/docs/examples/logging_flow_multiple_output.yaml b/docs/examples/logging_flow_multiple_output.yaml
new file mode 100644
index 000000000..e403fa395
--- /dev/null
+++ b/docs/examples/logging_flow_multiple_output.yaml
@@ -0,0 +1,11 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ outputRefs:
+ - gcs-output-sample
+ - s3-output-example
+ selectors:
+ app: nginx
diff --git a/docs/examples/logging_flow_single_output.yaml b/docs/examples/logging_flow_single_output.yaml
new file mode 100644
index 000000000..dbc7c526c
--- /dev/null
+++ b/docs/examples/logging_flow_single_output.yaml
@@ -0,0 +1,10 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ outputRefs:
+ - s3-output-sample
+ selectors:
+ app: nginx
diff --git a/docs/examples/logging_flow_with_filters.yaml b/docs/examples/logging_flow_with_filters.yaml
new file mode 100644
index 000000000..08c1f4c6d
--- /dev/null
+++ b/docs/examples/logging_flow_with_filters.yaml
@@ -0,0 +1,18 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Flow
+metadata:
+ name: flow-sample
+ namespace: default
+spec:
+ filters:
+ - parse:
+ key_name: log
+ remove_key_name_field: true
+ parsers:
+ - type: nginx
+ - tag_normaliser:
+ format: ${namespace_name}.${pod_name}.${container_name}
+ outputRefs:
+ - s3-output
+ selectors:
+ app: nginx
diff --git a/docs/examples/logging_logging_simple.yaml b/docs/examples/logging_logging_simple.yaml
new file mode 100644
index 000000000..15078cbff
--- /dev/null
+++ b/docs/examples/logging_logging_simple.yaml
@@ -0,0 +1,9 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-simple
+ namespace: logging
+spec:
+ fluentd: {}
+ fluentbit: {}
+ controlNamespace: logging
\ No newline at end of file
diff --git a/docs/examples/logging_logging_tls.yaml b/docs/examples/logging_logging_tls.yaml
new file mode 100644
index 000000000..bf334bff8
--- /dev/null
+++ b/docs/examples/logging_logging_tls.yaml
@@ -0,0 +1,18 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Logging
+metadata:
+ name: default-logging-tls
+ namespace: logging
+spec:
+ fluentd:
+ disablePvc: true
+ tls:
+ enabled: true
+ secretName: fluentd-tls
+ sharedKey: asdadas
+ fluentbit:
+ tls:
+ enabled: true
+ secretName: fluentbit-tls
+ sharedKey: asdadas
+ controlNamespace: logging
\ No newline at end of file
diff --git a/docs/examples/logging_output_azurestorage.yaml b/docs/examples/logging_output_azurestorage.yaml
new file mode 100644
index 000000000..523705faa
--- /dev/null
+++ b/docs/examples/logging_output_azurestorage.yaml
@@ -0,0 +1,23 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: azure-output-sample
+spec:
+ azurestorage:
+ azure_storage_account:
+ valueFrom:
+ secretKeyRef:
+ name: azurestorage-secret
+ key: azureStorageAccount
+ azure_storage_access_key:
+ valueFrom:
+ secretKeyRef:
+ name: azurestorage-secret
+ key: azureStorageAccessKey
+ azure_container: example-azure-container
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
\ No newline at end of file
diff --git a/docs/examples/logging_output_gcs.yaml b/docs/examples/logging_output_gcs.yaml
new file mode 100644
index 000000000..60bce1758
--- /dev/null
+++ b/docs/examples/logging_output_gcs.yaml
@@ -0,0 +1,19 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: gcs-output-sample
+spec:
+ gcs:
+ credentials_json:
+ valueFrom:
+ secretKeyRef:
+ name: gcs-secret
+ key: credentials.json
+ project: logging-example
+ bucket: banzai-log-test
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
\ No newline at end of file
diff --git a/docs/examples/logging_output_s3.yaml b/docs/examples/logging_output_s3.yaml
new file mode 100644
index 000000000..dc89daed8
--- /dev/null
+++ b/docs/examples/logging_output_s3.yaml
@@ -0,0 +1,26 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: s3-output-sample
+spec:
+ s3:
+ aws_key_id:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsAccessKeyId
+ aws_sec_key:
+ valueFrom:
+ secretKeyRef:
+ name: s3-secret
+ key: awsSecretAccesKey
+ s3_bucket: example-logging-bucket
+ s3_region: eu-central-1
+ path: logs/${tag}/%Y/%m/%d/
+ buffer:
+ path: /tmp/buffer
+ timekey: 1m
+ timekey_wait: 10s
+ timekey_use_utc: true
+ format:
+ type: json
diff --git a/docs/examples/logging_output_sumologic.yaml b/docs/examples/logging_output_sumologic.yaml
new file mode 100644
index 000000000..fe49748bc
--- /dev/null
+++ b/docs/examples/logging_output_sumologic.yaml
@@ -0,0 +1,14 @@
+apiVersion: logging.banzaicloud.com/v1alpha2
+kind: Output
+metadata:
+ name: sumologic-output-sample
+spec:
+ sumologic:
+ endpoint:
+ valueFrom:
+ secretKeyRef:
+ name: sumologic
+ key: endpoint
+ log_format: json
+ source_category: prod/someapp/logs
+ source_name: AppA
diff --git a/docs/examples/s3.md b/docs/examples/s3.md
deleted file mode 100644
index dc96e310c..000000000
--- a/docs/examples/s3.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
-
-#### Install S3 output Plugin chart with Aws Credential Access
-```bash
-$ helm install \
---set bucketName='' \
---set region='' \
---set endpoint='' \
---set awsCredentialsAccess.enabled=true \
---set awsCredentialsAccess.secret.awsAccessValue='' \
---set awsCredentialsAccess.secret.awsSecretValue='' \
-banzaicloud-stable/s3-output
-```
-
-> There is **no** need to encode base64 these values.
-
-#### Install Nginx Demo app
-```bash
-$ helm install banzaicloud-stable/nginx-logging-demo
-```
-
-
-### Create Secret
-
-Create a manifest file for the AWS access key:
-
-```
-apiVersion: v1
-kind: Secret
-metadata:
- name: loggings3
-type: Opaque
-data:
- awsAccessKeyId:
- awsSecretAccesKey:
-```
-
-Submit the secret with kubectl:
-
-```
-kubectl apply -f secret.yaml
-```
-
-### Create LoggingOperator resource
-
-Create a manifest that defines that you want to parse the nginx logs with the specified regular expressions on the standard output of pods with the `app: nginx` label, and store them in the given S3 bucket.
-
-```
-apiVersion: "logging.banzaicloud.com/v1alpha1"
-kind: "Plugin"
-metadata:
- name: "nginx-logging"
- labels:
- release: test
-spec:
- input:
- label:
- app: nginx
- filter:
- - type: parser
- name: parser-nginx
- parameters:
- - name: format
- value: '/^(?[^ ]*) (?[^ ]*) (?[^ ]*) \[(?[^\]]*)\] "(?\S+)(?: +(?[^\"]*?)(?: +\S*)?)?" (?[^ ]*) (?[^ ]*)(?: "(?[^\"]*)" "(?[^\"]*)"(?:\s+(?[^ ]+))?)?$/'
- - name: timeFormat
- value: "%d/%b/%Y:%H:%M:%S %z"
- output:
- - type: s3
- name: outputS3
- parameters:
- - name: aws_key_id
- valueFrom:
- secretKeyRef:
- name: loggings3
- key: awsAccessKeyId
- - name: aws_sec_key
- valueFrom:
- secretKeyRef:
- name: loggings3
- key: awsSecretAccesKey
- - name: s3_bucket
- value: logging-bucket
- - name: s3_region
- value: ap-northeast-1
- - name: s3_endpoint
- value: https://s3.amazonaws.com
-```
-
-
-
-## License
-
-Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
diff --git a/docs/examples/tls.md b/docs/examples/tls.md
deleted file mode 100644
index 7dab5fb99..000000000
--- a/docs/examples/tls.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-# TLS Configuration
-
-To configure TLS for Fluentd and Fluentbit the operator needs TLS certificates
-set via the Fluentd and Fluentbit Custom Resources respectively. This can be
-done in two ways:
-
-## Generic Opaque secret (default)
-
-Create a secret like this:
-
-```
-apiVersion: v1
-data:
- caCert: ...
- clientCert: ...
- clientKey: ...
- serverCert: ...
- serverKey: ...
-kind: Secret
-metadata:
- name: something-something-tls
-type: Opaque
-```
-
-Note that we are providing three certificates in the same secret, one for
-Fluentd (`serverCert`), one for Fluentbit (`clientCert`), and the CA
-certificate (`caCert`).
-
-Then in your custom resource configure like this:
-
-```
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd/Fluentbit
-metadata:
- name: my-fluent-thing
-spec:
- ...
- tls:
- enabled: true
- secretName: something-something-tls
- sharedKey: changeme
-```
-
-
-## `kubernetes.io/tls`
-
-The alternative is if your certificates are in secrets of type `kubernetes.io/tls`, e.g.
-
-```
-apiVersion: v1
-data:
- ca.crt: LS0tLS1...
- tls.crt: LS0tLS1...
- tls.key: LS0tLS1...
-kind: Secret
-metadata:
- name: something-something-tls
-type: kubernetes.io/tls
-```
-
-Then configure your custom resources like this:
-
-```
-apiVersion: logging.banzaicloud.com/v1alpha1
-kind: Fluentd/Fluentbit
-metadata:
- name: my-fluent-thing
-spec:
- ...
- tls:
- enabled: true
- secretName: something-something-tls
- secretType: tls
- sharedKey: changeme
-```
-
-Note: in this case we can use the same secret for both Fluentbit and Fluentd,
-or create separate secrets for each.
-
-Note: the secret's data include the CA certificate, which is in-line with the
-structure created by [jetstack/cert-manager](https://github.com/jetstack/cert-manager/).
-
-## Usage with the helm chart
-
-For the generic Opaque secret just set `tls.enabled=True` and optionally provide the `tls.secretName` value to use your own certificates (instead of the automatically generated ones from the chart).
-
-For `kubernetes.io/tls` install `logging-operator-fluent` with a `values.yaml` like this:
-
-```
-tls:
- enabled: true
-
-fluentbit:
- tlsSecret: something-something-tls
-
-fluentd:
- tlsSecret: otherthing-otherthing-tls
-```
-
-For more information see the helm chart's [README.md](https://github.com/banzaicloud/logging-operator/blob/master/charts/logging-operator-fluent/README.md).
diff --git a/docs/img/helm_logo.png b/docs/img/helm_logo.png
deleted file mode 100644
index 3b26f4dd39d84ba58d7fe243d383d5d92e7534a6..0000000000000000000000000000000000000000
zIKq%IjAaG18LW0lFvAjhmtIv=`);%25eOumBpga`-J$$|Nc2O^Q=1)X
zf^evcb%)|WvZM#a*VZ1YjRM6dK3#9Or<(z_0NloT;V>5O6hDErsxXy2F1N7T|f$-4Q25IiPg?zt!>pJm20Cm}N
z?D%Hp`yf}Huwb1@<-!o!;O=jQYuUXF;}1lIkU-Du!dis*3C|?_t1g<^&m~JHUE(FmfQ}rj45%MZ|Y@
zopr4Y3BdG{72FFHnp0X$jleis$pEtC)=_m9LK{;-kOq?X#RV0YEgo47yy1T}L_lR=
zolR0Bxe7JH(B+2wB`D8aLrOGAOO;_C;ukd69b{#A=)NN4Lzvt6q199cTtz;Tj7P3>
zJq?#2hkA`5dlOu7Obx63t-S6i*@UC8tPODlp&d|cf-Hd~8WkqNgL7XeNKS-dzt%B~
z6!Q1;{~<>?2=H<+ysjQ*Vrj0VKWz9r`|NB^@H^ydtxEL3XCHMW7)pi;ed>Yd)ARXt
z_hsTR$KXFYXAm{|wfk0B%zzr>2#?+Ps6dm|1)Tfs#p`o)(PC_nDdsIFv*~c>y?1ff
z7WmeNoCm(b_@DmVLdh)-sRQWk`p`F|e+?Rlw0*y&fLYf!SLA^o`xit-8Hx};mU(H7
zm720axI0oe`D2q{laUj1;Ve;K(`xIw9#uaR4PA!Qrn3^D7AgO=@t)dXTn&_O{9y_lbQfi2b(M+Ljzp5M%Vfv
z^|?5w5ct9~Hh{hrS-|vsL|s|E`RrAH_5h}i?Iy?o$q|IlmWJ3qmbHu4G`u!p}e9HF^cyE#)|WHL{CXf{I)JCBn#99&lHXI_JR0<$2&~A+je02yIb}#kelg+Gdi*p;HSid%QyN0}tw7)s`
z*d~EBXt^mNH+Z-vB>9EB&Ns<^XX~bR%3y=A$ZlxWYa5zB467ISHqnWV%^%Tv2uaZ_y*oFTz{ZA81|k7g
z&b-YTw6Wo1E)62)QQoh4LQNQ(e-kko84WEbL
zsUDdkZ#r$lp9QDsQK527M$XLAd{SK-w-+0u=QXm^JHuYxs^Zo4P!Kb|btMz>rG$iTGKZ$t%}5
z6KJbInz&R1lo%<5J3HQA&X+*B8Wm;JDx--pZzo*_YqkX%(p}FU_b?TiKTTYd_Cay*?F-@h8sWHL
z!Gsj~s3JA7HH?gaLQ)W>ApzP>tIUZ2zzm|K;=H=WSsW4#6JDtAyB|mX{e9r!P1Keu
ziwIM(91q~XxcOef;L!{QkW#
zd)vHHdWb^R1pXltgCHR~Q0s4>n)EMqvopGROSIc
z=v3`xXg4)pZzMo8da~y4!v?z#^DfT(I6l!ZY;pt!7LJG!3SD
zoz8S{v1YpRo>}y!7VxeH%xqlY)v8?z0W^-he$orYgH51O6bm_XR~+N2$=D*V4s3r>
zdJ=AJYyv=r5NH;lV3vPb_05stmJ+HrHJ3KUxmH$0+=;8=?nk;IRGb`&!x|FJ_+KM+8s{)O|jn+qU@
zAb?ePbml;AHdWm-W!+o=^@sp0K`WM>Q^sXrC=*GM&ZdaIb;mwL0wX)taMT$%N<#Ts
zZgbDE>j-qc+_1;j=qvJs|2{R*ee|2Mh{4z#Idjm3RVCVoly-l1n)GjahAqxTghT9F
zJp@1CXw!R=!n?;e(~7M-ltC?0d%qXQfO|S2y-5AXwEIOt*2EmtE-yEgHQ5{JHd)xJ
zdS%0#H_Lv67T#$Y`9#!Zys5r!*~Se!m6wj(A6o?;QDP9G!2N`Cw>A~w1JO4ij?%I5
zu~qaHbph}d(0G4N>i;Y`0xUaH*n4n|SfD_gvYpcP(&bGReOErD%LS7VU@R+E5hx;)
z8pA%)&82c0DREk3RxRU10&I}~BJcgt&3UYaL`fBXtE^&zxmI8EyXQ{*pEI06HYi`S
z0n#FQ{Nl^e|Fgk!!U@(esR)7*CKTB|;>}M`Ne4_4v|446H{6d@4*mM$I^^LS!>}#e
zMhF|Mu|08Q1EpR2wr{?{b)-C*WdW(}GEa9S3vTb&xAp%l;6N5ktyvHVLNHL>B24#x
z7RV5GIJ&w64YDA=FYN!QUk~&w3Q+W`XHG_97-nl>%FQ>ZJd6lo=<=%V@q*?xKPVU5
z6knlSKnNwXSFQW{Cy?45W8r=OX9J`cW<7Lub%Ski0-}49H~x=pmlIAPvU-Ap&|zoa
z72pAVzfA(wUFJwHtc@Sg@^3KwZVOoT)^GMZHU(JGcbP~)Rp+&;r%4bQ25H_Y_&?Z@
zA&2;1^eW_^egSxcG(=p`lSgcP7Cp#jnvQ*EVHvBN+oGh%W!hd)~t#uU}|uZ{Bw4l7n*cOpRxV)uG2aD%ETj
zX06Kj|@9=}I4&0}mPx&W*
zL%L4T7j}z8a-u)!y>lVN1C9H3C9{g^8_4TU)bEE3`oL-y4XO0~NC359;ne_VAJj)0
z|J#)e5MppeXApWzk-WM+6j1r~wnjSahypNd0Q0TlI}}s5BFQ`olKGJe*SNUz(o?MW
zhHhP4`1?XEtU~H})_N-l{k{*-L
zr*pdWrdjARK2KslI(udzmw0Ayv5&K#=UCV7&
z0V^yU+Z#M4sq01?W>yo_tWXxG)0%O$F!&@f@{3PWFf1G-@o`E7E(;i1#3~ZFCC^Xgv~n!AYlGR33}TGti^huC#dY1q0-%KFS-^dFg2nPE-fWWh)mkEo5MOs
zO^%+kDM#i+&k@p^)RjP{vKINgs;Z3phR;9AEtV8%HYyZKJXVCS^<3h!ihK24`{or>
zy{GbgJl)a$zb++p*93Qm=nl6{w#L!hH>bV)*+fC(>;*~paYCR^w8D4{^2Ys0wduWO
z#xEzY??`sE>~!H}wzAj$bvWhe62>2=g89lJ%Per-Yx>$Wr^JH8B>mx~xn2(y6_yOk
z&z3(9l8OH4NAyNu#T{H!1S`ulRXTt2Up=U#*cOxYKIbVl3*%IRNC{dybKag?GRsD2
zh*6E1M}_*#fFwtzeXVcCo
zCETlD$SKaH&zw*U)Wb-!FU9I6s`RRxHouvXujg#Z8Cxu);fS)>x-XJ|`Gj;-d)2a7
zoqe62jYKE)YS_x%x&Kl2#k8aH#NVUe=(Pytq5q~m;R)l_4aR3qLOyo
zqe}yWwH~E9-+Qx_CZ{&HW
z9z^&`5;7V$!PC6m3)fm#P_v(MD=N6PrCqIF+05bgXo+T7L}i=n$@T0CY#jFRpA
zOH~~e2CAvq?@yIv61|v2dhYHBE;k=saRwR4tEm(v-^BkdsyUDD|CXqcB7>^>Sld2k
zh&wUc^2&?bE0c{+lDDTip3~mkoES^FYkyBRz}$XB*d;5G?)xr)Ia=J~w=1r086^7Y
zoKH!Xw^%j3O~{$M!eQsvEy)!Ap5?x0iGDmw5xsnMwo@XL-H;>Cl94@9Yib2o=!VT$
zA(tN>l4+_cDf~?SPYuu+bKrKMrjLuBtV62Z1Tlw^YOAuF^}{YMq5i!~M(vq)i&iNn
zr~O{`Q}2HRgFM$SB7DBG5)M=*0H58_{QKT+FfzXfnQ=->{^#fQJkZ0{6sGK!HV1|n
zrFGk|7An3Avx~3KE4De|3Q|na$Ls8S5pNNWt2
z-yK`VD^I82g5@fj6fvzoucaC^{wUEsR9y?xyTt8KB8U6n*)ku;uEg7B@#-yd6~UNL
zMsirm=-+n%Ak$(sj%)zrehFu8&iRI{g!8i{-hK4Gdaja56J}B30;dv4S$y#H4W8K+
zYKr$vZz3+P%EEF6JV^@@T7P@&e&CG{y}p3NgGVEDgbHQTyiQStz`}=Ejsg21yAJkN
zv&1B8X7I;pf9t8v7z*nG3VH_#R;T#PrD)0RNfx16yuBD{Pse#ooEg>><=p701w*9X
zHy^N}yVyxqaK{}0-?H?k$vZC?2kFv>TLMSK{+7W+XZ}lTKCI~Yi)fVEbe2opr;)#L
z`;2MIyifo0f?y#h?9?3AZSrSqWA)!JLu{?I5^2inwpW@(x>26>
zez;aMoJ2%#HmatP|1Nj>hZ{-V6<*JmswfSb-=ESGzW#y}YVYr?2uZ<8rQ+fMsCT&9
zv3(l>KP5LnlJ}M#N$<4V4Bz5OD<^01BK=m}l*ua#?s-KoOj=iIx>|jPg&XE-Z#Poa
z4})ev*9ggqu7pE*#@E*796Ug<-d%#1DM<<>?)}bMVNxs1qVs-!Sn$-)_$w{lYpHA|
z^Rz>O+{sKRMK5NIeGl@Kdp|p(;ZrLS*mY$H40QaSZa@AnH-wIU;-CMyQ1JU9TPrp0
zWPR2#nI?_&Qaw+Hh8TzXw(zl$?$m8pzD*tmvynhPs<~n|@ZS&hPWnB0bZo;%kN+$k
zHj??&WIZ`p7h*cIcuc0Hv<2;!ztHZf+v8@IO4^=jlgCsRqcV#;?_o;_uv=I1B5xTe
zCuA^nJoq=D7xmbcremybT2LbvGA6!w*pKpCw(|EPzIaoMfsV5YilmAQdd-O>S6|HO
z3ljqyPJ|=zQQ``UpMa^Blv^dAKX1UhHUi!$$WP2IwwIdO#rw}>vawUdt%*;|^
zoz~P(Qn(?M1XCA5XtfBJ|Mu*i(y3~bm2_tmJu5Iu8e!BGgi(dh
zF%hU+Hb3m4&2WYs3R*w-GjpU!q1N)WAK#t5CbV}Brnnv4DtJ0Ff-ikulS3$+mJF0U
z+RjrY=T2dN$X|!nY;wvTxn}XNN>(E8*f#gi{@B>RdKXDlo;9pKsk?o4-gKruWB!Zf
zoj2VaJO?BfEk6uW8K8Ku5f(!M`+X6k-{O1;acXwXE0HV;fY#t{-=z&GZbU*rsm{wY
zb1qCy{*v2Aww&SiD=AB6iYj;Lcsr6=z-yHDq0U{sgBuw;yNN`+?4&CIh|Hxw@tSo0
zSH@eMKfnKVxZ;qbZ@Sa<#HC|4DM2Tgg_=Tc1*^sXS~8YQHOvYs{GttQ<_#X_2?Sdt
zJ-vDV>f@Geo^k0Kmo&$AZ|}RdD4Z_imsKruhxy3M{dM7@_ozt=_OL25;Z&1@O
z=QwOQl<>0y`D`QdS%qgSpFIknz4&GpV1GT{A+1|0fgV~K;@&bE#}}sfoDW2!ZEYbJ
zq~~iG(;KTD#@fJt(Z6_tqyVR%Ao@+Yf=j1|wvFKLTGh#Vkesva
zndg*e|Maa6xpWxtn_8O!T2Ftv#e%8~C7#-D7(t9eRJI%jn`!5i>0GLRSxZg-mL*P0
z&vGExlAbgKx%&aG1@_}w>HWNs-}-PUc7n@(W0WTP!*!dIZ*1LjFPf=^|0<@QT8f;G
zv9Z?TWyT6*#oLae8};Zkt3cnNk`DQH>aN-^CGavTgY@_Ta0*AswP2cnUoUwN^S=nY
z>xlFn;}b&aJq?Eq-KIG3i7hcBE{OsD?I`hTMSH&NJ`PQ#wng>MU5GC4g91na>conh
zNrKi;5QW0i1OJ3k5Fj#D?!kc4sdC`1VyGmey|={J422TcFxs?^R0K~h>9yYB@b~+X
zb`F{EeaB7kjIJxvM1IxS{N~P$pJm>E=m@n04nKm|E}9Zf4?1bH`(aG2X47kC+o5Xn
zS#4glg~Fw)#+t_wb*_Qdq|6JwYO0mq&NG<7a4Rl4y-`aJZqrMOoiJAaY7^W?|H}SJ
zX0>IQf7H=NnUgkF%GrHgs1N*Diu*IsKW8CMeNBi{SJLsL0WfMp&GcxanXvCV`)J60
zJTjY=52rdoF0o%lGril+n^BQ(dU&z2?yG>_j(SQ;FT#(PiXnZmW~vp?4uvXj*lDiE
z|3;Dwm7%!-dugKQM~9Yptd?FC^=1g_jK?KSb9wb}>}By)5xH5`o+;g2up5(9P<97g
zI1%D)2T1Ys@bVy_zdqE!1nIDITBL4P-wUC1G8Cs+D
zF`H>@gKgtyku<;lV&)5AP=+x{Zj*y4udxAr{u&NFXNRsmu0wk7-<63XfvPvC{Bevj>9`vdqeeP$6i!=L8nF5QJyp+fpUC!VIZ+MEi=|%Igx%1@cuGcFS
z*8573c5zW~{v(wSxm1Q3SIKdd4*B&SjI>U_mul&*M~58GNPKOp%rL3Z^Lxqu=GWmF
z70ZN{`K(5M$)B}*Fv0!wY!v9M%guDH-kMVfj!?Qh5IwVvJb`CcFAOQ>e7ljPSn-Quw$o?7M_#cLVV|{;eYUr3$@sil
zpQ^Q_4QbA0v8xca93q4*=WqU~qiN@?e^0C-v{=EZ7&dV-+^v~WWOldF!uy+c`>TBR
z)+Ql|dXfUK%*W=2xfcB1h+Yazo%wY1@+?5lZj7gsu)zl-Wv;$p;@udbOi8=3d=H0;
zI^yrw)MAqtb;axHcIOG9?TnHx3#vmU6Ml)~ZvrR>?l_XMljz?JM~X8w*sjWg+~{{p
zS&1t~pKeDm!|8M0!Hdb+vZ)fL7980T`k!mdjN+6he92is{Smd^?FR{QHX=
z^fjmR-A_z2vV`i**%bBBFMgi3kQb|{Yr>AQO+VoOT?4V#3ovRvjV~XN5sh!9FxJD3
zwObHCrlbgyQd~(5w*BbKik4*CTuAop4QIn8*eTtFUvgD>#2oKE_@CmWQxxMsv&=Qw
z){K*=5_X~nTq}E}uD;L*QS*^ed?h9+bLwedQ4J?NM|KMQ;*r2lj)!;Y)w|
z=#RsYFCD6(Vn~%~AHF{Z^HhJo^tPd?wyIY85sg<*j7tp3RhPcA(PlC~RY(pn`COgs
zxCDi~*u+eu(wSR|`#g+tEN)=j1gEn5a49=os=0&D!vR$4;Y?gz@-EjOF9R)
zN4jijF<S47VDa^%z`OB=3!gq7MmVPi~1R!}WRQi&bB35A&EBX|$`htmSp;?#w97
z`sK*&2G2lOa~g=zGeBH0u@V=YgX&6g1oXNSpx0e!3wOo;4pVg&)ET;AkhmZ^U6sZr
zAyBn@RMs!~^39NvA=iO3Rze!$rr1-nhGDPU7qbURQ`t&PQ*V=!Kr+FGsAyAztaP6k31YMwCHxGBocmNfLLm|#@lfknP>{NfQr+Qf>*ZZ%8*SGIV$sH
zhE{NFU4evyz4a3&+ZfZO^Zs$K>_Q}6HMAmBMn@x_7EPB7U$Dt3JB5$V8kfzC>9WeX
zUmKd>HdM#cr8gVDvuplXFaRH0p{&ErW0}>Z9Ou)FB*rHrdE!fb7ZYUhMuPbB17S4v
zCRZ%8o{YT`2eT~JB$ZG8ndT*jO_Fj;LZ@D=mQzvIxOwF!*Elv%N~;Wq%i?IvW1fHN
zzgWsTzo`C#?l4D>2*=mwAeDWMm9(~RAd)9pacv!i+roc4`%U#D6Mb*5tY~KOa6z3y
zXbcl))p?wWtgGJ_1yx%IH7=))no{rdg`QsC!k!}ULVX49dPDzqqXnf2em(
zS6k||rhi;x&Z$-_6;0RZ0XD`U&Vj?D{8inx3UtF-TIyVxc?af4<~$sKjiIQ6EI1
z=a83@K)J>dFCN$J{BOY4v%g$7d_vOhmXkTk^7H+9*&Eeo)MMywhE1rMw-Gfzbe|;Q
z%RyDAh?RdDG9P{7M_krhmJKtFZrJG+2fPb2ykWS^{otE20K6LJP(3`Gr_AuP{s7G)
zQ}!{MLltVHzA9`pk>V2Ci4FndwPl0MZrA1ly6gE`mhfqXx*~&bT*^zk^KB~fi{p{7
zKs;^{)w>vMN~FkB>WK?dq;=@(Wq(ny3)7l?w_Z!BC+D^1Wbe#W5P#jLcHWx!Zw~Hf
z_oE^g#6GAGzL}PsrZS7pmnsrgmjOTEkry`r}G(*6Zf<)t#)QdC4N?JK2Fq5lf?KVmB0C1ylV53f|;h~
zZcG`zAJd!HCM+D!KOG1ibLGXvm$XMDLQI-+BALY}B(
zta^NL({@(nONJ;T|C_h;g_2>yC2{!YDE~gB616-YV`se$ZtGGxRm-uY7Bn7SN&Iz;
z^hZARXYkVumgwb)j8xMXc#%tKSWY+#O3)>E5+4L_KJ4;Zb*f%Y;Szq|$>I0qp
zvSl!n-Hs27SQDl@ob9#_o#D%5ZBLqyPvvk#N&EBAlieOxtb#a5d3bKRDH2>!+_}H5
zs3D9TnqL_|AyY>tGye___dJWEJ&5+x<|8dN_|V5QF{k!sxag=Nmd;>aTIKEu9wTmJ
zwR_Wi>9ryz+O|syn*K31ONu!eSKp=k%?(ZOESVJXl{lb6+M;fK%U8caWTC-sRPDEb
zx`C?(I;ogR@HGT*Z=IUnHW@B0na+?1&h!Q7w8bu60h!-b@3{|N=*JZoMRQbD;7!z7E7MzRl
z_Gm%loI1({4}%Wv{KraACoShP7IW+4-DcwZV#P)Eyo}TI0LS|FOzOQG!QD9anRcJ=
ze1WKeAD2?5{ePjuEw6NmW#U>=)k=cO$VaLg%Umc%KF0m_QE|-n=Jb#2=b>$Tgxi_Z
zT)HIYP|-$egw9I3Y8>Q#M_esXvnXJsq*~}jZ;0oIlo`o68^7L?Z2o33{Rk&Z*|y-Z
zW{3K^Ax_%Bk&3`KYQFFmA-hJp-Sj1-i~>I_VK`giG~3=YeG&>_nw(P5<@~~mHHCR(
z1@8#Qy$Ke&VG61jpabn%UwZjZxT+~;Gt|RB6FQ{D#r0drOXwO~trGwIm&}JE>KWT1
zgrQ)Yd3#-&)sL+{kSV102fvARnTt-*py!BjqX28IbA@5Lt}!lvw7D}Tj40sqR7cZ<
zU&gPkMY3#HjCpt%?WEGP1xA)Wv!q_Lc&I04l;rxzs4eGS`Rxf6*#!s8LVB6q&Okvo
zJJP{00slq=X8h)kLetJE(p>jh;Ms+HDu
z&oyVRg1Sp$HpBgMA8+DSI;t%0pAhOoW%_2!)nyJpJcijPaQUuon_|thxNW?SeiyAa
zN7H+#!;!u_nq}EPGxekL`drYMdse=wlUnf5c~8YACsqYN9T%qQUhgQs#ZGv%f7#>g3H1AUD-m~rZt8sw~t;ZdT6t#uYe35svBh&S)
zXOltMYB~dcef{e*?vf&s$vp=BrQ1lG=1;4}TtT1xuDQX^#4$&``kPTp(JFIp7a!?O
z-c`mtWwOmj^M%)?jZYWjXUwMV5<97r?hB6z7EWcBorr0jOkzwCNeig$w!JPs(Udt#
z{rfrZzTN|ha#n*JsBz5RLixElHQ%!fRxQKny%H~S6pgvl0AMj4&z%eu9_cm0`;RGd
zI5+;TG*_5>TCTJVPXMzy)b2n%gamqPe{M*4mxJ=ZzyJ^YV+w=MD(hv5saCQR(P2%?
zh%3`#w6p{m_bTx;8oP|n=0KemcevCC6EyA6wg7gLsWq`QW1^ekNe5
zz=%Lpk~^5L(efNy9!D^=zdFA%K>-Wt7X6vp9~f>>jzBywcZz<3+YGL?XY9?0i-wXa=G}D?n`qsxq
z%R(8ljR^<-K*Y%|HyH2w$=oj?Wkl;Xo{#)0gzAc=Sc%v(j;H!EWn?rzw`%|N3D;!x
zJ`?jgwxX3}$^UbJ_!ZqZjWR{T=&7bLhsd!nV&2j!J;lJ>35O8PqUW#WNumc9tt508
zml~aRHGltXkuRn8bG$%r%)b4Joxl^%F2NJfkn*3D4dl)dz%|e;3V3q9j>4f6QfzK%
z%}L#J!0Gr5PO~>=^iq5u&*4Xuw`*i8Gi^^_7ELgVEc9j<@|G%dFoXS#%JX%-_#d;n
zgxRX6VB_gX-+`;WP<
zCkAasa>Bukyx*-<=2EZBkJ2@JYnHLWpkB~V{Fq@$y;Cv{=bKgJt?kLFuMNL_$2}r@
zD*a^CaOOS90w-^)FT8W(XCP0g6`QyGZf>I9`JX*XYt*L~f3?YiuFUBOSCVX9j1M0@
z)?w*e?_R&}p|p~b$d9-r$K}2Y+hrWCmu0Zr;IwRfoQl6wt?3?Q!VlUrba}Z0?mhZ4
zj_DZ&X^QRCb;Ps?pPKY)KKw1<%%unw44%s&Rv~WTb8E)@jnT};lTKUH;M!MC)csTtaQP
zsT-3k{%cEdgU@7&gT0(@ZCI>47I~rGdnEUjIx@s<03{$ovj6D?Gt4>MKf@wK0l9?_
za_{F))@xN*=Nqb*qo9p5Dbd$|;^U{<-j4nGyN{=POfeYuF6crROP_X^R#Vp~%|yc4CuA+#>jey
zwA~#aEZUKHN-0j>Y_i~I$z%&BcfD@Z?1R{8bYqjC3RD^KC(dMBeTeqZZ|1^Axa}Y1
zX*EE8duwJ!A1)ww!TSM!20hVS&dA)lIE)Y-g;4X)g{o?r1dHf?-Bd~O#m>fO7(HJ7
zkE1qs%4@s?aT23LO|(38*dKengih2vecH#TpFOwNqnLSgpjgqL2wasm=7s;IYav@B;;E#Fl;^Lw
zNz0e^j+2QaZa-qiG7;Su_$}~h`@SH+nAd+>aCsbW0An6rLF!Y_Rxu{7p(z5w;S$zN
z^QO)O+pZB)Tq#=TaBACUVx3yXOhs3`c=e~F!CY!jef&EQwTvh!wj?J9-x3+NS(N11
zo80x$ptBwqXY=Cul2nXYJx5h_m2_j%Iox8aJNe*~3Ue%vQO5g3ClgIKmVx<}sT$@G
zxu*@buW*L0tpgN$+rkanDEH!d&2lzX@N#lKqfwCtQGHsb^K}2r81yA
zien&jwF>4Z_0Ws9Rw5W$F}ol2I3d0y*_R4a5e6~6b;%rG;+?Qv8n#{B1_r4o9j=QA
z@Lon*Y29LXyXdH<40REsQfIc
z8)cG7+_eo-qb5Z0#3iihc(-m&-5Pe*0(SAyD0$DM(Q;L67|HF-$%8EhJ#(_Bt`!a$
znNLdg@E4U<-$Xa^n}lcc#3$p9$CW#{esJKr>^<3Bw
zL1mo?rcwXie@w7Q6uJ%~YO^vke>cu*ra77us*g&vdE(!lUSb*8=t5)DbqO`!_VW!_
zNr`2wkdq+9GU72-Q=CV}mPq*U(dmlM-7(B=%ASkp@L<)spdm*C#fo@w@vRElmnJ6*
z`Z;Zb)lTa8w9G$mW0R|r63nfLHLP~&RV;Kg%1QMwDoz{_df?Vzdv21-&>s6i&m$R2
zC>_YAFvHBI7&5lgd}uikB$kG`l*)ViArL&Z5(vVrw#MyAN6?B;8FI31$?7Stp<&AY
zsIT}2|M>nf`-Rg9-K5d#)7bX8uzDV+6sum@|BSu{*B_Dov0DTsY`A+TA2XYu&@J&k
z&a8&;q0%he@A+-bpAO(LZhVV#6Uz4;{wZ_0f|<>5+OF!f6o*KT2{1ApAY}{67$W
zQ9r1h?#-!;-r2^vuY8VX{9SZQO}+8XBB8X~rV54Gnbe(|`&&>WfRQ;QY
z9i-V%?K)3UmC_TJPZ9~Ez(G6f4stU6z0o0X2Y?E0?Ep|6TLG#k4g6k0#-;tyLdE#l
z!(#QPBx2iG52Cfnlp#X(9-s!Ng!p8}mJ?;?$}d
zMlzXyEwqe$nBprvkExsCwL&vT9qyByZ)erw)tZPY80?}?*DjyVE>5g7RKKWiU=Rry
zW7a8O1N}>x3cW;l`0tPVX9~IAM1N0WRKY#3*<1dWKo32m$k@;AiM{wR!qE3k_wDSn
zHz2_~MTUT>+_Si&c6Tg8hc4UO)lq~fD<>s5nb#*V8C^}PcA(7&iP0!iqj170&hxhT
zWcOB8)7~#4y>EQ~1itqY39Ey^jDlB{vPD|3B5}0xZCM&wMMLe|oslIL8AZXtF|!Oj
zvZZAjh7*sR^*hH$49T-gzqec}3psBPdSmYLtW?uyusKJeY@{#3~J-O
zx4W(Y(UMu`yy%p1VYD@pClu)Ol`rH!oRg5yO(tC~`7&6q*@jR`j&^
zTcQmcP0Czht3yf=Yj_(9=1Lv7`0OggYa@lTPND>+@)F^({ZW@ZH8Jxbf10rX6=dAU
zWqH_&DF~*Q2EM?g)$K!T5LA8Cxx(liT4yf)OxV!GaIf>Mg`GqzKBUJ{H_EAYCN~a>
z6~i$$o;fBR?ytv;+Kk27nN9l2O;JGxvAj=bf?xeo42f-#E$xZCW*ecR{igA7dLN6p
zuawGj+kTd>y%e$JYHeAn7M+;%9)}0YZ)+2Jr!vUvF?3m`2{uIfazApXVx`~q3JK7^
zHjiX$yjMIXbpR(Tfwc=3{M9{JI4
zpRQB)Y=hp)Vymyd@shV9)-YrCa_x}XYrR?W6LdHKmT=24Bh@D_!{yZm-VjQ_
zcHF7FhF{rhNVBm+b&Kq&srUstV*}wU_C}qSUt1#G+tOw`Ukst=e$CR0bW5NV8k=gd
z%?-iwwL#mDv$0oITwAO$jeC_`ie?G0cP*_p#9g%D#ic|92$_&HyW;EZ8T>>TvBX~A
zQ*B1ua=G!F)*#+t^BcDNX8Dw)pxt||AlVICmT%rVk4D+_$x8h%&KHm+yx
zo2-!44>6khI6c*`_P^p?*`Pro=YUII=(~(uv+p>Il#g-)I>VQv23JV`B$J`-yp8hF
zN`*MT?%1?wJ|Pn!-5Zj4tG1XvZDA_Nm?@xSh|57N3SXC~F{_BmsK2*==)e#+q;N?N
zO3J9ySfjLfUaazffKWW2s=liu{K&13UP7sdS8RKDJ~N$y=I#WZx@|}7%HO-BrJv%P
zc`DWAIbx#YC*DjhX;f1C8@niBn>nrHWHI*hcduqMjgT4BG!)b1+S{CNmW}-9r8+V(
zYZ{(}<2o90di+k45u13sQq-@d<_|M2@H(I<{e1T2`bLwHsVE5T1eWihohpLRPMq=~
zxYdi`1OCMSPa2fFk;sBoM66F`Vaj(OONl4*tXJOXBbbpbB6LAr94$LaQ!?{b$7ZowW$sC@(6LqrjK+4Y
zU{k#ZzLu8nIr*)9G&r#Px#@Y6=&xDEur1-bQpVZC%x(o@e}6CVpwHofR$P})Bit#N
zwNW3Iw+S-|D)o~Qd>ofvShAp*%6VqqT9|9j&Wim_H!nPN+K9dqIs}#eL35~>`z$AB
zlBeIdhm`zydApIN=#tU
zVi9Dh<^*DQ3UqB&g-+KnCH(eLO}>lYG`mr^WtsDm!9xp@@wOT$A<=HompPnj3z$yZ)KZtI+KKwPhn4I6_^b27;Ux1HwP#dT9)Kt
zrIfT{uF9V0mSv;&>=QYjv?$XMuVtBeAk$u!-*)Ix;hY@|EtOi)R;3*3Q=Nwk&{-7A
z3~3g0rq6HA+T!I1Z`YR(u2grQG&Ge9ksZ(A9ZNrFU!3(J-UuCB4{+ga8GMs1#NnsT
zWEn9v{zaymh2j!X`*q&^@x?TskPiFQ055y|kx287YhkIe(Z8Z>Mma5ODoPwQCwaG;
z#-|l8>NUj09*95x(__2t?MdE4)gPfFXO6mO8K{9Otj3Gz-!BjDQC4j?uT^q$&mOdZ
zQ8>JgU0<}PYDaZ5&KJFLc|6q&5Sl-U_EReGhNQNE8hAsyun
zC3}@7K7DvG8ZA6UQXR4I3@74Ffs*$4_?~JII)+ZX(-IXGj2Y7lly4}uz~~921b?bg
z($KInc*v4Uxdn!J|GuMhPtSi^^u9TC^D68=H{>>O-~(bze2PMbnVcG=C;#K|qjqhi
zHFYYy7Sfu^o_QzvU`eN;UYq%)DsP(baKA5j!zaoB&3WV$o}OiAkZNwFn!1sLz`(H
zE^G{X*|R+A*?b!EoSo)S2k&30`y5U6H6eX`MX~0`^~Awnc(w__bk$%!`cR(h1vP@5
zZ=~j>r*Sc=xM6{MmEn_7_
z7dx;y_kNMFX0g5O)7@it)^vXVj3E@kGnPDN?_RDqjlG1Wh09BA&zDm+B3Xts$~|yz
zZwuTW`rBK~uj3kXe%C)pXwE9o*vNOz@^A@6aXWr()h$!2;%%gnjpbM}M~yy#A_LN$
z3M%9bXjVHcoEKg%IX#K7cTMk2zvXXzFEeYF>{Jffxndm6k+eaJPRDo*-Y~UBWy#Rh
z;aJpnUP+P}gQ}4!u2x=k17CSM-BTiR>4v8DtTp*~7fF2kG2-}_Mr|=uj}$7}?K)d(
zuJUT?1I(LTJq?M+<9&ich
z@tR!u8DM6~loDq)|8!p0tmcDJiVfb)6CYYa^-Z4XQ~5qePN|@@kq5WQR~3M4J2zp4oj4rnMPD(HEQbTyZA)v&ZWBz#C7j%>5P>Y-tGpoYhe
zssdk$H&Wx{iuoT|Zy6Bfy1fs>(A}Mq(g+Tn3KG&v4&5+xOP6$r(j9_QLw5}=B_Psi
zfWXk*{2%t-XaCN5-!I0u=U(@U>$=uj_Y7t~Od*(vOdPx#-L+XhT{XI6YSC6*IZ+;_
zFSD7B{Go}OscSKu+y-$faaA%&das^2+bJ~jc{OM}JNBHjPGUGx)&0xg^QBqtjmcf(
zMmFiCT4!88*>l%f7i~(o%1*ZjyWG7!6NO;V7520_v9VLWn{~O_o*}=VC_r;56+mu(
zZJBdF5ND*{O47jlsx}X=K>d4feScABqT)-@!8n*3BsbusxyAD~F!wskYI{HYIwA9E
z8B2>oYS-0Q@juJ3ctqx-ZFmZls57%mszY^z3&ozh@zAm%OVZ?93$G*qJ;z?}qJF|8
zjk67d7nrexzZ|Ss7T@lr)+&6$Zo`PUIcZV5O{<9{z**aG%JmV(p^EV02
ztirC5V5T)i0}-U=;kdl5SM{(V
z{PhSm6v2%GboY<|Y5qU5RKtgJIBpSQ7$LowCv~*uw!X#w+t-P`FZnqEy#Q;FP*v;Z
z_Ly#m|L?&rPTIx@5(?_vH}ivOapR@;*}^AU8OAL0%HI1pG-GW<>7BvgAhZ>L(tD@R
zJGLxRd@#KC0-95cqC|u?*IdVz+P{jbz8LMF8jdHoi!CxxS#%EV0l!o@%azi#4CtBr
zebQfCXhJCK`^Okt$!(Tj=E(s;weU|+{oqyu+TKjOH4R9H|FQ>Kj)S*Zu%w%#q6x!N
zcfMBYT4L;{_r~A=B^YZYU5;Is+qM4f&$p|RKR-uCd=u;v65?OUySL}<`f3QG+-X)*QRw5WjnpMj>QX^17D~Nco$sSFkdpv
zY;n#vK6Mi#F>V62w%gjDd<@Pz=^RO3`QG*AZVj^UvwKhvbMq9gTuiSp4)8OW_cmw{
zdUvm~cPanAL++cFciyXS+8forM(_&jJ#lWOJIS}L>7eVKA8Kk}EZm3fejbZlY+c!qzaG7pUBz-ljuB!F5%Gn!r
z&Y<@t(`_3LtKddpZL5JYvi?XisNv)Lk3sML+!+Yw>E6w84mo_v5Q>`}xI4%dT|DO;
z$!szbaY3LJLgO4KbAh`(YxchW1l(;U|37z2`3BSt7TEmw3&=6`0OI;L&!kEc5<>_m
z)6pZ}Pw6B|mR9AOZ0|WIdH7#exiuJ;FtA3$Xy_Q)e&kBGTb_S>DfTp(w=1AIbB{)U
zZdTgn?MsBEZ-Zg%dKs~P+d565_t#%*w{cK5$&K2?Pe(xK(SqQ&_~4n-a-exY1VOSO
z9zNTryt|j(&1BO9E9dEy{SBdn(2D9#lXC`B}x`w`GEuRGR6%0FOdHhn3qc~kQV|3Hj
zHv;pr0=|t(i)+*@c9lB%u>}?fikV>S+hz0mZ;G`53&zUx`^%iAW+e(LLy(Oev8#-+
z#U|l|H!E_pTRRbM9jTiJu+z=dqDR0sh|)i<#s3W;bSz(nED#YSuhpI3Bgi7VoPPd5
z`-51*apw5RFLoyg+)pa7ECal=_Ls8}Ss+k{)--&FIkqAE-X~-&e`cGnOW$*{{waeT
z&qf*PCY)?pEW)l%H%OKVZm?_GDvi3NmK*l#?WeqAGfW%kJE7WUZGby91D1}
z!SE9?_U-YwahF_5i}yUCSGS#1!1ao3x{i~#X^C!-=N$ZY#o#BeSNdTPnHs^3Sz4_oIhy$q1D
zYTTF;Q-C}C)ZIDHxLM3)xi_^qO&nV_3bK6A@X_Z{?ipRo
zpn~q_(d7_n-6uh@x*L-1tPtUqZ%;^!Am*Iwu~D#M!_)+6V3ntx^aPn$tX
zm9On230Udtu>=;X=9d_NbF@C3<^Rr;-Z3ZksQ&0`{ER@N`Ie*M=`B}U*TfRnO8Ynz
zt1Gixer2@W_p|w3>n+*!5!T`$HS-8LkL~xfE=6mKFQ@+ACib=1=XN|lnA{ZWnD`G9
ziH=%T#Cd9*U!J9*l&%^~E&bq`H&qo=_8F`F?ZG3h4ZbZ}$~qUNPfrPpefpj4u7-7<
zbIN&_Cf9xO7RQ+(Yleq33YMf(h-VQ6bClT+Nd!8h5X=8KrRg({`(E}n|KZ~qivgXM
z(IELh?T1tAqI1Lyfa>w}=K}*30N5vwpJ^)Kj;|gbS}6N@Ip==de%K{B`~KVgRfk=d
zk}xFKIOS90MEhamMrF{_t(>5CivqOCAH{`!itOSLrTF8n^2yfHdufLG`_0BF@;eX5
z4?^qGSjkRMp(Ty^Am@9umdge=aoFhUIR*n|qp)jR*%P6w=(asb{Yzl8r3ArTh^QY@
zK1MHu+NBnjwUeMM(W*eABq#f{ZHjtmt`d&McANR%+;Jue7bG;+Hw$D
z8b}8f!mli{2715Tf4wvbXz9zf7%@?m7!?3n|3r`{Fn;~bGxZvSCCg;H{P|d>y8SBa
zIdt^BaN8esPyPPUSLS~oMvyGMxjz`vo2lEb>^-~_Rwi^;TH}6i0jcWp*qM;7mnaWf
zcWtB2O{tEoK!$=En
zPT=bYf~@F~(_(de-sIG=Wvx^tJwRB981?+*&))5+Txi#6x(&1~9p5I;EyR5LUiGL^
z&9RTmn(ePeAw+Fa`=whBu;d^}Gu&ovl;1pe_3M>ButB=saLBH7TJE+5X3H~~@p-d0
z`zht+ePV}uNMd=}*}e0wJ6lyF2gig~zQ(&}b{ZNn#;;ixK3>2FwO=92Du2-8<=ngt
zBWRv`)jy~W^VOVu7TPYSa#EJhq&gg5$Yemq`;ipFsFD9RmHeB~ki=Kx`|*sEq>T0r
zS8MA$LBvv`F3WXA$Ju_9n!s_6B-C%00}HirG(sM&D!owUox|3eUG5IH*iWwQp_$B;
zZma7%$yY~jfyLRgyI(G$I~gfAAquFm?>(s0-|xJ^X2G0OmFo&GftZt>G%wT~Pj5{s
zcOc%Cy#NKpXHca3ny#!ouWbiQeCaDkNB5FyzRb#_AEs*DE5`_dbq5HbwxZ99e-NbD
z+|CmR8P(0Yxy`lmmB`HQOpk5iztPN0QlY{hrfuBAc`R-;_+G~p8i6J}#2-?5xA|D}
z1s7vAI9>Y*y#v}UdD7UveAusD1FkRYvbud3>XYGowHxxdjW_~HsCe0+-djPAAIK8?
z?Nx4mx6eM@XSLs}W}Ch{7gS|LGpE6=3Utso>Z&yRtRN(~h^m;l%%mNH!6UnZL3&cb
z)YR6z7&9ChHA8f!gNy`qg?}Z13OkCvrlHtuDg1#%A^$n6>Gt}dC4
zl_#p!nD3cu`NvylV{Rw{Kd2{FM>^Le=tF51McjbH&-6AVBSUtaMqUxOHowcD3B*!iA*Vcvd9!l^u-)8bVM<
z_UpIAW$<+LXFP(lpw}u74mCWpiG2xT58HeXKbr=aB{7M5_P+PBt=1(@14XFK*4)#mDC`UK+ubt*Gh82R7`&TS
zMR`3^a$&)69qBS6!ny6l1Y&tpsGfcP_iE3)gwdz_+YI#P9Ib?zyeNlBC5_~mv8RC*
z%$J_`zbUSV#7{m%c3pNgp}`Nv(@8r(1J;DOXq!}_E2GsWs9Q$Aw`D7RG+g_G%TMxS
z5ZzJAxB~=*A*JeU5i^y>MCZ)oNl&P=>HQG!57UR4G236>L5i5m4EjCs7RwAmpif^l
z0KTY93v#3@=vD}OqDz$UZ4~1TrO;)oV{>JnOIxTV4$6LBv17MS;O;VbVO%Dh63o_v
zK&eE%;LR$lvW9fbAxJk-*h64}(hL2u>=O+>25D0&AfsD%NIv4iKy5T<@0rEOo1wb!
z*2PY=flq)rbyF?miPti#Iy2xiM9xBJtqIJ{@ELzD*NecvqXxAxq8MEzq_Wpnt7D2t
z^GVa!;yEzyiI#bp5YcdW|DFHrlXmwMFj0R;)dknHTDy08afDvR8eZleq)8vOt7PV5_U6LN>pC$33Pmm{1~{`5h3{O&hNWY*X1zX#r1jbo1*J2Efo$4x!04=Qg2oGX4uz6
zR-ATv3_(UXMbIaO*d9C)!|TG4GP?H?6q{lOBPu`yilYHGO8g}U*eJ1Y9zGLr{6%ec
zG6u(C>=QP@5?A#!108D&e5U$)uzt0tp0>5UUc>N0gav{3uPZP9F09}r6LI{PR4R=L
z0RbYB=P=&@t%5hrUB&0)VW