From 52750ab571fa352d7eb521fd079f85821b056b0e Mon Sep 17 00:00:00 2001
From: tarokkk
Date: Fri, 13 Sep 2019 22:44:11 +0200
Subject: [PATCH 01/35] initial logging-operator v2

---
 .circleci/config.yml | 156 --
 .github/ISSUE_TEMPLATE/bug_report.md | 27 -
 .github/ISSUE_TEMPLATE/feature_request.md | 20 -
 .github/PULL_REQUEST_TEMPLATE.md | 34 -
 Dockerfile | 50 +-
 Gopkg.lock | 1012 ---------
 Gopkg.toml | 79 -
 LICENCE => LICENSE | 2 +-
 Makefile | 126 +-
 Makefile.app | 6 +
 PROJECT | 25 +
 README.md | 88 +-
 api/v1alpha2/clusterflow_types.go | 44 +
 api/v1alpha2/clusterflow_types_test.go | 81 +
 api/v1alpha2/clusteroutput_types.go | 50 +
 api/v1alpha2/clusteroutput_types_test.go | 84 +
 api/v1alpha2/common_types.go | 22 +
 api/v1alpha2/flow_types.go | 63 +
 api/v1alpha2/flow_types_test.go | 81 +
 api/v1alpha2/fluentbit_types.go | 55 +
 api/v1alpha2/fluentd_types.go | 58 +
 api/v1alpha2/groupversion_info.go | 34 +
 api/v1alpha2/logging_types.go | 178 ++
 api/v1alpha2/output_types.go | 63 +
 api/v1alpha2/output_types_test.go | 82 +
 api/v1alpha2/suite_test.go | 73 +
 .../v1alpha2}/zz_generated.deepcopy.go | 509 +++--
 build/Dockerfile | 15 -
 build/bin/entrypoint | 12 -
 build/bin/user_setup | 13 -
 charts/logging-operator-fluent/Chart.yaml | 18 -
 .../templates/_helpers.tpl | 32 -
 .../templates/fluentbit-cr.yaml | 33 -
 .../templates/fluentd-cr.yaml | 37 -
 .../templates/psp.yaml | 73 -
 .../templates/rbac.yaml | 71 -
 .../templates/secret.yaml | 22 -
 charts/logging-operator-fluent/values.yaml | 45 -
 .../.helmignore | 0
 charts/logging-operator-logging/Chart.yaml | 5 +
 .../README.md | 29 +-
 .../templates/NOTES.txt | 0
 .../templates/_helpers.tpl | 19 +-
 .../templates/logging.yaml | 38 +
 .../templates/secret.yaml | 34 +
 charts/logging-operator-logging/values.yaml | 35 +
 charts/logging-operator/Chart.yaml | 19 +-
 charts/logging-operator/README.md | 35 +-
 .../logging-dashboard_rev1.json | 1069 ----------
 charts/logging-operator/templates/NOTES.txt | 0
 .../logging-operator/templates/_helpers.tpl | 13 +
 .../logging-operator/templates/configmap.yaml | 16 -
 charts/logging-operator/templates/crd.yaml | 109 -
 .../templates/deployment.yaml | 74 +-
 .../templates/grafana-dashboard-logging.yaml | 12 -
 .../logging.banzaicloud.com_clusterflows.yaml | 139 ++
 ...ogging.banzaicloud.com_clusteroutputs.yaml | 1795 ++++++++++++++++
 .../logging.banzaicloud.com_flows.yaml | 142 ++
 .../logging.banzaicloud.com_loggings.yaml | 332 +++
 .../logging.banzaicloud.com_outputs.yaml | 1790 ++++++++++++++++
 charts/logging-operator/templates/psp.yaml | 6 +-
 charts/logging-operator/templates/rbac.yaml | 151 +-
 .../logging-operator/templates/service.yaml | 18 +
 charts/logging-operator/values.yaml | 83 +-
 charts/nginx-logging-demo/Chart.yaml | 8 -
 charts/nginx-logging-demo/README.md | 63 -
 charts/nginx-logging-demo/templates/NOTES.txt | 24 -
 .../templates/deployment.yaml | 51 -
 .../nginx-logging-demo/templates/ingress.yaml | 39 -
 .../nginx-logging-demo/templates/logging.yaml | 37 -
 .../nginx-logging-demo/templates/service.yaml | 19 -
 .../templates/tests/test-connection.yaml | 18 -
 charts/nginx-logging-demo/values.yaml | 56 -
 charts/nginx-logging-es-demo/.helmignore | 22 -
 charts/nginx-logging-es-demo/Chart.yaml | 8 -
 charts/nginx-logging-es-demo/README.md | 63 -
 .../nginx-logging-es-demo/templates/NOTES.txt | 24 -
 .../templates/_helpers.tpl | 32 -
 .../templates/deployment.yaml | 51 -
 .../templates/ingress.yaml | 39 -
 .../templates/logging.yaml | 37 -
 .../templates/service.yaml | 19 -
 .../templates/tests/test-connection.yaml | 18 -
charts/nginx-logging-es-demo/values.yaml | 49 - cmd/docgen/docgen.go | 92 - cmd/docs.go | 300 +++ cmd/manager/main.go | 137 -- config/certmanager/certificate.yaml | 24 + config/certmanager/kustomization.yaml | 26 + config/certmanager/kustomizeconfig.yaml | 16 + .../logging.banzaicloud.com_clusterflows.yaml | 139 ++ ...ogging.banzaicloud.com_clusteroutputs.yaml | 1881 +++++++++++++++++ .../bases/logging.banzaicloud.com_flows.yaml | 142 ++ .../logging.banzaicloud.com_loggings.yaml | 332 +++ .../logging.banzaicloud.com_outputs.yaml | 1876 ++++++++++++++++ config/crd/kustomization.yaml | 37 + config/crd/kustomizeconfig.yaml | 17 + .../patches/cainjection_in_clusterflows.yaml | 8 + .../cainjection_in_clusteroutputs.yaml | 8 + config/crd/patches/cainjection_in_flows.yaml | 8 + .../patches/cainjection_in_fluentbits.yaml | 8 + .../crd/patches/cainjection_in_fluentds.yaml | 8 + .../crd/patches/cainjection_in_loggings.yaml | 8 + .../crd/patches/cainjection_in_outputs.yaml | 8 + .../crd/patches/webhook_in_clusterflows.yaml | 17 + .../patches/webhook_in_clusteroutputs.yaml | 17 + config/crd/patches/webhook_in_flows.yaml | 17 + config/crd/patches/webhook_in_fluentbits.yaml | 17 + config/crd/patches/webhook_in_fluentds.yaml | 17 + config/crd/patches/webhook_in_loggings.yaml | 17 + config/crd/patches/webhook_in_outputs.yaml | 17 + config/default/kustomization.yaml | 43 + config/default/manager_auth_proxy_patch.yaml | 24 + config/default/manager_image_patch.yaml | 12 + .../manager_prometheus_metrics_patch.yaml | 19 + config/default/manager_webhook_patch.yaml | 23 + config/default/webhookcainjection_patch.yaml | 15 + config/manager/kustomization.yaml | 2 + config/manager/manager.yaml | 39 + config/rbac/auth_proxy_role.yaml | 13 + .../rbac/auth_proxy_role_binding.yaml | 16 +- config/rbac/auth_proxy_service.yaml | 18 + config/rbac/kustomization.yaml | 11 + config/rbac/leader_election_role.yaml | 26 + config/rbac/leader_election_role_binding.yaml | 12 + config/rbac/role.yaml | 28 + config/rbac/role_binding.yaml | 12 + ...ogging_v1alpha2_cluster_output_custom.yaml | 13 + config/samples/logging_v1alpha2_flow.yaml | 16 + .../samples/logging_v1alpha2_flow_custom.yaml | 14 + .../logging_v1alpha2_logging_custom.yaml | 12 + .../logging_v1alpha2_logging_default.yaml | 11 + config/samples/logging_v1alpha2_output.yaml | 6 + .../logging_v1alpha2_output_custom.yaml | 8 + config/webhook/kustomization.yaml | 6 + config/webhook/kustomizeconfig.yaml | 25 + config/webhook/manifests.yaml | 0 config/webhook/service.yaml | 12 + controllers/logging_controller.go | 283 +++ controllers/logging_controller_test.go | 552 +++++ controllers/suite_test.go | 142 ++ deploy/clusterrole.yaml | 60 - .../crds/logging_v1alpha1_fluentbit_cr.yaml | 19 - .../crds/logging_v1alpha1_fluentbit_crd.yaml | 33 - deploy/crds/logging_v1alpha1_fluentd_cr.yaml | 33 - deploy/crds/logging_v1alpha1_fluentd_crd.yaml | 33 - deploy/crds/logging_v1alpha1_plugin_cr.yaml | 31 - deploy/crds/logging_v1alpha1_plugin_crd.yaml | 33 - deploy/operator.yaml | 33 - deploy/service_account.yaml | 4 - developer.md | 68 - docs/crds.md | 276 +++ docs/developers.md | 154 ++ docs/example-s3.md | 114 + docs/examples/es.md | 50 - .../logging_flow_multiple_output.yaml | 11 + docs/examples/logging_flow_single_output.yaml | 10 + docs/examples/logging_flow_with_filters.yaml | 18 + docs/examples/logging_logging_simple.yaml | 9 + docs/examples/logging_logging_tls.yaml | 18 + .../examples/logging_output_azurestorage.yaml | 23 + docs/examples/logging_output_gcs.yaml | 19 + 
docs/examples/logging_output_s3.yaml | 26 + docs/examples/logging_output_sumologic.yaml | 14 + docs/examples/s3.md | 107 - docs/examples/tls.md | 101 - docs/img/helm_logo.png | Bin 166878 -> 0 bytes docs/img/ll_es.gif | Bin 987272 -> 0 bytes docs/img/lll.png | Bin 178126 -> 0 bytes docs/img/log_helm.gif | Bin 1158941 -> 0 bytes docs/img/log_man.png | Bin 219589 -> 0 bytes docs/img/logging-operator-v2-architecture.png | Bin 0 -> 950821 bytes ...ngo_flow.png => logging_operator_flow.png} | Bin docs/img/logo.png | Bin 102200 -> 0 bytes docs/img/s3_logo.png | Bin 139830 -> 0 bytes docs/model.md | 44 + docs/plugins/alibaba.md | 30 - docs/plugins/azure.md | 45 - docs/plugins/elasticsearch.md | 70 - docs/plugins/filters/parser.md | 24 + docs/plugins/filters/stdout.md | 3 + docs/plugins/filters/tagnormaliser.md | 21 + docs/plugins/forward.md | 64 - docs/plugins/gcs.md | 42 - docs/plugins/index.md | 24 + docs/plugins/loki.md | 27 - docs/plugins/outputs/azurestore.md | 13 + docs/plugins/outputs/buffer.md | 35 + docs/plugins/outputs/elasticsearch.md | 57 + docs/plugins/outputs/file.md | 4 + docs/plugins/outputs/format.md | 4 + docs/plugins/outputs/gcs.md | 27 + docs/plugins/outputs/loki.md | 11 + docs/plugins/outputs/oss.md | 24 + docs/plugins/outputs/s3.md | 76 + docs/plugins/outputs/secret.md | 22 + docs/plugins/outputs/sumologic.md | 18 + docs/plugins/parser.md | 24 - docs/plugins/s3.md | 69 - docs/plugins/stdout.md | 11 - example/cluster_forward.yaml | 18 - example/elasticsearch_output.yaml | 22 - example/forward.md | 56 - example/forward_tls.md | 83 - example/loki_output.yaml | 24 - example/stdout.yaml | 11 - example/tls-cluster-forward/cfssl-ca.json | 28 - example/tls-cluster-forward/cfssl-csr.json | 14 - example/tls-cluster-forward/gencert.sh | 19 - go.mod | 35 + go.sum | 224 ++ hack/boilerplate.go.txt | 13 + hack/minio-mc.yaml | 16 - hack/minio.yaml | 45 - hack/test-s3-output.yaml | 26 - hack/test.sh | 85 - main.go | 82 + pkg/apis/addtoscheme_logging_v1alpha1.go | 26 - pkg/apis/apis.go | 29 - pkg/apis/logging/v1alpha1/common_types.go | 24 - pkg/apis/logging/v1alpha1/doc.go | 20 - pkg/apis/logging/v1alpha1/fluentbit_types.go | 91 - pkg/apis/logging/v1alpha1/fluentd_types.go | 103 - .../logging/v1alpha1/loggingplugin_types.go | 140 -- pkg/apis/logging/v1alpha1/register.go | 35 - .../logging/v1alpha1/zz_generated.defaults.go | 32 - .../logging/v1alpha1/zz_generated.openapi.go | 350 --- pkg/controller/add_fluentbit.go | 26 - pkg/controller/add_fluentd.go | 26 - pkg/controller/add_loggingplugin.go | 26 - pkg/controller/controller.go | 34 - .../fluentbit/fluentbit_controller.go | 118 -- pkg/controller/fluentd/fluentd_controller.go | 114 - pkg/controller/plugin/plugin_controller.go | 111 - pkg/k8sutil/resource.go | 178 +- pkg/model/filter/parser.go | 87 + pkg/model/filter/stdout.go | 37 + pkg/model/filter/tagnormaliser.go | 36 + pkg/model/filter/zz_generated.deepcopy.go | 71 + pkg/model/input/fluenthelpers.go | 59 + pkg/model/input/forward.go | 62 + pkg/model/input/tail.go | 37 + pkg/model/input/zz_generated.deepcopy.go | 61 + pkg/model/output/azurestore.go | 56 + pkg/model/output/buffer.go | 112 + pkg/model/output/elasticsearch.go | 148 ++ pkg/model/output/file.go | 34 + pkg/model/output/format.go | 26 + pkg/model/output/gcs.go | 104 + pkg/model/output/loki.go | 49 + pkg/model/output/null.go | 37 + pkg/model/output/oss.go | 85 + pkg/model/output/s3.go | 233 ++ pkg/model/output/sumologic.go | 48 + pkg/model/output/zz_generated.deepcopy.go | 353 ++++ pkg/model/render/fluent.go | 89 + 
pkg/model/render/fluent_test.go | 571 +++++ pkg/model/render/interface.go | 23 + pkg/model/render/json.go | 46 + pkg/model/render/json_test.go | 125 ++ pkg/model/secret/secret.go | 91 + pkg/model/secret/zz_generated.deepcopy.go | 76 + pkg/model/types/builder.go | 52 + pkg/model/types/flow.go | 135 ++ pkg/model/types/router.go | 92 + pkg/model/types/stringmaps.go | 235 ++ pkg/model/types/stringmaps_test.go | 235 ++ pkg/model/types/types.go | 180 ++ pkg/plugins/plugin.go | 68 + pkg/resources/fluentbit/config.go | 40 +- pkg/resources/fluentbit/configmap.go | 87 - pkg/resources/fluentbit/configsecret.go | 87 + pkg/resources/fluentbit/daemonset.go | 83 +- pkg/resources/fluentbit/fluentbit.go | 64 +- pkg/resources/fluentbit/rbac.go | 47 +- pkg/resources/fluentd/appconfigmap.go | 207 +- pkg/resources/fluentd/config.go | 64 +- pkg/resources/fluentd/configmap.go | 89 +- pkg/resources/fluentd/configsecret.go | 68 + pkg/resources/fluentd/deployment.go | 187 -- pkg/resources/fluentd/fluentd.go | 132 +- pkg/resources/fluentd/pvc.go | 31 - pkg/resources/fluentd/rbac.go | 46 +- pkg/resources/fluentd/service.go | 33 +- pkg/resources/fluentd/statefulset.go | 218 ++ pkg/resources/model/system.go | 164 ++ pkg/resources/plugins/alibaba.go | 45 - pkg/resources/plugins/azure.go | 63 - pkg/resources/plugins/configmap.go | 133 -- pkg/resources/plugins/elasticsearch.go | 71 - pkg/resources/plugins/forward.go | 81 - pkg/resources/plugins/gcs.go | 59 - pkg/resources/plugins/init.go | 80 - pkg/resources/plugins/loki.go | 46 - pkg/resources/plugins/parser.go | 41 - pkg/resources/plugins/plugins.go | 64 - pkg/resources/plugins/s3.go | 79 - pkg/resources/plugins/stdout.go | 26 - pkg/resources/reconciler.go | 54 +- pkg/resources/templates/templates.go | 101 +- pkg/util/util.go | 57 +- scripts/check-header.sh | 35 + scripts/fmt-check.sh | 11 - scripts/generate.sh | 220 ++ scripts/misspell-check.sh | 12 - version/version.go | 22 - 306 files changed, 18999 insertions(+), 8791 deletions(-) delete mode 100644 .circleci/config.yml delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml rename LICENCE => LICENSE (99%) create mode 100644 Makefile.app create mode 100644 PROJECT create mode 100644 api/v1alpha2/clusterflow_types.go create mode 100644 api/v1alpha2/clusterflow_types_test.go create mode 100644 api/v1alpha2/clusteroutput_types.go create mode 100644 api/v1alpha2/clusteroutput_types_test.go create mode 100644 api/v1alpha2/common_types.go create mode 100644 api/v1alpha2/flow_types.go create mode 100644 api/v1alpha2/flow_types_test.go create mode 100644 api/v1alpha2/fluentbit_types.go create mode 100644 api/v1alpha2/fluentd_types.go create mode 100644 api/v1alpha2/groupversion_info.go create mode 100644 api/v1alpha2/logging_types.go create mode 100644 api/v1alpha2/output_types.go create mode 100644 api/v1alpha2/output_types_test.go create mode 100644 api/v1alpha2/suite_test.go rename {pkg/apis/logging/v1alpha1 => api/v1alpha2}/zz_generated.deepcopy.go (50%) delete mode 100644 build/Dockerfile delete mode 100755 build/bin/entrypoint delete mode 100755 build/bin/user_setup delete mode 100644 charts/logging-operator-fluent/Chart.yaml delete mode 100644 charts/logging-operator-fluent/templates/_helpers.tpl delete mode 100644 charts/logging-operator-fluent/templates/fluentbit-cr.yaml delete mode 100644 charts/logging-operator-fluent/templates/fluentd-cr.yaml 
delete mode 100644 charts/logging-operator-fluent/templates/psp.yaml delete mode 100644 charts/logging-operator-fluent/templates/rbac.yaml delete mode 100644 charts/logging-operator-fluent/templates/secret.yaml delete mode 100644 charts/logging-operator-fluent/values.yaml rename charts/{nginx-logging-demo => logging-operator-logging}/.helmignore (100%) create mode 100644 charts/logging-operator-logging/Chart.yaml rename charts/{logging-operator-fluent => logging-operator-logging}/README.md (63%) create mode 100644 charts/logging-operator-logging/templates/NOTES.txt rename charts/{nginx-logging-demo => logging-operator-logging}/templates/_helpers.tpl (62%) create mode 100644 charts/logging-operator-logging/templates/logging.yaml create mode 100644 charts/logging-operator-logging/templates/secret.yaml create mode 100644 charts/logging-operator-logging/values.yaml delete mode 100644 charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json create mode 100644 charts/logging-operator/templates/NOTES.txt delete mode 100644 charts/logging-operator/templates/configmap.yaml delete mode 100644 charts/logging-operator/templates/crd.yaml delete mode 100644 charts/logging-operator/templates/grafana-dashboard-logging.yaml create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml create mode 100644 charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml create mode 100644 charts/logging-operator/templates/service.yaml delete mode 100644 charts/nginx-logging-demo/Chart.yaml delete mode 100644 charts/nginx-logging-demo/README.md delete mode 100644 charts/nginx-logging-demo/templates/NOTES.txt delete mode 100644 charts/nginx-logging-demo/templates/deployment.yaml delete mode 100644 charts/nginx-logging-demo/templates/ingress.yaml delete mode 100644 charts/nginx-logging-demo/templates/logging.yaml delete mode 100644 charts/nginx-logging-demo/templates/service.yaml delete mode 100644 charts/nginx-logging-demo/templates/tests/test-connection.yaml delete mode 100644 charts/nginx-logging-demo/values.yaml delete mode 100644 charts/nginx-logging-es-demo/.helmignore delete mode 100644 charts/nginx-logging-es-demo/Chart.yaml delete mode 100644 charts/nginx-logging-es-demo/README.md delete mode 100644 charts/nginx-logging-es-demo/templates/NOTES.txt delete mode 100644 charts/nginx-logging-es-demo/templates/_helpers.tpl delete mode 100644 charts/nginx-logging-es-demo/templates/deployment.yaml delete mode 100644 charts/nginx-logging-es-demo/templates/ingress.yaml delete mode 100644 charts/nginx-logging-es-demo/templates/logging.yaml delete mode 100644 charts/nginx-logging-es-demo/templates/service.yaml delete mode 100644 charts/nginx-logging-es-demo/templates/tests/test-connection.yaml delete mode 100644 charts/nginx-logging-es-demo/values.yaml delete mode 100644 cmd/docgen/docgen.go create mode 100644 cmd/docs.go delete mode 100644 cmd/manager/main.go create mode 100644 config/certmanager/certificate.yaml create mode 100644 config/certmanager/kustomization.yaml create mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/crd/bases/logging.banzaicloud.com_clusterflows.yaml create mode 100644 config/crd/bases/logging.banzaicloud.com_clusteroutputs.yaml create 
mode 100644 config/crd/bases/logging.banzaicloud.com_flows.yaml create mode 100644 config/crd/bases/logging.banzaicloud.com_loggings.yaml create mode 100644 config/crd/bases/logging.banzaicloud.com_outputs.yaml create mode 100644 config/crd/kustomization.yaml create mode 100644 config/crd/kustomizeconfig.yaml create mode 100644 config/crd/patches/cainjection_in_clusterflows.yaml create mode 100644 config/crd/patches/cainjection_in_clusteroutputs.yaml create mode 100644 config/crd/patches/cainjection_in_flows.yaml create mode 100644 config/crd/patches/cainjection_in_fluentbits.yaml create mode 100644 config/crd/patches/cainjection_in_fluentds.yaml create mode 100644 config/crd/patches/cainjection_in_loggings.yaml create mode 100644 config/crd/patches/cainjection_in_outputs.yaml create mode 100644 config/crd/patches/webhook_in_clusterflows.yaml create mode 100644 config/crd/patches/webhook_in_clusteroutputs.yaml create mode 100644 config/crd/patches/webhook_in_flows.yaml create mode 100644 config/crd/patches/webhook_in_fluentbits.yaml create mode 100644 config/crd/patches/webhook_in_fluentds.yaml create mode 100644 config/crd/patches/webhook_in_loggings.yaml create mode 100644 config/crd/patches/webhook_in_outputs.yaml create mode 100644 config/default/kustomization.yaml create mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_image_patch.yaml create mode 100644 config/default/manager_prometheus_metrics_patch.yaml create mode 100644 config/default/manager_webhook_patch.yaml create mode 100644 config/default/webhookcainjection_patch.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml create mode 100644 config/rbac/auth_proxy_role.yaml rename deploy/clusterrole_binding.yaml => config/rbac/auth_proxy_role_binding.yaml (64%) create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml create mode 100644 config/rbac/leader_election_role_binding.yaml create mode 100644 config/rbac/role.yaml create mode 100644 config/rbac/role_binding.yaml create mode 100644 config/samples/logging_v1alpha2_cluster_output_custom.yaml create mode 100644 config/samples/logging_v1alpha2_flow.yaml create mode 100644 config/samples/logging_v1alpha2_flow_custom.yaml create mode 100644 config/samples/logging_v1alpha2_logging_custom.yaml create mode 100644 config/samples/logging_v1alpha2_logging_default.yaml create mode 100644 config/samples/logging_v1alpha2_output.yaml create mode 100644 config/samples/logging_v1alpha2_output_custom.yaml create mode 100644 config/webhook/kustomization.yaml create mode 100644 config/webhook/kustomizeconfig.yaml create mode 100644 config/webhook/manifests.yaml create mode 100644 config/webhook/service.yaml create mode 100644 controllers/logging_controller.go create mode 100644 controllers/logging_controller_test.go create mode 100644 controllers/suite_test.go delete mode 100644 deploy/clusterrole.yaml delete mode 100644 deploy/crds/logging_v1alpha1_fluentbit_cr.yaml delete mode 100644 deploy/crds/logging_v1alpha1_fluentbit_crd.yaml delete mode 100644 deploy/crds/logging_v1alpha1_fluentd_cr.yaml delete mode 100644 deploy/crds/logging_v1alpha1_fluentd_crd.yaml delete mode 100644 deploy/crds/logging_v1alpha1_plugin_cr.yaml delete mode 100644 deploy/crds/logging_v1alpha1_plugin_crd.yaml delete mode 100644 deploy/operator.yaml delete mode 100644 deploy/service_account.yaml delete mode 100644 
developer.md create mode 100644 docs/crds.md create mode 100644 docs/developers.md create mode 100644 docs/example-s3.md delete mode 100644 docs/examples/es.md create mode 100644 docs/examples/logging_flow_multiple_output.yaml create mode 100644 docs/examples/logging_flow_single_output.yaml create mode 100644 docs/examples/logging_flow_with_filters.yaml create mode 100644 docs/examples/logging_logging_simple.yaml create mode 100644 docs/examples/logging_logging_tls.yaml create mode 100644 docs/examples/logging_output_azurestorage.yaml create mode 100644 docs/examples/logging_output_gcs.yaml create mode 100644 docs/examples/logging_output_s3.yaml create mode 100644 docs/examples/logging_output_sumologic.yaml delete mode 100644 docs/examples/s3.md delete mode 100644 docs/examples/tls.md delete mode 100644 docs/img/helm_logo.png delete mode 100644 docs/img/ll_es.gif delete mode 100644 docs/img/lll.png delete mode 100644 docs/img/log_helm.gif delete mode 100644 docs/img/log_man.png create mode 100644 docs/img/logging-operator-v2-architecture.png rename docs/img/{loggingo_flow.png => logging_operator_flow.png} (100%) delete mode 100644 docs/img/logo.png delete mode 100644 docs/img/s3_logo.png create mode 100644 docs/model.md delete mode 100644 docs/plugins/alibaba.md delete mode 100644 docs/plugins/azure.md delete mode 100644 docs/plugins/elasticsearch.md create mode 100644 docs/plugins/filters/parser.md create mode 100644 docs/plugins/filters/stdout.md create mode 100644 docs/plugins/filters/tagnormaliser.md delete mode 100644 docs/plugins/forward.md delete mode 100644 docs/plugins/gcs.md create mode 100644 docs/plugins/index.md delete mode 100644 docs/plugins/loki.md create mode 100644 docs/plugins/outputs/azurestore.md create mode 100644 docs/plugins/outputs/buffer.md create mode 100644 docs/plugins/outputs/elasticsearch.md create mode 100644 docs/plugins/outputs/file.md create mode 100644 docs/plugins/outputs/format.md create mode 100644 docs/plugins/outputs/gcs.md create mode 100644 docs/plugins/outputs/loki.md create mode 100644 docs/plugins/outputs/oss.md create mode 100644 docs/plugins/outputs/s3.md create mode 100644 docs/plugins/outputs/secret.md create mode 100644 docs/plugins/outputs/sumologic.md delete mode 100644 docs/plugins/parser.md delete mode 100644 docs/plugins/s3.md delete mode 100644 docs/plugins/stdout.md delete mode 100644 example/cluster_forward.yaml delete mode 100644 example/elasticsearch_output.yaml delete mode 100644 example/forward.md delete mode 100644 example/forward_tls.md delete mode 100644 example/loki_output.yaml delete mode 100644 example/stdout.yaml delete mode 100644 example/tls-cluster-forward/cfssl-ca.json delete mode 100644 example/tls-cluster-forward/cfssl-csr.json delete mode 100755 example/tls-cluster-forward/gencert.sh create mode 100644 go.mod create mode 100644 go.sum create mode 100644 hack/boilerplate.go.txt delete mode 100644 hack/minio-mc.yaml delete mode 100644 hack/minio.yaml delete mode 100644 hack/test-s3-output.yaml delete mode 100755 hack/test.sh create mode 100644 main.go delete mode 100644 pkg/apis/addtoscheme_logging_v1alpha1.go delete mode 100644 pkg/apis/apis.go delete mode 100644 pkg/apis/logging/v1alpha1/common_types.go delete mode 100644 pkg/apis/logging/v1alpha1/doc.go delete mode 100644 pkg/apis/logging/v1alpha1/fluentbit_types.go delete mode 100644 pkg/apis/logging/v1alpha1/fluentd_types.go delete mode 100644 pkg/apis/logging/v1alpha1/loggingplugin_types.go delete mode 100644 pkg/apis/logging/v1alpha1/register.go delete mode 
100644 pkg/apis/logging/v1alpha1/zz_generated.defaults.go delete mode 100644 pkg/apis/logging/v1alpha1/zz_generated.openapi.go delete mode 100644 pkg/controller/add_fluentbit.go delete mode 100644 pkg/controller/add_fluentd.go delete mode 100644 pkg/controller/add_loggingplugin.go delete mode 100644 pkg/controller/controller.go delete mode 100644 pkg/controller/fluentbit/fluentbit_controller.go delete mode 100644 pkg/controller/fluentd/fluentd_controller.go delete mode 100644 pkg/controller/plugin/plugin_controller.go create mode 100644 pkg/model/filter/parser.go create mode 100644 pkg/model/filter/stdout.go create mode 100644 pkg/model/filter/tagnormaliser.go create mode 100644 pkg/model/filter/zz_generated.deepcopy.go create mode 100644 pkg/model/input/fluenthelpers.go create mode 100644 pkg/model/input/forward.go create mode 100644 pkg/model/input/tail.go create mode 100644 pkg/model/input/zz_generated.deepcopy.go create mode 100644 pkg/model/output/azurestore.go create mode 100644 pkg/model/output/buffer.go create mode 100644 pkg/model/output/elasticsearch.go create mode 100644 pkg/model/output/file.go create mode 100644 pkg/model/output/format.go create mode 100644 pkg/model/output/gcs.go create mode 100644 pkg/model/output/loki.go create mode 100644 pkg/model/output/null.go create mode 100644 pkg/model/output/oss.go create mode 100644 pkg/model/output/s3.go create mode 100644 pkg/model/output/sumologic.go create mode 100644 pkg/model/output/zz_generated.deepcopy.go create mode 100644 pkg/model/render/fluent.go create mode 100644 pkg/model/render/fluent_test.go create mode 100644 pkg/model/render/interface.go create mode 100644 pkg/model/render/json.go create mode 100644 pkg/model/render/json_test.go create mode 100644 pkg/model/secret/secret.go create mode 100644 pkg/model/secret/zz_generated.deepcopy.go create mode 100644 pkg/model/types/builder.go create mode 100644 pkg/model/types/flow.go create mode 100644 pkg/model/types/router.go create mode 100644 pkg/model/types/stringmaps.go create mode 100644 pkg/model/types/stringmaps_test.go create mode 100644 pkg/model/types/types.go create mode 100644 pkg/plugins/plugin.go delete mode 100644 pkg/resources/fluentbit/configmap.go create mode 100644 pkg/resources/fluentbit/configsecret.go create mode 100644 pkg/resources/fluentd/configsecret.go delete mode 100644 pkg/resources/fluentd/deployment.go delete mode 100644 pkg/resources/fluentd/pvc.go create mode 100644 pkg/resources/fluentd/statefulset.go create mode 100644 pkg/resources/model/system.go delete mode 100644 pkg/resources/plugins/alibaba.go delete mode 100644 pkg/resources/plugins/azure.go delete mode 100644 pkg/resources/plugins/configmap.go delete mode 100644 pkg/resources/plugins/elasticsearch.go delete mode 100644 pkg/resources/plugins/forward.go delete mode 100644 pkg/resources/plugins/gcs.go delete mode 100644 pkg/resources/plugins/init.go delete mode 100644 pkg/resources/plugins/loki.go delete mode 100644 pkg/resources/plugins/parser.go delete mode 100644 pkg/resources/plugins/plugins.go delete mode 100644 pkg/resources/plugins/s3.go delete mode 100644 pkg/resources/plugins/stdout.go create mode 100755 scripts/check-header.sh delete mode 100755 scripts/fmt-check.sh create mode 100755 scripts/generate.sh delete mode 100755 scripts/misspell-check.sh delete mode 100644 version/version.go diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 7853fbb8e..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,156 +0,0 @@ -# Golang CircleCI 2.0 
configuration file -# -# Check https://circleci.com/docs/2.0/language-go/ for more details -version: 2.1 - -orbs: - helm: banzaicloud/helm@0.0.5 - - -jobs: - build: - machine: - image: circleci/classic:201808-01 - docker_layer_caching: true - - environment: - GO_VERSION: '1.11.4' - K8S_VERSION: 'v1.12.0' - VAULT_VERSION: '1.0.0' - KUBECONFIG: '/home/circleci/.kube/config' - MINIKUBE_VERSION: 'v0.31.0' - MINIKUBE_WANTUPDATENOTIFICATION: 'false' - MINIKUBE_WANTREPORTERRORPROMPT: 'false' - MINIKUBE_HOME: '/home/circleci' - CHANGE_MINIKUBE_NONE_USER: 'true' - GOPATH: '/home/circleci/go' - - working_directory: /home/circleci/go/src/github.com/banzaicloud/logging-operator - - steps: - - checkout - - - run: - name: Setup golang - command: | - sudo rm -rf /usr/local/go - curl \ - -Lo go.linux-amd64.tar.gz \ - "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" \ - && sudo tar -C /usr/local -xzf go.linux-amd64.tar.gz - echo 'export PATH="$GOPATH/bin:$PATH"' >> "${BASH_ENV}" - - - run: - name: Run fmt - command: | - make check-fmt - - - run: - name: Run golint - command: | - make lint - - - run: - name: Run misspell - command: | - make check-misspell - - - run: - name: Run ineffassign - command: | - make ineffassign - - - run: - name: Setup kubectl - command: | - curl \ - -Lo kubectl \ - "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl" \ - && chmod +x kubectl \ - && sudo mv kubectl /usr/local/bin/ - mkdir -p "${HOME}/.kube" - touch "${HOME}/.kube/config" - - - run: - name: Setup minikube - command: | - curl \ - -Lo minikube \ - "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64" \ - && chmod +x minikube \ - && sudo mv minikube /usr/local/bin/ - - - run: - name: Start minikube - command: | - sudo -E minikube start --vm-driver=none --cpus 2 --memory 4096 --kubernetes-version="${K8S_VERSION}" - - - run: - name: Install Helm - command: | - curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh - chmod 700 get_helm.sh - ./get_helm.sh - - helm init - helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master - helm repo update - - - run: - name: Start Minio - command: | - kubectl create -f hack/minio.yaml - kubectl wait --for=condition=available deployment/minio-deployment --timeout=120s - minio="$(kubectl get pod -l app=minio -o 'jsonpath={.items[0].metadata.name}')" - kubectl wait --for=condition=Ready pod "${minio}" --timeout=120s - - - run: - name: Setup minio cli - command: | - kubectl create -f hack/minio-mc.yaml - kubectl wait --for=condition=available deployment/minio-mc-deployment --timeout=120s - mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')" - kubectl wait --for=condition=Ready pod "${mc_pod}" --timeout=120s - kubectl exec "${mc_pod}" -- \ - mc config host add minio \ - 'http://minio-service.default.svc.cluster.local:9000' \ - 'minio_access_key' \ - 'minio_secret_key' - - - run: - name: Create test bucket - command: | - mc_pod="$(kubectl get pod -l app=minio-mc -o 'jsonpath={.items[0].metadata.name}')" - kubectl exec "${mc_pod}" -- \ - mc mb --region 'test_region' minio/logs - - - run: - name: Build docker image - command: | - make docker DOCKER_TAG=local - - - run: - name: Test - command: | - hack/test.sh - -workflows: - version: 2 - build: - jobs: - - build - helm-chart: - jobs: - - helm/lint-chart: - filters: - tags: - ignore: /.*/ - - - helm/publish-chart: - context: helm - filters: - 
-            branches:
-              ignore: /.*/
-            tags:
-              only: /chart\/.*\/\d+.\d+.\d+.*/
-
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 8e266a19c..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-name: Bug report
-about: Report a bug or features that are not working as intended
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index bbcbbe7d6..000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 1da98c4e1..000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,34 +0,0 @@
-| Q               | A
-| --------------- | ---
-| Bug fix?        | no|yes
-| New feature?    | no|yes
-| API breaks?     | no|yes
-| Deprecations?   | no|yes
-| Related tickets | fixes #X, partially #Y, mentioned in #Z
-| License         | Apache 2.0
-
-
-### What's in this PR?
-
-
-
-### Why?
-
-
-
-### Additional context
-
-
-
-### Checklist
-
-
-- [ ] Implementation tested (with at least one cloud provider)
-- [ ] Error handling code meets the [guideline](https://github.com/banzaicloud/pipeline/blob/master/docs/error-handling-guide.md)
-- [ ] Logging code meets the guideline (TODO)
-- [ ] User guide and development docs updated (if needed)
-- [ ] Related Helm chart(s) updated (if needed)
-
-### To Do
-
-- [ ] If the PR is not complete but you want to discuss the approach, list what remains to be done here
diff --git a/Dockerfile b/Dockerfile
index 7178a61af..71ce731a8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,24 +1,26 @@
-FROM golang:1.11-alpine as golang
-
-RUN apk add --update --no-cache ca-certificates curl git make
-RUN go get -u github.com/golang/dep/cmd/dep
-
-ADD Gopkg.toml /go/src/github.com/banzaicloud/logging-operator/Gopkg.toml
-ADD Gopkg.lock /go/src/github.com/banzaicloud/logging-operator/Gopkg.lock
-
-WORKDIR /go/src/github.com/banzaicloud/logging-operator
-RUN dep ensure -v -vendor-only
-ADD . /go/src/github.com/banzaicloud/logging-operator
-RUN go install ./cmd/manager
-
-
-FROM alpine:3.8
-
-RUN apk add --no-cache ca-certificates
-
-COPY --from=golang /go/bin/manager /usr/local/bin/logging-operator
-
-RUN adduser -D logging-operator
-USER logging-operator
-
-ENTRYPOINT ["/usr/local/bin/logging-operator"]
\ No newline at end of file
+# Build the manager binary
+FROM golang:1.13 as builder
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY main.go main.go
+COPY api/ api/
+COPY controllers/ controllers/
+COPY pkg/ pkg/
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
+
+# Use distroless as minimal base image to package the manager binary
+# Refer to https://github.com/GoogleContainerTools/distroless for more details
+FROM gcr.io/distroless/static:latest
+WORKDIR /
+COPY --from=builder /workspace/manager .
+ENTRYPOINT ["/manager"]
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index a8c55c94b..000000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,1012 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  digest = "1:2173c429b0c4654deb4f3e8d1f503c374f93a6b5549d74f9cba797c1e787f8e4"
-  name = "cloud.google.com/go"
-  packages = ["compute/metadata"]
-  pruneopts = "NT"
-  revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
-  version = "v0.36.0"
-
-[[projects]]
-  digest = "1:25870183293a3fb61cc9afd060a61d63a486f091db72af01a8ea3449f5ca530d"
-  name = "github.com/Masterminds/goutils"
-  packages = ["."]
-  pruneopts = "NT"
-  revision = "41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0"
-  version = "v1.1.0"
-
-[[projects]]
-  digest = "1:a26f8da48b22e6176c1c6a2459904bb30bd0c49ada04b2963c2c3a203e81a620"
-  name = "github.com/Masterminds/semver"
-  packages = ["."]
-  pruneopts = "NT"
-  revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
-  version = "v1.4.2"
-
-[[projects]]
-  digest = "1:b3bf7ebdab400adfa4d81687848571417ded5618231ef58124adf5544cab5e59"
-  name = "github.com/Masterminds/sprig"
-  packages = ["."]
-  pruneopts = "NT"
-  revision = "b1fe2752acccf8c3d7f8a1e7c75c7ae7d83a1975"
-  version = "v2.18.0"
-
-[[projects]]
-  digest = "1:0a111edd8693fd977f42a0c4f199a0efb13c20aec9da99ad8830c7bb6a87e8d6"
-  name = "github.com/PuerkitoBio/purell"
-  packages = ["."]
-  pruneopts = "NT"
-  revision = "44968752391892e1b0d0b821ee79e9a85fa13049"
-  version = "v1.1.1"
-
-[[projects]]
-  branch = "master"
-  digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727"
-  name = "github.com/PuerkitoBio/urlesc"
-  packages = ["."]
-  pruneopts = "NT"
-  revision = "de5bf2ad457846296e2031421a34e2568e304e35"
-
-[[projects]]
-  digest = "1:680b63a131506e668818d630d3ca36123ff290afa0afc9f4be21940adca3f27d"
-  name = "github.com/appscode/jsonpatch"
-  packages = ["."]
-  pruneopts = "NT"
-  revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2"
-  version = "1.0.0"
-
-[[projects]]
-  branch = "master"
-  digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07"
-  name = "github.com/beorn7/perks"
-  packages = ["quantile"]
-  pruneopts = "NT"
-  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
-  digest = "1:c61f4f97321a37adcb5b4fd4fd61209cd553e46c99ee606c465553541b12a229"
-  name =
"github.com/coreos/prometheus-operator" - packages = [ - "pkg/apis/monitoring", - "pkg/apis/monitoring/v1", - "pkg/client/versioned/scheme", - "pkg/client/versioned/typed/monitoring/v1", - ] - pruneopts = "NT" - revision = "72ec4b9b16ef11700724dc71fec77112536eed40" - version = "v0.26.0" - -[[projects]] - digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "NT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:2453249730493850718f891fb40b8f1bc932a0265384fc85b269dc04a01d4673" - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log", - ] - pruneopts = "NT" - revision = "85d198d05a92d31823b852b4a5928114912e8949" - version = "v2.9.0" - -[[projects]] - digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "NT" - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9" - name = "github.com/go-logr/logr" - packages = ["."] - pruneopts = "NT" - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - -[[projects]] - digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687" - name = "github.com/go-logr/zapr" - packages = ["."] - pruneopts = "NT" - revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" - version = "v0.1.0" - -[[projects]] - digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - pruneopts = "NT" - revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" - version = "v0.18.0" - -[[projects]] - digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" - name = "github.com/go-openapi/jsonreference" - packages = ["."] - pruneopts = "NT" - revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" - version = "v0.18.0" - -[[projects]] - branch = "master" - digest = "1:8f80caf2fa31f78a035f33981c9685013033073b53f344f579e60fa69f0c6670" - name = "github.com/go-openapi/spec" - packages = ["."] - pruneopts = "NT" - revision = "53d776530bf78a11b03a7b52dd8a083086b045e5" - -[[projects]] - digest = "1:dc0f590770e5a6c70ea086232324f7b7dc4857c60eca63ab8ff78e0a5cfcdbf3" - name = "github.com/go-openapi/swag" - packages = ["."] - pruneopts = "NT" - revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909" - version = "v0.18.0" - -[[projects]] - digest = "1:9059915429f7f3a5f18cfa6b7cab9a28721d7ac6db4079a62044aa229eb7f2a8" - name = "github.com/gobuffalo/envy" - packages = ["."] - pruneopts = "NT" - revision = "fa0dfdc10b5366ce365b7d9d1755a03e4e797bc5" - version = "v1.6.15" - -[[projects]] - digest = "1:0b39706cfa32c1ba9e14435b5844d04aef81b60f44b6077e61e0607d56692603" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "NT" - revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" - version = "v1.2.1" - -[[projects]] - branch = "master" - digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "NT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - branch = "master" - digest = "1:20b774dcfdf0fff3148432beb828c52404f3eb3d70b7ce71ae0356ed6cbc2bae" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "NT" - revision = 
"5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b" - -[[projects]] - digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "NT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" - name = "github.com/google/btree" - packages = ["."] - pruneopts = "NT" - revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" - -[[projects]] - branch = "master" - digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "NT" - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "NT" - revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" - version = "v1.1.0" - -[[projects]] - digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions", - ] - pruneopts = "NT" - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - digest = "1:c44f4c3b105e9a06f278c0d12982c915d12cd1537d252391904444777a6791df" - name = "github.com/goph/emperror" - packages = ["."] - pruneopts = "NT" - revision = "4cdd86c173cfed1f47be88bd88327140f81bcede" - version = "v0.16.0" - -[[projects]] - branch = "master" - digest = "1:bb7bd892abcb75ef819ce2efab9d54d22b7e38dc05ffac55428bb0578b52912b" - name = "github.com/gregjones/httpcache" - packages = [ - ".", - "diskcache", - ] - pruneopts = "NT" - revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f" - -[[projects]] - digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "NT" - revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" - version = "v0.5.0" - -[[projects]] - digest = "1:dc54242755f5b6721dd880843de6e45fe234838ea9149ec8249951880fd5802f" - name = "github.com/huandu/xstrings" - packages = ["."] - pruneopts = "NT" - revision = "f02667b379e2fb5916c3cda2cf31e0eb885d79f8" - version = "v1.2.0" - -[[projects]] - digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f" - name = "github.com/imdario/mergo" - packages = ["."] - pruneopts = "NT" - revision = "7c29201646fa3de8506f701213473dd407f19646" - version = "v0.3.7" - -[[projects]] - digest = "1:f5b9328966ccea0970b1d15075698eff0ddb3e75889560aad2e9f76b289b536a" - name = "github.com/joho/godotenv" - packages = ["."] - pruneopts = "NT" - revision = "23d116af351c84513e1946b527c88823e476be13" - version = "v1.3.0" - -[[projects]] - digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "NT" - revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "v1.1.5" - -[[projects]] - digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed" - name = "github.com/konsorten/go-windows-terminal-sequences" - packages = ["."] - pruneopts = "NT" - revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = 
"1:4925ec3736ef6c299cfcf61597782e3d66ec13114f7476019d04c742a7be55d0" - name = "github.com/mailru/easyjson" - packages = [ - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "NT" - revision = "6243d8e04c3f819e79757e8bc3faa15c3cb27003" - -[[projects]] - digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" - name = "github.com/markbates/inflect" - packages = ["."] - pruneopts = "NT" - revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" - version = "v1.0.4" - -[[projects]] - digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "NT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "NT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "NT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:df8e741cd0f86087367f3bcfeb1cf237e96fada71194b6d4cee9412d221ec763" - name = "github.com/operator-framework/operator-sdk" - packages = [ - "pkg/k8sutil", - "pkg/leader", - "pkg/log/zap", - "pkg/metrics", - "version", - ] - pruneopts = "NT" - revision = "6754b70169f1b62355516947270e33b9f73d8159" - version = "v0.5.0" - -[[projects]] - digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "NT" - revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" - version = "v1.2" - -[[projects]] - branch = "master" - digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31" - name = "github.com/petar/GoLLRB" - packages = ["llrb"] - pruneopts = "NT" - revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" - -[[projects]] - digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b" - name = "github.com/peterbourgon/diskv" - packages = ["."] - pruneopts = "NT" - revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" - version = "v2.0.1" - -[[projects]] - digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "NT" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "NT" - revision = "505eaef017263e299324067d40ca2c48f6a2cf50" - version = "v0.9.2" - -[[projects]] - branch = "master" - digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "NT" - revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" - -[[projects]] - digest = "1:30261b5e263b5c4fb40571b53a41a99c96016c6b1b2c45c1cefd226fc3f6304b" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "NT" - revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = 
"1:1c282f5c094061ce301d1ea3098799fc907ac1399e9f064c463787323a7b7340" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "iostats", - "nfs", - "xfs", - ] - pruneopts = "NT" - revision = "6ed1f7e1041181781dd2826d3001075d011a80cc" - -[[projects]] - digest = "1:fcef1ce61da6f8f6f115154fb0e0e5b159fe11656839ba1e6061372711c013ee" - name = "github.com/rogpeppe/go-internal" - packages = [ - "modfile", - "module", - "semver", - ] - pruneopts = "NT" - revision = "1cf9852c553c5b7da2d5a4a091129a7822fed0c9" - version = "v1.2.2" - -[[projects]] - digest = "1:1f84287a4ca2c8f729d8155ba4c45915f5854ebbd214e406070779753da68422" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "NT" - revision = "e1e72e9de974bd926e5c56f83753fba2df402ce5" - version = "v1.3.0" - -[[projects]] - digest = "1:1bc08ec221c4fb25e6f2c019b23fe989fb44573c696983d8e403a3b76cc378e1" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "NT" - revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5" - version = "v1.2.1" - -[[projects]] - digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "NT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" - -[[projects]] - digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "NT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "NT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "NT" - revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" - version = "v1.9.1" - -[[projects]] - branch = "master" - digest = "1:b19fb19351db5de242e3f1203e63c207c69bf4f4df4822b4ef15220e0204e0e4" - name = "golang.org/x/crypto" - packages = [ - "pbkdf2", - "scrypt", - "ssh/terminal", - ] - pruneopts = "NT" - revision = "7f87c0fbb88b590338857bcb720678c2583d4dea" - -[[projects]] - branch = "master" - digest = "1:60c1f5371132225f21f849a13e379d55c4512ac9ed4b37e7fa33ea0fedeb8480" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - ] - pruneopts = "NT" - revision = "fe579d43d83210096a79b46dcca0e3721058393a" - -[[projects]] - branch = "master" - digest = "1:22a51305a9f13b8c8ca91c335a0da16a1a7b537155e677e45d7905465e457e87" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt", - ] - pruneopts = "NT" - revision = "529b322ea34655aa15fb32e063f3d4d3cf803cac" - -[[projects]] - branch = "master" - digest = "1:90abfd79711e2d0ce66e6d23a1b652f8e16c76e12a2ef4b255d1bf0ff4f254b8" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] - pruneopts = "NT" - revision = "cc5685c2db1239775905f3911f0067c0fa74762f" - -[[projects]] - digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - 
"internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width", - ] - pruneopts = "NT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "NT" - revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" - -[[projects]] - branch = "master" - digest = "1:dfddac8ab4fec08ac3679d4f64f6054a6be3c849faf6ea05e525e40f7aeeb133" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "go/gcexportdata", - "go/internal/cgo", - "go/internal/gcimporter", - "go/internal/packagesdriver", - "go/packages", - "go/types/typeutil", - "imports", - "internal/fastwalk", - "internal/gopathwalk", - "internal/module", - "internal/semver", - ] - pruneopts = "NT" - revision = "2dc4ef2775b8122dd5afe2c18fd6f775e87f89e5" - -[[projects]] - digest = "1:902ffa11f1d8c19c12b05cabffe69e1a16608ad03a8899ebcb9c6bde295660ae" - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "NT" - revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" - version = "v1.4.0" - -[[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "NT" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "NT" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" - -[[projects]] - digest = "1:6fa82ea248029bbbdddade20c06ab177ff6e485e5e45e48b045707415b7efd34" - name = "k8s.io/api" - packages = [ - "admission/v1beta1", - "admissionregistration/v1alpha1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "auditregistration/v1alpha1", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "NT" - revision = "05914d821849570fba9eacfb29466f2d8d3cd229" - -[[projects]] - digest = "1:c6f23048e162e65d586c809fd02e263e180ad157f110df17437c22517bb59a4b" - name = "k8s.io/apiextensions-apiserver" - packages = [ - "pkg/apis/apiextensions", - "pkg/apis/apiextensions/v1beta1", - ] - pruneopts = "NT" - revision = "0fe22c71c47604641d9aa352c785b7912c200562" - -[[projects]] - digest = "1:15b5c41ff6faa4d0400557d4112d6337e1abc961c65513d44fce7922e32c9ca7" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - 
"pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/uuid", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/reflect", - ] - pruneopts = "NT" - revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" - -[[projects]] - digest = "1:c904a3d70131b33df36e4e51b574226b82308fc1ea66964aa21095a95d453fc9" - name = "k8s.io/client-go" - packages = [ - "discovery", - "dynamic", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1alpha1", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth/exec", - "plugin/pkg/client/auth/gcp", - "rest", - "rest/watch", - "restmapper", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/leaderelection", - "tools/leaderelection/resourcelock", - "tools/metrics", - "tools/pager", - "tools/record", - "tools/reference", - "transport", - "util/buffer", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - "util/integer", - "util/jsonpath", - "util/retry", - "util/workqueue", - ] - pruneopts = "NT" - revision = "8d9ed539ba3134352c586810e749e58df4e94e4f" - -[[projects]] - digest = "1:dc1ae99dcab96913d81ae970b1f7a7411a54199b14bfb17a7e86f9a56979c720" - name = "k8s.io/code-generator" - packages = [ - "cmd/client-gen", - "cmd/client-gen/args", - "cmd/client-gen/generators", - "cmd/client-gen/generators/fake", - "cmd/client-gen/generators/scheme", - "cmd/client-gen/generators/util", - "cmd/client-gen/path", - "cmd/client-gen/types", - 
"cmd/conversion-gen", - "cmd/conversion-gen/args", - "cmd/conversion-gen/generators", - "cmd/deepcopy-gen", - "cmd/deepcopy-gen/args", - "cmd/defaulter-gen", - "cmd/defaulter-gen/args", - "cmd/informer-gen", - "cmd/informer-gen/args", - "cmd/informer-gen/generators", - "cmd/lister-gen", - "cmd/lister-gen/args", - "cmd/lister-gen/generators", - "pkg/util", - ] - pruneopts = "T" - revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae" - -[[projects]] - branch = "master" - digest = "1:2b9071c93303f1196cfe959c7f7f69ed1e4a5180f240a259536c5886f79f86d4" - name = "k8s.io/gengo" - packages = [ - "args", - "examples/deepcopy-gen/generators", - "examples/defaulter-gen/generators", - "examples/set-gen/sets", - "generator", - "namer", - "parser", - "types", - ] - pruneopts = "T" - revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266" - -[[projects]] - digest = "1:29f93bb84d907a2c035e729e19d66fe52165d8c905cb3ef1920140d76ae6afaf" - name = "k8s.io/klog" - packages = ["."] - pruneopts = "NT" - revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" - version = "v0.2.0" - -[[projects]] - digest = "1:c48a795cd7048bb1888273bc604b6e69b22f9b8089c3df65f77cc527757b515c" - name = "k8s.io/kube-openapi" - packages = [ - "cmd/openapi-gen", - "cmd/openapi-gen/args", - "pkg/common", - "pkg/generators", - "pkg/generators/rules", - "pkg/util/proto", - "pkg/util/sets", - ] - pruneopts = "NT" - revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" - -[[projects]] - digest = "1:06035489efbd51ccface65fc878ceeb849aba05b2f9443c8993f363fc96e80ac" - name = "sigs.k8s.io/controller-runtime" - packages = [ - "pkg/cache", - "pkg/cache/internal", - "pkg/client", - "pkg/client/apiutil", - "pkg/client/config", - "pkg/controller", - "pkg/event", - "pkg/handler", - "pkg/internal/controller", - "pkg/internal/controller/metrics", - "pkg/internal/recorder", - "pkg/leaderelection", - "pkg/manager", - "pkg/metrics", - "pkg/patch", - "pkg/predicate", - "pkg/reconcile", - "pkg/recorder", - "pkg/runtime/inject", - "pkg/runtime/log", - "pkg/runtime/scheme", - "pkg/runtime/signals", - "pkg/source", - "pkg/source/internal", - "pkg/webhook/admission", - "pkg/webhook/admission/types", - "pkg/webhook/internal/metrics", - "pkg/webhook/types", - ] - pruneopts = "NT" - revision = "12d98582e72927b6cd0123e2b4e819f9341ce62c" - version = "v0.1.10" - -[[projects]] - digest = "1:0a14ea9a2647d064bb9d48b2de78306e74b196681efd7b654eb0b518d90c2e8d" - name = "sigs.k8s.io/controller-tools" - packages = [ - "pkg/crd/generator", - "pkg/crd/util", - "pkg/internal/codegen", - "pkg/internal/codegen/parse", - "pkg/internal/general", - "pkg/util", - ] - pruneopts = "NT" - revision = "950a0e88e4effb864253b3c7504b326cc83b9d11" - version = "v0.1.8" - -[[projects]] - digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "NT" - revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" - version = "v1.1.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/Masterminds/sprig", - "github.com/go-logr/logr", - "github.com/go-openapi/spec", - "github.com/goph/emperror", - "github.com/operator-framework/operator-sdk/pkg/k8sutil", - "github.com/operator-framework/operator-sdk/pkg/leader", - "github.com/operator-framework/operator-sdk/pkg/log/zap", - "github.com/operator-framework/operator-sdk/pkg/metrics", - "github.com/operator-framework/operator-sdk/version", - "github.com/sirupsen/logrus", - "github.com/spf13/pflag", - "k8s.io/api/apps/v1", - 
"k8s.io/api/core/v1", - "k8s.io/api/rbac/v1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/intstr", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/code-generator/cmd/client-gen", - "k8s.io/code-generator/cmd/conversion-gen", - "k8s.io/code-generator/cmd/deepcopy-gen", - "k8s.io/code-generator/cmd/defaulter-gen", - "k8s.io/code-generator/cmd/informer-gen", - "k8s.io/code-generator/cmd/lister-gen", - "k8s.io/gengo/args", - "k8s.io/kube-openapi/cmd/openapi-gen", - "k8s.io/kube-openapi/pkg/common", - "sigs.k8s.io/controller-runtime/pkg/client", - "sigs.k8s.io/controller-runtime/pkg/client/config", - "sigs.k8s.io/controller-runtime/pkg/controller", - "sigs.k8s.io/controller-runtime/pkg/handler", - "sigs.k8s.io/controller-runtime/pkg/manager", - "sigs.k8s.io/controller-runtime/pkg/reconcile", - "sigs.k8s.io/controller-runtime/pkg/runtime/log", - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", - "sigs.k8s.io/controller-runtime/pkg/runtime/signals", - "sigs.k8s.io/controller-runtime/pkg/source", - "sigs.k8s.io/controller-tools/pkg/crd/generator", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index b4c4b8cda..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,79 +0,0 @@ -# Force dep to vendor the code generators, which aren't imported just used at dev time. -required = [ - "k8s.io/code-generator/cmd/defaulter-gen", - "k8s.io/code-generator/cmd/deepcopy-gen", - "k8s.io/code-generator/cmd/conversion-gen", - "k8s.io/code-generator/cmd/client-gen", - "k8s.io/code-generator/cmd/lister-gen", - "k8s.io/code-generator/cmd/informer-gen", - "k8s.io/kube-openapi/cmd/openapi-gen", - "k8s.io/gengo/args", - "sigs.k8s.io/controller-tools/pkg/crd/generator", -] - -[[override]] - name = "k8s.io/code-generator" - # revision for tag "kubernetes-1.13.1" - revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae" - -[[override]] - name = "k8s.io/kube-openapi" - revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" - -[[override]] - name = "github.com/go-openapi/spec" - branch = "master" - -[[override]] - name = "sigs.k8s.io/controller-tools" - version = "=v0.1.8" - -[[override]] - name = "k8s.io/api" - # revision for tag "kubernetes-1.13.1" - revision = "05914d821849570fba9eacfb29466f2d8d3cd229" - -[[override]] - name = "k8s.io/apiextensions-apiserver" - # revision for tag "kubernetes-1.13.1" - revision = "0fe22c71c47604641d9aa352c785b7912c200562" - -[[override]] - name = "k8s.io/apimachinery" - # revision for tag "kubernetes-1.13.1" - revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" - -[[override]] - name = "k8s.io/client-go" - # revision for tag "kubernetes-1.13.1" - revision = "8d9ed539ba3134352c586810e749e58df4e94e4f" - -[[override]] - name = "github.com/coreos/prometheus-operator" - version = "=v0.26.0" - -[[override]] - name = "sigs.k8s.io/controller-runtime" - version = "=v0.1.10" - -[[constraint]] - name = "github.com/operator-framework/operator-sdk" - # The version rule is used for a specific release and the master branch for in between releases. 
- # branch = "master" #osdk_branch_annotation - version = "=v0.5.0" #osdk_version_annotation - -[prune] - go-tests = true - non-go = true - - [[prune.project]] - name = "k8s.io/code-generator" - non-go = false - - [[prune.project]] - name = "k8s.io/gengo" - non-go = false - -[[constraint]] - name = "github.com/Masterminds/sprig" - version = "2.18.0" diff --git a/LICENCE b/LICENSE similarity index 99% rename from LICENCE rename to LICENSE index 261eeb9e9..f49a4e16e 100644 --- a/LICENCE +++ b/LICENSE @@ -198,4 +198,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. \ No newline at end of file diff --git a/Makefile b/Makefile index 344a6f983..3f6e8c96e 100644 --- a/Makefile +++ b/Makefile @@ -1,66 +1,90 @@ -VERSION := $(shell git describe --abbrev=0 --tags) -DOCKER_IMAGE = banzaicloud/logging-operator -DOCKER_TAG ?= ${VERSION} -GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./client/*") -PKGS=$(shell go list ./... | grep -v /vendor) +# this makefile was generated by +include Makefile.app -DEP_VERSION = 0.5.0 +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:trivialVersions=true" -bin/dep: bin/dep-${DEP_VERSION} - @ln -sf dep-${DEP_VERSION} bin/dep +KUBEBUILDER_VERSION = 2.0.0 -bin/dep-${DEP_VERSION}: - @mkdir -p bin - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | INSTALL_DIRECTORY=bin DEP_RELEASE_TAG=v${DEP_VERSION} sh - @mv bin/dep $@ - -.PHONY: vendor -vendor: bin/dep ## Install dependencies - bin/dep ensure -v -vendor-only - -build: vendor - go build -v $(PKGS) +export PATH := $(PWD)/bin:$(PATH) -check-fmt: - PKGS="${GOFILES_NOVENDOR}" GOFMT="gofmt" ./scripts/fmt-check.sh +all: manager -fmt: - gofmt -w ${GOFILES_NOVENDOR} +# Generate docs +.PHONY: docs +docs: + go run cmd/docs.go -lint: install-golint - golint -min_confidence 0.9 -set_exit_status $(PKGS) +# Run tests +test: generate fmt vet manifests bin/kubebuilder + @which kubebuilder + kubebuilder version + go test ./api/... ./controllers/... ./pkg/... -coverprofile cover.out -install-golint: - GOLINT_CMD=$(shell command -v golint 2> /dev/null) -ifndef GOLINT_CMD - go get golang.org/x/lint/golint -endif +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go -check-misspell: install-misspell - PKGS="${GOFILES_NOVENDOR}" MISSPELL="misspell" ./scripts/misspell-check.sh +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet + go run ./main.go --verbose -misspell: install-misspell - misspell -w ${GOFILES_NOVENDOR} +# Install CRDs into a cluster +install: manifests + kubectl apply -f config/crd/bases -install-misspell: - MISSPELL_CMD=$(shell command -v misspell 2> /dev/null) -ifndef MISSPELL_CMD - go get -u github.com/client9/misspell/cmd/misspell -endif +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests + kubectl apply -f config/crd/bases + kustomize build config/default | kubectl apply -f - -ineffassign: install-ineffassign - ineffassign ${GOFILES_NOVENDOR} +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases -install-ineffassign: - INEFFASSIGN_CMD=$(shell command -v ineffassign 2> /dev/null) -ifndef INEFFASSIGN_CMD - go get -u github.com/gordonklaus/ineffassign +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... + +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/... + $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./pkg/model/... + +# Build the docker image +docker-build: test + docker build . -t ${IMG} + @echo "updating kustomize image patch file for manager resource" + sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml + +# Push the docker image +docker-push: + docker push ${IMG} + +# find or download controller-gen +# download controller-gen if necessary +controller-gen: +ifeq (, $(shell which controller-gen)) + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.1 +CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen +else +CONTROLLER_GEN=$(shell which controller-gen) endif -.PHONY: docker -docker: ## Build Docker image - docker build -t ${DOCKER_IMAGE}:${DOCKER_TAG} -f Dockerfile . +bin/kubebuilder: bin/kubebuilder_${KUBEBUILDER_VERSION} + @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kubebuilder bin/kubebuilder + @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kube-apiserver bin/kube-apiserver + @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/etcd bin/etcd + @ln -sf kubebuilder_${KUBEBUILDER_VERSION}/kubectl bin/kubectl -.PHONY: docs -docs: - go run cmd/docgen/docgen.go \ No newline at end of file +bin/kubebuilder_${KUBEBUILDER_VERSION}: + @mkdir -p bin + curl -L https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/kubebuilder_${KUBEBUILDER_VERSION}_darwin_amd64.tar.gz | tar xvz - -C bin + @ln -sf kubebuilder_${KUBEBUILDER_VERSION}_darwin_amd64/bin bin/kubebuilder_${KUBEBUILDER_VERSION} +#todo implement for linux (CI) diff --git a/Makefile.app b/Makefile.app new file mode 100644 index 000000000..8e59b88a8 --- /dev/null +++ b/Makefile.app @@ -0,0 +1,6 @@ +.PHONY: check +check: test license + +.PHONY: license +license: + ./scripts/check-header.sh \ No newline at end of file diff --git a/PROJECT b/PROJECT new file mode 100644 index 000000000..1ca5d55ef --- /dev/null +++ b/PROJECT @@ -0,0 +1,25 @@ +version: "2" +domain: banzaicloud.com +repo: github.com/banzaicloud/logging-operator +resources: +- group: logging + version: v1alpha2 + kind: Fluentbit +- group: logging + version: v1alpha2 + kind: Fluentd +- group: logging + version: v1alpha2 + kind: Flow +- group: logging + version: v1alpha2 + kind: ClusterFlow +- group: logging + version: v1alpha2 + kind: Output +- group: logging + version: v1alpha2 + kind: ClusterOutput +- group: logging + version: v1alpha2 + kind: Logging diff --git a/README.md b/README.md index 26ebd0fb7..3021ca743 100644 --- a/README.md +++ b/README.md @@ -24,22 +24,25 @@

-# logging-operator +# logging-operator v2 -Logging operator for Kubernetes based on Fluentd and Fluent-bit. For more details please follow up with this [post](https://banzaicloud.com/blog/k8s-logging-operator/). +Logging operator for Kubernetes based on Fluentd and Fluent-bit. ## What is this operator for? This operator helps you to pack together logging information with your applications. With the help of Custom Resource Definition you can describe the behaviour of your application within its charts. The operator does the rest. -

+

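To make this concrete, here is a minimal sketch of the kind of custom resource an application chart could ship, based on the `Flow` type introduced in this PR. The names, namespace, and labels are illustrative only.

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Flow
metadata:
  name: my-app-log-flow        # illustrative name
  namespace: my-app            # namespace the application runs in
spec:
  selectors:
    app: my-app                # collect logs only from pods labelled app=my-app
  outputRefs:
    - my-app-output            # name of an Output or ClusterOutput defined separately
```

The referenced output and the cluster-wide `Logging` resource that provides the fluentd/fluent-bit infrastructure are sketched in the sections below.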
### Motivation -The logging operator automates the deployment and configuration of a Kubernetes logging pipeline. Under the hood the operator configures a fluent-bit daemonset for collecting container logs from the node file system. Fluent-bit enriches the logs with Kubernetes metadata and transfers them to fluentd. Fluentd receives, filters and transfer logs to multiple outputs. The whole flow can be defined in a single custom resource. Your logs will always be transferred on authenticated and encrypted channels. +The logging operator automates the deployment and configuration of a Kubernetes logging pipeline. Under the hood the operator configures a fluent-bit daemonset for collecting container logs from the node file system. Fluent-bit enriches the logs with Kubernetes metadata and transfers them to fluentd. Fluentd receives, filters, and transfers logs to multiple outputs. Your logs will always be transferred on authenticated and encrypted channels. ##### Blogs + - [Logging-Operator v2](https://banzaicloud.com/blog/logging-operator-v2/) (soon) + +##### Blogs (general logging and operator v1) - [Advanced logging on Kubernetes](https://banzaicloud.com/blog/k8s-logging-advanced/) - [Secure logging on Kubernetes with Fluentd and Fluent Bit](https://banzaicloud.com/blog/k8s-logging-tls/) - [Centralized logging under Kubernetes](https://banzaicloud.com/blog/k8s-logging/) @@ -47,7 +50,6 @@ The logging operator automates the deployment and configuration of a Kubernetes - [And more...](https://banzaicloud.com/tags/logging/) - Logging-operator is a core part of the [Pipeline](https://beta.banzaicloud.io) platform, a Cloud Native application and devops platform that natively supports multi- and hybrid-cloud deployments with multiple authentication backends. Check out the developer beta:

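As a rough illustration of the pipeline described in the Motivation section, a minimal `Logging` resource could look like the following (based on the `Logging` type and the defaults added in this PR; the resource name and control namespace are placeholders):

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Logging
metadata:
  name: default-logging        # placeholder name; Logging is a cluster-scoped resource
spec:
  fluentd: {}                  # empty specs fall back to operator defaults (images, resources, port 24240)
  fluentbit: {}
  controlNamespace: logging    # namespace where fluentd and the fluent-bit daemonset are created
```

TLS between fluent-bit and fluentd can be switched on through the `tls` fields of both specs (`enabled`, `secretName`, `sharedKey`).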
@@ -55,16 +57,30 @@ Logging-operator is a core part of the [Pipeline](https://beta.banzaicloud.io) p

+## Architecture + +Available custom resources: +- [logging](/docs/crds.md#loggings) - Represents a logging system. Includes `Fluentd` and `Fluent-bit` configuration. Specifies the `controlNamespace`. Fluentd and Fluent-bit will be deployed in the `controlNamespace`. +- [output](/docs/crds.md#outputs-clusteroutputs) - Defines an Output for a logging flow. This is a namespaced resource. +- [flow](/docs/crds.md#flows-clusterflows) - Defines a logging flow with `filters` and `outputs`. You can specify `selectors` to filter logs by labels. Outputs can be `output` or `clusteroutput`. This is a namespaced resource. +- [clusteroutput](/docs/crds.md#outputs-clusteroutputs) - Defines an output without namespace restriction. Only effective in the `controlNamespace`. +- [clusterflow](/docs/crds.md#flows-clusterflows) - Defines a logging flow without namespace restriction. + +The detailed CRD documentation can be found [here](/docs/crds.md). For a minimal sketch of how these resources reference each other, see the example below. + +

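A minimal sketch of the output side, using the credential-free `nullout` plugin so the example stays self-contained (names and namespaces are illustrative, and the `enabledNamespaces` behaviour is assumed from the spec field name):

```yaml
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Output
metadata:
  name: my-app-output          # referenced from a flow's outputRefs
  namespace: my-app
spec:
  nullout: {}                  # placeholder; real outputs would use s3, gcs, elasticsearch, loki, ...
---
apiVersion: logging.banzaicloud.com/v1alpha2
kind: ClusterOutput
metadata:
  name: shared-output
  namespace: logging           # clusteroutputs are only effective in the controlNamespace
spec:
  nullout: {}
  enabledNamespaces:           # assumed to limit which namespaces may reference this clusteroutput
    - my-app
```

A `flow` in the `my-app` namespace can then list `my-app-output` or `shared-output` in its `outputRefs`.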
+ +*connection between custom resources* + --- ## Contents - Installation - [Deploy with Helm](#deploying-with-helm-chart) - - [Deploy with Manifest](#deploying-with-kubernetes-manifest) - [Supported Plugins](#supported-plugins) - Examples - - [S3 Output](./docs/examples/s3.md) - - [Elasticsearch Output](./docs/examples/es.md) + - [S3 Output](./docs/example-s3.md) + - [Elasticsearch Output](./docs/example-es.md) - [Troubleshooting](#troubleshooting) - [Contributing](#contributing) --- @@ -85,57 +101,29 @@ $ helm repo update $ helm install banzaicloud-stable/logging-operator ``` -#### Install FluentD, FluentBit CRs from chart -```bash -$ helm install banzaicloud-stable/logging-operator-fluent -``` -

- --- -## Deploying with Kubernetes Manifest - -``` -# Create all the CRDs used by the Operator -kubectl create -f deploy/crds/logging_v1alpha1_plugin_crd.yaml -kubectl create -f deploy/crds/logging_v1alpha1_fluentbit_crd.yaml -kubectl create -f deploy/crds/logging_v1alpha1_fluentd_crd.yaml - -# If RBAC enabled create the required resources -kubectl create -f deploy/clusterrole.yaml -kubectl create -f deploy/clusterrole_binding.yaml -kubectl create -f deploy/service_account.yaml - -# Create the Operator -kubectl create -f deploy/operator.yaml - -# Create the fluent-bit daemonset by submiting a fluent-bit CR -kubectl create -f deploy/crds/logging_v1alpha1_fluentbit_cr.yaml +## Supported Plugins -# Create the fluentd deployment by submitting a fluentd CR -kubectl create -f deploy/crds/logging_v1alpha1_fluentd_cr.yaml +For complete list of supported plugins pleas checkl the [plugins index](/docs/plugins/index.md). -``` +| Name | Type | Description | Status | Version | +|---------------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|-------------------------------------------------------------------------------------------| +| [Alibaba](./docs/plugins/outputs/oss.md) | Output | Store logs the Alibaba Cloud Object Storage Service | GA | [0.0.1](https://github.com/aliyun/fluent-plugin-oss) | +| [Amazon S3](./docs/plugins/outputs/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) | +| [Azure](./docs/plugins/outputs/azurestore.md) | Output | Store logs in Azure Storega | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) | +| [Google Storage](./docs/plugins/outputs/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) | +| [Grafana Loki](./docs/plugins/outputs/loki.md) | Output | Transfer logs to Loki | Testing | [0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) | +| [ElasticSearch](./docs/plugins/outputs/elasticsearch.md) | Output | Send your logs to Elasticsearch | GA | [3.5.2](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.2) | +| [Tag Normaliser](./docs/plugins/filters/tagnormaliser.md) | Parser | Normalise tags for outputs | GA | | +| [Parser](./docs/plugins/filters/parser.md) | Parser | Parse logs with parser plugin | GA | | -## Supported Plugins -| Name | Type | Description | Status | Version | -|-------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|------------------------------------------------------------------------------------------| -| [Alibaba](./docs/plugins/alibaba.md) | Output | Store logs the Alibaba Cloud Object Storage Service | GA | [0.0.2](https://github.com/jicong/fluent-plugin-oss) | -| [Amazon S3](./docs/plugins/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) | -| [Azure](./docs/plugins/azure.md) | Output | Store logs in Azure Storega | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) | -| [Google Storage](./docs/plugins/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) | -| [Grafana Loki](./docs/plugins/loki.md) | Output | Transfer logs to Loki | Testing | 
[0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) | -| [ElasticSearch](./docs/plugins/parser.md) | Output | Send your logs to Elasticsearch | GA | [3.5.2](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.2) | -| [HDFS](https://docs.fluentd.org/output/webhdfs) | Output | Fluentd output plugin to write data into Hadoop HDFS over WebHDFS/HttpFs. | GA | [1.2.3](https://github.com/fluent/fluent-plugin-webhdfs/releases/tag/v1.2.3) | -| [Parser](./docs/plugins/parser.md) | Parser | Parse logs with parser plugin | GA | | --- ## Troubleshooting If you encounter any problems that the documentation does not address, please [file an issue](https://github.com/banzaicloud/logging-operator/issues) or talk to us on the Banzai Cloud Slack channel [#logging-operator](https://slack.banzaicloud.io/). - - ## Contributing If you find this project useful here's how you can help: @@ -144,6 +132,8 @@ If you find this project useful here's how you can help: - Help new users with issues they may encounter - Support the development of this project and star this repo! +For more information please read the [developer documentation](./docs/developers.md) + ## License Copyright (c) 2017-2019 [Banzai Cloud, Inc.](https://banzaicloud.com) @@ -158,4 +148,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. +limitations under the License. \ No newline at end of file diff --git a/api/v1alpha2/clusterflow_types.go b/api/v1alpha2/clusterflow_types.go new file mode 100644 index 000000000..bee039977 --- /dev/null +++ b/api/v1alpha2/clusterflow_types.go @@ -0,0 +1,44 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true + +// ClusterFlow is the Schema for the clusterflows API +type ClusterFlow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Name of the logging cluster to be attached + Spec FlowSpec `json:"spec,omitempty"` + Status FlowStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterFlowList contains a list of ClusterFlow +type ClusterFlowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterFlow `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterFlow{}, &ClusterFlowList{}) +} diff --git a/api/v1alpha2/clusterflow_types_test.go b/api/v1alpha2/clusterflow_types_test.go new file mode 100644 index 000000000..229a90fd4 --- /dev/null +++ b/api/v1alpha2/clusterflow_types_test.go @@ -0,0 +1,81 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These tests are written in BDD-style using Ginkgo framework. Refer to +// http://onsi.github.io/ginkgo to learn more. + +var _ = Describe("ClusterFlow", func() { + var ( + key types.NamespacedName + created, fetched *ClusterFlow + ) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. 
+ Context("Create API", func() { + + It("should create an object successfully", func() { + + key = types.NamespacedName{ + Namespace: "foo", + Name: "foo", + } + created = &ClusterFlow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "foo", + }, + Spec: FlowSpec{ + Selectors: map[string]string{}, + OutputRefs: []string{}, + }, + Status: FlowStatus{}, + } + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &ClusterFlow{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + }) + +}) diff --git a/api/v1alpha2/clusteroutput_types.go b/api/v1alpha2/clusteroutput_types.go new file mode 100644 index 000000000..25ac13451 --- /dev/null +++ b/api/v1alpha2/clusteroutput_types.go @@ -0,0 +1,50 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true + +// ClusterOutput is the Schema for the clusteroutputs API +type ClusterOutput struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterOutputSpec `json:"spec"` + Status OutputStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:generate=true + +type ClusterOutputSpec struct { + OutputSpec `json:",inline"` + EnabledNamespaces []string `json:"enabledNamespaces,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterOutputList contains a list of ClusterOutput +type ClusterOutputList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterOutput `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterOutput{}, &ClusterOutputList{}) +} diff --git a/api/v1alpha2/clusteroutput_types_test.go b/api/v1alpha2/clusteroutput_types_test.go new file mode 100644 index 000000000..7ce221880 --- /dev/null +++ b/api/v1alpha2/clusteroutput_types_test.go @@ -0,0 +1,84 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// These tests are written in BDD-style using Ginkgo framework. Refer to +// http://onsi.github.io/ginkgo to learn more. + +var _ = Describe("ClusterOutput", func() { + var ( + key types.NamespacedName + created, fetched *ClusterOutput + ) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. + Context("Create API", func() { + + It("should create an object successfully", func() { + + key = types.NamespacedName{ + Name: "foo", + Namespace: "foo", + } + created = &ClusterOutput{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "foo", + }, + Spec: ClusterOutputSpec{ + OutputSpec: OutputSpec{ + S3OutputConfig: nil, + NullOutputConfig: nil, + }, + }, + Status: OutputStatus{}, + } + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &ClusterOutput{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + }) + +}) diff --git a/api/v1alpha2/common_types.go b/api/v1alpha2/common_types.go new file mode 100644 index 000000000..d8b530b1f --- /dev/null +++ b/api/v1alpha2/common_types.go @@ -0,0 +1,22 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +// ImageSpec struct hold information about image specification +type ImageSpec struct { + Repository string `json:"repository"` + Tag string `json:"tag"` + PullPolicy string `json:"pullPolicy"` +} diff --git a/api/v1alpha2/flow_types.go b/api/v1alpha2/flow_types.go new file mode 100644 index 000000000..431581bf8 --- /dev/null +++ b/api/v1alpha2/flow_types.go @@ -0,0 +1,63 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha2 + +import ( + "github.com/banzaicloud/logging-operator/pkg/model/filter" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type FlowSpec struct { + Selectors map[string]string `json:"selectors"` + Filters []Filter `json:"filters,omitempty"` + LoggingRef string `json:"loggingRef,omitempty"` + OutputRefs []string `json:"outputRefs"` +} + +type Filter struct { + StdOut *filter.StdOutFilterConfig `json:"stdout,omitempty"` + Parser *filter.ParserConfig `json:"parser,omitempty"` + TagNormaliser *filter.TagNormaliser `json:"tag_normaliser,omitempty"` +} + +// FlowStatus defines the observed state of Flow +type FlowStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// +kubebuilder:printcolumn:name="Logging",type=string,JSONPath=`.spec.loggingRef` +type Flow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FlowSpec `json:"spec,omitempty"` + Status FlowStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FlowList contains a list of Flow +type FlowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Flow `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Flow{}, &FlowList{}) +} diff --git a/api/v1alpha2/flow_types_test.go b/api/v1alpha2/flow_types_test.go new file mode 100644 index 000000000..d7a78389d --- /dev/null +++ b/api/v1alpha2/flow_types_test.go @@ -0,0 +1,81 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// These tests are written in BDD-style using Ginkgo framework. Refer to +// http://onsi.github.io/ginkgo to learn more. + +var _ = Describe("Flow", func() { + var ( + key types.NamespacedName + created, fetched *Flow + ) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. 
+ Context("Create API", func() { + + It("should create an object successfully", func() { + + key = types.NamespacedName{ + Name: "foo", + Namespace: "default", + } + created = &Flow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: FlowSpec{ + Selectors: map[string]string{}, + OutputRefs: []string{}, + }, + Status: FlowStatus{}, + } + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &Flow{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + }) + +}) diff --git a/api/v1alpha2/fluentbit_types.go b/api/v1alpha2/fluentbit_types.go new file mode 100644 index 000000000..34d90885c --- /dev/null +++ b/api/v1alpha2/fluentbit_types.go @@ -0,0 +1,55 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + "strconv" + + corev1 "k8s.io/api/core/v1" +) + +// +kubebuilder:object:generate=true + +// FluentbitSpec defines the desired state of Fluentbit +type FluentbitSpec struct { + Annotations map[string]string `json:"annotations,omitempty"` + Image ImageSpec `json:"image,omitempty"` + TLS FluentbitTLS `json:"tls,omitempty"` + TargetHost string `json:"targetHost,omitempty"` + TargetPort int32 `json:"targetPort,omitempty"` + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// +kubebuilder:object:generate=true + +// FluentbitTLS defines the TLS configs +type FluentbitTLS struct { + Enabled bool `json:"enabled"` + SecretName string `json:"secretName"` + SharedKey string `json:"sharedKey,omitempty"` +} + +// GetPrometheusPortFromAnnotation gets the port value from annotation +func (spec FluentbitSpec) GetPrometheusPortFromAnnotation() int32 { + var err error + var port int64 + if spec.Annotations != nil { + port, err = strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32) + if err != nil { + panic(err) + } + } + return int32(port) +} diff --git a/api/v1alpha2/fluentd_types.go b/api/v1alpha2/fluentd_types.go new file mode 100644 index 000000000..4438fda9f --- /dev/null +++ b/api/v1alpha2/fluentd_types.go @@ -0,0 +1,58 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha2 + +import ( + "strconv" + + corev1 "k8s.io/api/core/v1" +) + +// +kubebuilder:object:generate=true + +// FluentdSpec defines the desired state of Fluentd +type FluentdSpec struct { + Annotations map[string]string `json:"annotations,omitempty"` + TLS FluentdTLS `json:"tls,omitempty"` + Image ImageSpec `json:"image,omitempty"` + FluentdPvcSpec corev1.PersistentVolumeClaimSpec `json:"fluentdPvcSpec,omitempty"` + DisablePvc bool `json:"disablePvc,omitempty"` + VolumeModImage ImageSpec `json:"volumeModImage,omitempty"` + ConfigReloaderImage ImageSpec `json:"configReloaderImage,omitempty"` + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + Port int32 `json:"port,omitempty"` +} + +// +kubebuilder:object:generate=true + +// FluentdTLS defines the TLS configs +type FluentdTLS struct { + Enabled bool `json:"enabled"` + SecretName string `json:"secretName"` + SharedKey string `json:"sharedKey,omitempty"` +} + +// GetPrometheusPortFromAnnotation gets the port value from annotation +func (spec FluentdSpec) GetPrometheusPortFromAnnotation() int32 { + var err error + var port int64 + if spec.Annotations != nil { + port, err = strconv.ParseInt(spec.Annotations["prometheus.io/port"], 10, 32) + if err != nil { + return 0 + } + } + return int32(port) +} diff --git a/api/v1alpha2/groupversion_info.go b/api/v1alpha2/groupversion_info.go new file mode 100644 index 000000000..aac6e0d4e --- /dev/null +++ b/api/v1alpha2/groupversion_info.go @@ -0,0 +1,34 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha2 contains API Schema definitions for the logging v1alpha2 API group +// +kubebuilder:object:generate=true +// +groupName=logging.banzaicloud.com +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "logging.banzaicloud.com", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha2/logging_types.go b/api/v1alpha2/logging_types.go new file mode 100644 index 000000000..2010a3ba8 --- /dev/null +++ b/api/v1alpha2/logging_types.go @@ -0,0 +1,178 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// LoggingSpec defines the desired state of Logging +type LoggingSpec struct { + LoggingRef string `json:"loggingRef,omitempty"` + FlowConfigCheckDisabled bool `json:"flowConfigCheckDisabled,omitempty"` + FlowConfigOverride string `json:"flowConfigOverride,omitempty"` + FluentbitSpec *FluentbitSpec `json:"fluentbit,omitempty"` + FluentdSpec *FluentdSpec `json:"fluentd,omitempty"` + WatchNamespaces []string `json:"watchNamespaces,omitempty"` + ControlNamespace string `json:"controlNamespace"` +} + +// LoggingStatus defines the observed state of Logging +type LoggingStatus struct { + ConfigCheckResults map[string]bool `json:"configCheckResults,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=loggings,scope=Cluster + +// Logging is the Schema for the loggings API +type Logging struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LoggingSpec `json:"spec,omitempty"` + Status LoggingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LoggingList contains a list of Logging +type LoggingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Logging `json:"items"` +} + +func (l *Logging) SetDefaults() *Logging { + copy := l.DeepCopy() + if !copy.Spec.FlowConfigCheckDisabled && copy.Status.ConfigCheckResults == nil { + copy.Status.ConfigCheckResults = make(map[string]bool) + } + if copy.Spec.WatchNamespaces == nil { + copy.Spec.WatchNamespaces = []string{} + } + if copy.Spec.FluentdSpec != nil { + if copy.Spec.FluentdSpec.Image.Repository == "" { + copy.Spec.FluentdSpec.Image.Repository = "banzaicloud/fluentd" + } + if copy.Spec.FluentdSpec.Image.Tag == "" { + copy.Spec.FluentdSpec.Image.Tag = "v1.6.3-alpine" + } + if copy.Spec.FluentdSpec.Image.PullPolicy == "" { + copy.Spec.FluentdSpec.Image.PullPolicy = "IfNotPresent" + } + if copy.Spec.FluentdSpec.Annotations == nil { + copy.Spec.FluentdSpec.Annotations = map[string]string{ + "prometheus.io/scrape": "true", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "25000", + } + } + if copy.Spec.FluentdSpec.FluentdPvcSpec.AccessModes == nil { + copy.Spec.FluentdSpec.FluentdPvcSpec.AccessModes = []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + } + } + if copy.Spec.FluentdSpec.FluentdPvcSpec.Resources.Requests == nil { + copy.Spec.FluentdSpec.FluentdPvcSpec.Resources.Requests = map[v1.ResourceName]resource.Quantity{ + "storage": resource.MustParse("20Gi"), + } + } + if copy.Spec.FluentdSpec.VolumeModImage.Repository == "" { + copy.Spec.FluentdSpec.VolumeModImage.Repository = "busybox" + } + if copy.Spec.FluentdSpec.VolumeModImage.Tag == "" { + copy.Spec.FluentdSpec.VolumeModImage.Tag = "latest" + } + if copy.Spec.FluentdSpec.VolumeModImage.PullPolicy == "" { + copy.Spec.FluentdSpec.VolumeModImage.PullPolicy = "IfNotPresent" + } + if copy.Spec.FluentdSpec.ConfigReloaderImage.Repository == "" { + copy.Spec.FluentdSpec.ConfigReloaderImage.Repository = "jimmidyson/configmap-reload" + } + if 
copy.Spec.FluentdSpec.ConfigReloaderImage.Tag == "" { + copy.Spec.FluentdSpec.ConfigReloaderImage.Tag = "v0.2.2" + } + if copy.Spec.FluentdSpec.ConfigReloaderImage.PullPolicy == "" { + copy.Spec.FluentdSpec.ConfigReloaderImage.PullPolicy = "IfNotPresent" + } + if copy.Spec.FluentdSpec.Resources.Limits == nil { + copy.Spec.FluentdSpec.Resources.Limits = v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("200M"), + v1.ResourceCPU: resource.MustParse("1000m"), + } + } + if copy.Spec.FluentdSpec.Resources.Requests == nil { + copy.Spec.FluentdSpec.Resources.Requests = v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("100M"), + v1.ResourceCPU: resource.MustParse("500m"), + } + } + if copy.Spec.FluentdSpec.Port == 0 { + copy.Spec.FluentdSpec.Port = 24240 + } + } + if copy.Spec.FluentbitSpec != nil { + if copy.Spec.FluentbitSpec.Image.Repository == "" { + copy.Spec.FluentbitSpec.Image.Repository = "fluent/fluent-bit" + } + if copy.Spec.FluentbitSpec.Image.Tag == "" { + copy.Spec.FluentbitSpec.Image.Tag = "1.2.2" + } + if copy.Spec.FluentbitSpec.Image.PullPolicy == "" { + copy.Spec.FluentbitSpec.Image.PullPolicy = "IfNotPresent" + } + if copy.Spec.FluentbitSpec.Resources.Limits == nil { + copy.Spec.FluentbitSpec.Resources.Limits = v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("100M"), + v1.ResourceCPU: resource.MustParse("200m"), + } + } + if copy.Spec.FluentbitSpec.Resources.Requests == nil { + copy.Spec.FluentbitSpec.Resources.Requests = v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("50M"), + v1.ResourceCPU: resource.MustParse("100m"), + } + } + if copy.Spec.FluentbitSpec.Annotations == nil { + copy.Spec.FluentbitSpec.Annotations = map[string]string{ + "prometheus.io/scrape": "true", + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + } + } + } + return copy +} + +func (l *Logging) QualifiedName(name string) string { + return fmt.Sprintf("%s-%s", l.Name, name) +} + +func (l *Logging) QualifiedNamespacedName(name string) string { + return fmt.Sprintf("%s-%s-%s", l.Spec.ControlNamespace, l.Name, name) +} + +func init() { + SchemeBuilder.Register(&Logging{}, &LoggingList{}) +} diff --git a/api/v1alpha2/output_types.go b/api/v1alpha2/output_types.go new file mode 100644 index 000000000..a9748c93b --- /dev/null +++ b/api/v1alpha2/output_types.go @@ -0,0 +1,63 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha2 + +import ( + "github.com/banzaicloud/logging-operator/pkg/model/output" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OutputSpec defines the desired state of Output +type OutputSpec struct { + LoggingRef string `json:"loggingRef,omitempty"` + S3OutputConfig *output.S3OutputConfig `json:"s3,omitempty"` + AzureStorage *output.AzureStorage `json:"azurestorage,omitempty"` + GCSOutput *output.GCSOutput `json:"gcs,omitempty"` + OSSOutput *output.OSSOutput `json:"oss,omitempty"` + ElasticsearchOutput *output.ElasticsearchOutput `json:"elasticsearch,omitempty"` + LokiOutput *output.LokiOutput `json:"loki,omitempty"` + SumologicOutput *output.SumologicOutput `json:"sumologic"` + NullOutputConfig *output.NullOutputConfig `json:"nullout,omitempty"` +} + +// OutputStatus defines the observed state of Output +type OutputStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// Output is the Schema for the outputs API +type Output struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OutputSpec `json:"spec,omitempty"` + Status OutputStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OutputList contains a list of Output +type OutputList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Output `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Output{}, &OutputList{}) +} diff --git a/api/v1alpha2/output_types_test.go b/api/v1alpha2/output_types_test.go new file mode 100644 index 000000000..5f9a88d78 --- /dev/null +++ b/api/v1alpha2/output_types_test.go @@ -0,0 +1,82 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// These tests are written in BDD-style using Ginkgo framework. Refer to +// http://onsi.github.io/ginkgo to learn more. + +var _ = Describe("Output", func() { + var ( + key types.NamespacedName + created, fetched *Output + ) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. 
+ Context("Create API", func() { + + It("should create an object successfully", func() { + + key = types.NamespacedName{ + Name: "foo", + Namespace: "default", + } + created = &Output{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: OutputSpec{ + S3OutputConfig: nil, + NullOutputConfig: nil, + }, + Status: OutputStatus{}, + } + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &Output{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + }) + +}) diff --git a/api/v1alpha2/suite_test.go b/api/v1alpha2/suite_test.go new file mode 100644 index 000000000..3e764132a --- /dev/null +++ b/api/v1alpha2/suite_test.go @@ -0,0 +1,73 @@ +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "v1alpha2 Suite", + []Reporter{envtest.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + } + + err := SchemeBuilder.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go similarity index 50% rename from pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go rename to api/v1alpha2/zz_generated.deepcopy.go index 69a9e38f2..5467dd9be 100644 --- a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -1,75 +1,109 @@ // +build !ignore_autogenerated -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by operator-sdk. DO NOT EDIT. - -package v1alpha1 +// Copyright © 2019 Banzai Cloud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 import ( - v1 "k8s.io/api/core/v1" + "github.com/banzaicloud/logging-operator/pkg/model/filter" + "github.com/banzaicloud/logging-operator/pkg/model/output" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FPlugin) DeepCopyInto(out *FPlugin) { +func (in *ClusterFlow) DeepCopyInto(out *ClusterFlow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFlow. 
+func (in *ClusterFlow) DeepCopy() *ClusterFlow { + if in == nil { + return nil + } + out := new(ClusterFlow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterFlow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterFlowList) DeepCopyInto(out *ClusterFlowList) { *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make([]Parameter, len(*in)) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterFlow, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FPlugin. -func (in *FPlugin) DeepCopy() *FPlugin { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFlowList. +func (in *ClusterFlowList) DeepCopy() *ClusterFlowList { if in == nil { return nil } - out := new(FPlugin) + out := new(ClusterFlowList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterFlowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Fluentbit) DeepCopyInto(out *Fluentbit) { +func (in *ClusterOutput) DeepCopyInto(out *ClusterOutput) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fluentbit. -func (in *Fluentbit) DeepCopy() *Fluentbit { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutput. +func (in *ClusterOutput) DeepCopy() *ClusterOutput { if in == nil { return nil } - out := new(Fluentbit) + out := new(ClusterOutput) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Fluentbit) DeepCopyObject() runtime.Object { +func (in *ClusterOutput) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -77,32 +111,31 @@ func (in *Fluentbit) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentbitList) DeepCopyInto(out *FluentbitList) { +func (in *ClusterOutputList) DeepCopyInto(out *ClusterOutputList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Fluentbit, len(*in)) + *out = make([]ClusterOutput, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitList. -func (in *FluentbitList) DeepCopy() *FluentbitList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutputList. 
+func (in *ClusterOutputList) DeepCopy() *ClusterOutputList { if in == nil { return nil } - out := new(FluentbitList) + out := new(ClusterOutputList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FluentbitList) DeepCopyObject() runtime.Object { +func (in *ClusterOutputList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -110,92 +143,77 @@ func (in *FluentbitList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentbitSpec) DeepCopyInto(out *FluentbitSpec) { +func (in *ClusterOutputSpec) DeepCopyInto(out *ClusterOutputSpec) { *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.Image = in.Image - out.TLS = in.TLS - in.Resources.DeepCopyInto(&out.Resources) - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + in.OutputSpec.DeepCopyInto(&out.OutputSpec) + if in.EnabledNamespaces != nil { + in, out := &in.EnabledNamespaces, &out.EnabledNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitSpec. -func (in *FluentbitSpec) DeepCopy() *FluentbitSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOutputSpec. +func (in *ClusterOutputSpec) DeepCopy() *ClusterOutputSpec { if in == nil { return nil } - out := new(FluentbitSpec) + out := new(ClusterOutputSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentbitStatus) DeepCopyInto(out *FluentbitStatus) { +func (in *Filter) DeepCopyInto(out *Filter) { *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitStatus. -func (in *FluentbitStatus) DeepCopy() *FluentbitStatus { - if in == nil { - return nil + if in.StdOut != nil { + in, out := &in.StdOut, &out.StdOut + *out = new(filter.StdOutFilterConfig) + **out = **in + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = new(filter.ParserConfig) + (*in).DeepCopyInto(*out) + } + if in.TagNormaliser != nil { + in, out := &in.TagNormaliser, &out.TagNormaliser + *out = new(filter.TagNormaliser) + **out = **in } - out := new(FluentbitStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentbitTLS) DeepCopyInto(out *FluentbitTLS) { - *out = *in - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitTLS. -func (in *FluentbitTLS) DeepCopy() *FluentbitTLS { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { if in == nil { return nil } - out := new(FluentbitTLS) + out := new(Filter) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Fluentd) DeepCopyInto(out *Fluentd) { +func (in *Flow) DeepCopyInto(out *Flow) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fluentd. -func (in *Fluentd) DeepCopy() *Fluentd { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flow. +func (in *Flow) DeepCopy() *Flow { if in == nil { return nil } - out := new(Fluentd) + out := new(Flow) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Fluentd) DeepCopyObject() runtime.Object { +func (in *Flow) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -203,32 +221,31 @@ func (in *Fluentd) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentdList) DeepCopyInto(out *FluentdList) { +func (in *FlowList) DeepCopyInto(out *FlowList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Fluentd, len(*in)) + *out = make([]Flow, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdList. -func (in *FluentdList) DeepCopy() *FluentdList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowList. +func (in *FlowList) DeepCopy() *FlowList { if in == nil { return nil } - out := new(FluentdList) + out := new(FlowList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FluentdList) DeepCopyObject() runtime.Object { +func (in *FlowList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -236,171 +253,173 @@ func (in *FluentdList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) { +func (in *FlowSpec) DeepCopyInto(out *FlowSpec) { *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } - out.TLS = in.TLS - out.Image = in.Image - in.FluentdPvcSpec.DeepCopyInto(&out.FluentdPvcSpec) - out.VolumeModImage = in.VolumeModImage - out.ConfigReloaderImage = in.ConfigReloaderImage - in.Resources.DeepCopyInto(&out.Resources) - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]Filter, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return + if in.OutputRefs != nil { + in, out := &in.OutputRefs, &out.OutputRefs + *out = make([]string, len(*in)) + copy(*out, *in) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec. -func (in *FluentdSpec) DeepCopy() *FluentdSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSpec. 
+func (in *FlowSpec) DeepCopy() *FlowSpec { if in == nil { return nil } - out := new(FluentdSpec) + out := new(FlowSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentdStatus) DeepCopyInto(out *FluentdStatus) { +func (in *FlowStatus) DeepCopyInto(out *FlowStatus) { *out = *in - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdStatus. -func (in *FluentdStatus) DeepCopy() *FluentdStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowStatus. +func (in *FlowStatus) DeepCopy() *FlowStatus { if in == nil { return nil } - out := new(FluentdStatus) + out := new(FlowStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FluentdTLS) DeepCopyInto(out *FluentdTLS) { +func (in *FluentbitSpec) DeepCopyInto(out *FluentbitSpec) { *out = *in - return + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Image = in.Image + out.TLS = in.TLS + in.Resources.DeepCopyInto(&out.Resources) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdTLS. -func (in *FluentdTLS) DeepCopy() *FluentdTLS { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitSpec. +func (in *FluentbitSpec) DeepCopy() *FluentbitSpec { if in == nil { return nil } - out := new(FluentdTLS) + out := new(FluentbitSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { +func (in *FluentbitTLS) DeepCopyInto(out *FluentbitTLS) { *out = *in - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. -func (in *ImageSpec) DeepCopy() *ImageSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentbitTLS. +func (in *FluentbitTLS) DeepCopy() *FluentbitTLS { if in == nil { return nil } - out := new(ImageSpec) + out := new(FluentbitTLS) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Input) DeepCopyInto(out *Input) { +func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) { *out = *in - if in.Label != nil { - in, out := &in.Label, &out.Label + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } - return + out.TLS = in.TLS + out.Image = in.Image + in.FluentdPvcSpec.DeepCopyInto(&out.FluentdPvcSpec) + out.VolumeModImage = in.VolumeModImage + out.ConfigReloaderImage = in.ConfigReloaderImage + in.Resources.DeepCopyInto(&out.Resources) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input. -func (in *Input) DeepCopy() *Input { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec. 
+func (in *FluentdSpec) DeepCopy() *FluentdSpec { if in == nil { return nil } - out := new(Input) + out := new(FluentdSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesSecret) DeepCopyInto(out *KubernetesSecret) { +func (in *FluentdTLS) DeepCopyInto(out *FluentdTLS) { *out = *in - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecret. -func (in *KubernetesSecret) DeepCopy() *KubernetesSecret { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdTLS. +func (in *FluentdTLS) DeepCopy() *FluentdTLS { if in == nil { return nil } - out := new(KubernetesSecret) + out := new(FluentdTLS) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Parameter) DeepCopyInto(out *Parameter) { +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(ValueFrom) - **out = **in - } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. -func (in *Parameter) DeepCopy() *Parameter { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { if in == nil { return nil } - out := new(Parameter) + out := new(ImageSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { +func (in *Logging) DeepCopyInto(out *Logging) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return + in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging. +func (in *Logging) DeepCopy() *Logging { if in == nil { return nil } - out := new(Plugin) + out := new(Logging) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Plugin) DeepCopyObject() runtime.Object { +func (in *Logging) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -408,32 +427,31 @@ func (in *Plugin) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginList) DeepCopyInto(out *PluginList) { +func (in *LoggingList) DeepCopyInto(out *LoggingList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Plugin, len(*in)) + *out = make([]Logging, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginList. -func (in *PluginList) DeepCopy() *PluginList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingList. 
+func (in *LoggingList) DeepCopy() *LoggingList { if in == nil { return nil } - out := new(PluginList) + out := new(LoggingList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PluginList) DeepCopyObject() runtime.Object { +func (in *LoggingList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -441,65 +459,182 @@ func (in *PluginList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSpec) DeepCopyInto(out *PluginSpec) { +func (in *LoggingSpec) DeepCopyInto(out *LoggingSpec) { *out = *in - in.Input.DeepCopyInto(&out.Input) - if in.Filter != nil { - in, out := &in.Filter, &out.Filter - *out = make([]FPlugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.FluentbitSpec != nil { + in, out := &in.FluentbitSpec, &out.FluentbitSpec + *out = new(FluentbitSpec) + (*in).DeepCopyInto(*out) + } + if in.FluentdSpec != nil { + in, out := &in.FluentdSpec, &out.FluentdSpec + *out = new(FluentdSpec) + (*in).DeepCopyInto(*out) + } + if in.WatchNamespaces != nil { + in, out := &in.WatchNamespaces, &out.WatchNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingSpec. +func (in *LoggingSpec) DeepCopy() *LoggingSpec { + if in == nil { + return nil + } + out := new(LoggingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingStatus) DeepCopyInto(out *LoggingStatus) { + *out = *in + if in.ConfigCheckResults != nil { + in, out := &in.ConfigCheckResults, &out.ConfigCheckResults + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val } } - if in.Output != nil { - in, out := &in.Output, &out.Output - *out = make([]FPlugin, len(*in)) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingStatus. +func (in *LoggingStatus) DeepCopy() *LoggingStatus { + if in == nil { + return nil + } + out := new(LoggingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Output) DeepCopyInto(out *Output) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Output. +func (in *Output) DeepCopy() *Output { + if in == nil { + return nil + } + out := new(Output) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Output) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputList) DeepCopyInto(out *OutputList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Output, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSpec. -func (in *PluginSpec) DeepCopy() *PluginSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputList. +func (in *OutputList) DeepCopy() *OutputList { if in == nil { return nil } - out := new(PluginSpec) + out := new(OutputList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginStatus) DeepCopyInto(out *PluginStatus) { +func (in *OutputSpec) DeepCopyInto(out *OutputSpec) { *out = *in - return + if in.S3OutputConfig != nil { + in, out := &in.S3OutputConfig, &out.S3OutputConfig + *out = new(output.S3OutputConfig) + (*in).DeepCopyInto(*out) + } + if in.AzureStorage != nil { + in, out := &in.AzureStorage, &out.AzureStorage + *out = new(output.AzureStorage) + (*in).DeepCopyInto(*out) + } + if in.GCSOutput != nil { + in, out := &in.GCSOutput, &out.GCSOutput + *out = new(output.GCSOutput) + (*in).DeepCopyInto(*out) + } + if in.OSSOutput != nil { + in, out := &in.OSSOutput, &out.OSSOutput + *out = new(output.OSSOutput) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchOutput != nil { + in, out := &in.ElasticsearchOutput, &out.ElasticsearchOutput + *out = new(output.ElasticsearchOutput) + (*in).DeepCopyInto(*out) + } + if in.LokiOutput != nil { + in, out := &in.LokiOutput, &out.LokiOutput + *out = new(output.LokiOutput) + (*in).DeepCopyInto(*out) + } + if in.SumologicOutput != nil { + in, out := &in.SumologicOutput, &out.SumologicOutput + *out = new(output.SumologicOutput) + (*in).DeepCopyInto(*out) + } + if in.NullOutputConfig != nil { + in, out := &in.NullOutputConfig, &out.NullOutputConfig + *out = new(output.NullOutputConfig) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus. -func (in *PluginStatus) DeepCopy() *PluginStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSpec. +func (in *OutputSpec) DeepCopy() *OutputSpec { if in == nil { return nil } - out := new(PluginStatus) + out := new(OutputSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValueFrom) DeepCopyInto(out *ValueFrom) { +func (in *OutputStatus) DeepCopyInto(out *OutputStatus) { *out = *in - out.SecretKeyRef = in.SecretKeyRef - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom. -func (in *ValueFrom) DeepCopy() *ValueFrom { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputStatus. 
+func (in *OutputStatus) DeepCopy() *OutputStatus { if in == nil { return nil } - out := new(ValueFrom) + out := new(OutputStatus) in.DeepCopyInto(out) return out } diff --git a/build/Dockerfile b/build/Dockerfile deleted file mode 100644 index c2f7eec44..000000000 --- a/build/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM alpine:3.8 - -ENV OPERATOR=/usr/local/bin/logging-operator \ - USER_UID=1001 \ - USER_NAME=logging-operator - -# install operator binary -COPY build/_output/bin/logging-operator ${OPERATOR} - -COPY build/bin /usr/local/bin -RUN /usr/local/bin/user_setup - -ENTRYPOINT ["/usr/local/bin/entrypoint"] - -USER ${USER_UID} diff --git a/build/bin/entrypoint b/build/bin/entrypoint deleted file mode 100755 index 76d31a162..000000000 --- a/build/bin/entrypoint +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -e - -# This is documented here: -# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines - -if ! whoami &>/dev/null; then - if [ -w /etc/passwd ]; then - echo "${USER_NAME:-logging-operator}:x:$(id -u):$(id -g):${USER_NAME:-logging-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd - fi -fi - -exec ${OPERATOR} $@ diff --git a/build/bin/user_setup b/build/bin/user_setup deleted file mode 100755 index 1e36064cb..000000000 --- a/build/bin/user_setup +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -set -x - -# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) -mkdir -p ${HOME} -chown ${USER_UID}:0 ${HOME} -chmod ug+rwx ${HOME} - -# runtime user will need to be able to self-insert in /etc/passwd -chmod g+rw /etc/passwd - -# no need for this script to remain in the image after running -rm $0 diff --git a/charts/logging-operator-fluent/Chart.yaml b/charts/logging-operator-fluent/Chart.yaml deleted file mode 100644 index 3341c6f4b..000000000 --- a/charts/logging-operator-fluent/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -description: Logging operator CR for Fluentd and Fluent-bit. -name: logging-operator-fluent -version: 0.1.5 -home: https://github.com/banzaicloud/logging-operator -icon: https://banzaicloud.com/img/banzai-cloud-logo.png -keywords: - - logging - - monitoring - - fluentd - - fluenbit - - operator -sources: - - https://github.com/banzaicloud/logging-operator -maintainers: - - name: Banzai Cloud - email: info@banzaicloud.com - diff --git a/charts/logging-operator-fluent/templates/_helpers.tpl b/charts/logging-operator-fluent/templates/_helpers.tpl deleted file mode 100644 index 149739ce7..000000000 --- a/charts/logging-operator-fluent/templates/_helpers.tpl +++ /dev/null @@ -1,32 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "logging-operator-fluent.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "logging-operator-fluent.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "logging-operator-fluent.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} diff --git a/charts/logging-operator-fluent/templates/fluentbit-cr.yaml b/charts/logging-operator-fluent/templates/fluentbit-cr.yaml deleted file mode 100644 index f65ef07b8..000000000 --- a/charts/logging-operator-fluent/templates/fluentbit-cr.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.fluentbit.enabled }} -{{ $fluentbitUseGenericSecret := or .Values.tls.secretName (not .Values.fluentbit.tlsSecret ) }} -apiVersion: logging.banzaicloud.com/v1alpha1 -kind: Fluentbit -metadata: - name: {{ template "logging-operator-fluent.fullname" . }}-fluentbit - labels: - app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }} - helm.sh/chart: {{ include "logging-operator-fluent.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - namespace: {{ default .Release.Namespace .Values.watchNamespace }} - annotations: - prometheus.io/scrape: "true" - prometheus.io/path: "/api/v1/metrics/prometheus" - prometheus.io/port: "2020" - image: {{ toYaml .Values.fluentbit.image | nindent 4 }} - resources: {} - {{- if .Values.fluentbit.tolerations }} - tolerations: {{ toYaml .Values.fluentbit.tolerations | nindent 4 }} - {{- end }} - tls: - enabled: {{ .Values.tls.enabled }} -{{- if $fluentbitUseGenericSecret }} - secretName: {{ .Values.tls.secretName | default (include "logging-operator-fluent.fullname" .) }} - secretType: generic -{{- else }} - secretName: {{ .Values.fluentbit.tlsSecret }} - secretType: tls -{{- end }} - sharedKey: {{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) | b64enc | quote }} -{{ end }} diff --git a/charts/logging-operator-fluent/templates/fluentd-cr.yaml b/charts/logging-operator-fluent/templates/fluentd-cr.yaml deleted file mode 100644 index f205e3d61..000000000 --- a/charts/logging-operator-fluent/templates/fluentd-cr.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- if .Values.fluentd.enabled }} -{{ $fluentdUseGenericSecret := or .Values.tls.secretName (not .Values.fluentd.tlsSecret) }} -apiVersion: logging.banzaicloud.com/v1alpha1 -kind: Fluentd -metadata: - name: {{ template "logging-operator-fluent.fullname" . }}-fluentd - labels: - app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }} - helm.sh/chart: {{ include "logging-operator-fluent.chart" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - namespace: {{ default .Release.Namespace .Values.watchNamespace }} - annotations: - prometheus.io/scrape: "true" - prometheus.io/path: "/metrics" - prometheus.io/port: "25000" - image: {{ toYaml .Values.fluentd.image | nindent 4 }} - volumeModImage: {{ toYaml .Values.fluentd.volumeModImage | nindent 4 }} - configReloaderImage: {{ toYaml .Values.fluentd.configReloaderImage | nindent 4 }} - resources: {} - fluentdPvcSpec: {{ toYaml .Values.fluentd.fluentdPvcSpec | nindent 4 }} - {{- if .Values.fluentd.tolerations }} - tolerations: {{ toYaml .Values.fluentd.tolerations | nindent 4 }} - {{- end }} - tls: - enabled: {{ .Values.tls.enabled }} -{{- if $fluentdUseGenericSecret }} - secretName: {{ .Values.tls.secretName | default (include "logging-operator-fluent.fullname" .) }} - secretType: generic -{{- else }} - secretName: {{ .Values.fluentd.tlsSecret }} - secretType: tls -{{- end }} - sharedKey: {{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) | b64enc | quote }} - serviceType: {{ .Values.fluentd.serviceType | default "ClusterIP" | quote }} -{{ end }} diff --git a/charts/logging-operator-fluent/templates/psp.yaml b/charts/logging-operator-fluent/templates/psp.yaml deleted file mode 100644 index bfa1f488a..000000000 --- a/charts/logging-operator-fluent/templates/psp.yaml +++ /dev/null @@ -1,73 +0,0 @@ -{{ if .Values.psp.enabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - creationTimestamp: null - name: psp.fluent-bit - annotations: - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' -spec: - allowedHostPaths: - - pathPrefix: /var/lib/docker/containers - readOnly: true - - pathPrefix: /var/log - readOnly: true - fsGroup: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsUser: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - seLinux: - rule: RunAsAny - supplementalGroups: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - volumes: - - configMap - - emptyDir - - secret - - hostPath ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - creationTimestamp: null - name: psp.fluentd - annotations: - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' -spec: - fsGroup: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - allowPrivilegeEscalation: false - runAsUser: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - seLinux: - rule: RunAsAny - supplementalGroups: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - volumes: - - configMap - - emptyDir - - secret - - persistentVolumeClaim -{{ end }} diff --git a/charts/logging-operator-fluent/templates/rbac.yaml b/charts/logging-operator-fluent/templates/rbac.yaml deleted file mode 100644 index eabb1b020..000000000 --- a/charts/logging-operator-fluent/templates/rbac.yaml +++ /dev/null @@ -1,71 +0,0 @@ -{{ if .Values.psp.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ template "logging-operator-fluent.fullname" . 
}}-fluent-bit - namespace: {{ .Release.Namespace }} - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -rules: -- apiGroups: - - policy - resourceNames: - - psp.fluent-bit - resources: - - podsecuritypolicies - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ template "logging-operator-fluent.fullname" . }}-fluentd - namespace: {{ .Release.Namespace }} - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -rules: -- apiGroups: - - policy - resourceNames: - - psp.fluentd - resources: - - podsecuritypolicies - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit - namespace: {{ .Release.Namespace }} - labels: - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/cluster-service: 'true' -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "logging-operator-fluent.fullname" . }}-fluent-bit -subjects: - - kind: ServiceAccount - name: logging - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "logging-operator-fluent.fullname" . }}-fluentd - namespace: {{ .Release.Namespace }} - labels: - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/cluster-service: 'true' -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "logging-operator-fluent.fullname" . }}-fluentd -subjects: - - kind: ServiceAccount - name: logging-fluentd - namespace: {{ .Release.Namespace }} -{{ end }} diff --git a/charts/logging-operator-fluent/templates/secret.yaml b/charts/logging-operator-fluent/templates/secret.yaml deleted file mode 100644 index f0b93e661..000000000 --- a/charts/logging-operator-fluent/templates/secret.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and .Values.tls.enabled (not .Values.tls.secretName) }} -{{ $ca := genCA "svc-cat-ca" 3650 }} -{{ $cn := printf "fluentd.%s.svc.cluster.local" .Release.Namespace }} -{{ $server := genSignedCert $cn nil nil 365 $ca }} -{{ $client := genSignedCert "" nil nil 365 $ca }} - -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "logging-operator-fluent.fullname" . }} - labels: - app.kubernetes.io/name: {{ include "logging-operator-fluent.name" . }} - helm.sh/chart: {{ include "logging-operator-fluent.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - caCert: {{ b64enc $ca.Cert }} - clientCert: {{ b64enc $client.Cert }} - clientKey: {{ b64enc $client.Key }} - serverCert: {{ b64enc $server.Cert }} - serverKey: {{ b64enc $server.Key }} -{{ end }} \ No newline at end of file diff --git a/charts/logging-operator-fluent/values.yaml b/charts/logging-operator-fluent/values.yaml deleted file mode 100644 index 409356406..000000000 --- a/charts/logging-operator-fluent/values.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Default values for logging-operator-fluent. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -tls: - enabled: false - secretName: "" - sharedKey: "" - -fluentbit: - enabled: true - namespace: "" - tolerations: - image: - tag: "1.1.3" - repository: "fluent/fluent-bit" - pullPolicy: "IfNotPresent" - tlsSecret: "" - -fluentd: - enabled: true - namespace: "" - image: - tag: "v1.5.0" - repository: "banzaicloud/fluentd" - pullPolicy: "IfNotPresent" - volumeModImage: - tag: "latest" - repository: "busybox" - pullPolicy: "IfNotPresent" - configReloaderImage: - tag: "v0.2.2" - repository: "jimmidyson/configmap-reload" - pullPolicy: "IfNotPresent" - tolerations: - fluentdPvcSpec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 21Gi - tlsSecret: "" - -psp: - enabled: false diff --git a/charts/nginx-logging-demo/.helmignore b/charts/logging-operator-logging/.helmignore similarity index 100% rename from charts/nginx-logging-demo/.helmignore rename to charts/logging-operator-logging/.helmignore diff --git a/charts/logging-operator-logging/Chart.yaml b/charts/logging-operator-logging/Chart.yaml new file mode 100644 index 000000000..c3ef5ff3e --- /dev/null +++ b/charts/logging-operator-logging/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "2.0.0" +description: A Helm chart for Kubernetes +name: logging-operator-logging +version: 2.0.0 diff --git a/charts/logging-operator-fluent/README.md b/charts/logging-operator-logging/README.md similarity index 63% rename from charts/logging-operator-fluent/README.md rename to charts/logging-operator-logging/README.md index 37e4950bf..74f2b61c7 100644 --- a/charts/logging-operator-fluent/README.md +++ b/charts/logging-operator-logging/README.md @@ -1,41 +1,30 @@ -# Logging Operator Fluent Chart +# Installing logging resource to logging-operator ## tl;dr: ```bash $ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com $ helm repo update -# Prerequisites -$ helm install banzaicloud-stable/logging-operator -# Install fluent and fluent-bit cr -$ helm install banzaicloud-stable/logging-operator-fluent +$ helm install banzaicloud-stable/logging-operator-logging ``` -## Introduction - -This chart applies Fluentd and Fluent-bit custom resources to [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) - -## Prerequisites - -- Kubernetes 1.8+ with Beta APIs enabled -- [Logging Operator](https://github.com/banzaicloud/logging-operator) +## Configuration +The following tables lists the configurable parameters of the logging-operator-logging chart and their default values. | Parameter | Description | Default | | --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ | | `tls.enabled` | Enabled TLS communication between components | true | -| `tls.secretName` | Specified generic secret name, which contain tls certs | This will overwrite automatic Helm certificate generation and overrides `fluentbit.tlsSecret` and `fluentd.tlsSecret`. | +| `tls.fluentdSecretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. | +| `tls.fluentbitSecretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. 
| | `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] | | `fluentbit.enabled` | Install fluent-bit | true | | `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace | | `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` | | `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` | | `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` | -| `fluentbit.tolerations` | Fluentbit tolerations | `nil` | -| `fluentbit.tlsSecret` | Secret name that contains Fluentbit TLS client cert | Ignored if `tls.secretName` is specified. Must refer to a secret of type `kubernetes.io/tls` | | `fluentd.enabled` | Install fluentd | true | -| `fluentd.namespace` | Specified fluentd installation namespace | same as operator namespace | -| `fluentd.image.tag` | Fluentd container image tag | `v1.5.0` | +| `fluentd.image.tag` | Fluentd container image tag | `v1.6.3-alpine` | | `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` | | `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` | | `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` | @@ -46,6 +35,4 @@ This chart applies Fluentd and Fluent-bit custom resources to [Logging Operator] | `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` | | `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` | | `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` | -| `fluentd.tolerations` | Fluentd tolerations | `nil` | -| `fluentd.tlsSecret` | Secret name that contains Fluentd TLS client cert | Ignored if `tls.secretName` is specified. Must refer to a secret of type `kubernetes.io/tls`. | -| `psp.enabled` | Install PodSecurityPolicy | `false` | +| `fluentd.fluentdPvcSpec.resources.storageClassName` | Fluentd persistence volume storageclass | `"""` | \ No newline at end of file diff --git a/charts/logging-operator-logging/templates/NOTES.txt b/charts/logging-operator-logging/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/nginx-logging-demo/templates/_helpers.tpl b/charts/logging-operator-logging/templates/_helpers.tpl similarity index 62% rename from charts/nginx-logging-demo/templates/_helpers.tpl rename to charts/logging-operator-logging/templates/_helpers.tpl index 86303e7c0..adf39e886 100644 --- a/charts/nginx-logging-demo/templates/_helpers.tpl +++ b/charts/logging-operator-logging/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "nginx-logging-demo.name" -}} +{{- define "logging-operator-logging.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "nginx-logging-demo.fullname" -}} +{{- define "logging-operator-logging.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -27,6 +27,19 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. 
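The configuration table above for the logging-operator-logging chart maps directly onto its values.yaml further down; a hedged install example overriding two of the documented values (the release name and storage size are illustrative only):

```bash
helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
helm repo update
# Override the fluentd image tag and PVC size documented in the table above.
helm install --name logging banzaicloud-stable/logging-operator-logging \
  --set fluentd.image.tag=v1.6.3-alpine \
  --set fluentd.fluentdPvcSpec.resources.requests.storage=40Gi
```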
*/}} -{{- define "nginx-logging-demo.chart" -}} +{{- define "logging-operator-logging.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} + +{{/* +Common labels +*/}} +{{- define "logging-operator-logging.labels" -}} +app.kubernetes.io/name: {{ include "logging-operator-logging.name" . }} +helm.sh/chart: {{ include "logging-operator-logging.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/charts/logging-operator-logging/templates/logging.yaml b/charts/logging-operator-logging/templates/logging.yaml new file mode 100644 index 000000000..f340aac60 --- /dev/null +++ b/charts/logging-operator-logging/templates/logging.yaml @@ -0,0 +1,38 @@ +apiVersion: logging.banzaicloud.com/v1alpha2 +kind: Logging +metadata: + name: {{ include "logging-operator-logging.name" . }} + labels: +{{ include "logging-operator-logging.labels" . | indent 4 }} +spec: + {{- with .Values.loggingRef }} + loggingRef: {{ . }} + {{- end }} + {{- with .Values.flowConfigCheckDisabled }} + flowConfigCheckDisabled: {{ . }} + {{- end }} + {{- with .Values.flowConfigOverride }} + flowConfigOverride: {{ . }} + {{- end }} + controlNamespace: {{ .Values.controlNamespace | default .Release.Namespace }} + fluentd: + {{- if .Values.tls.enabled }} + tls: + enabled: true + secretName: {{ .Values.tls.fluentdSecretName | default (printf "%s-%s" (include "logging-operator-logging.name" . ) "fluentd-tls" ) }} + sharedKey: "{{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) }}" + {{- end }} + {{- if .Values.fluentd }} +{{ toYaml .Values.fluentd | indent 4}} + {{- end}} + fluentbit: + {{- if .Values.tls.enabled }} + tls: + enabled: true + secretName: {{ .Values.tls.fluentbitSecretName | default (printf "%s-%s" (include "logging-operator-logging.name" . ) "fluentbit-tls" ) }} + sharedKey: "{{ .Values.tls.sharedKey | default (derivePassword 1 "long" (.Release.Time | toString) .Release.Name .Chart.Name ) }}" + {{- end }} + {{- if .Values.fluentbit }} +{{ toYaml .Values.fluentbit | indent 4}} + {{- end}} + diff --git a/charts/logging-operator-logging/templates/secret.yaml b/charts/logging-operator-logging/templates/secret.yaml new file mode 100644 index 000000000..721f68621 --- /dev/null +++ b/charts/logging-operator-logging/templates/secret.yaml @@ -0,0 +1,34 @@ +{{- if .Values.tls.enabled }} +{{ $ca := genCA "svc-cat-ca" 3650 }} +{{ $cn := printf "%s-%s.%s.svc.cluster.local" (include "logging-operator-logging.name" .) "fluentd" .Release.Namespace }} +{{ $server := genSignedCert $cn nil nil 365 $ca }} +{{ $client := genSignedCert "" nil nil 365 $ca }} + +{{- if not .Values.tls.fluentdSecretName }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "logging-operator-logging.name" . }}-fluentd-tls + labels: +{{ include "logging-operator-logging.labels" . | indent 4 }} +data: + ca.crt: {{ b64enc $ca.Cert }} + tls.crt: {{ b64enc $server.Cert }} + tls.key: {{ b64enc $server.Key }} +{{ end }} + +--- + +{{- if not .Values.tls.fluentbitSecretName }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "logging-operator-logging.name" . }}-fluentbit-tls + labels: +{{ include "logging-operator-logging.labels" . 
| indent 4 }} +data: + ca.crt: {{ b64enc $ca.Cert }} + tls.crt: {{ b64enc $client.Cert }} + tls.key: {{ b64enc $client.Key }} +{{ end }} +{{ end }} diff --git a/charts/logging-operator-logging/values.yaml b/charts/logging-operator-logging/values.yaml new file mode 100644 index 000000000..eb2e0766e --- /dev/null +++ b/charts/logging-operator-logging/values.yaml @@ -0,0 +1,35 @@ +# Default values for logging-operator-logging. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Reference name of the logging deployment +loggingRef: "" +# Disable configuration check before deploy +flowConfigCheckDisabled: false +# Use static configuration instead of generated config. +flowConfigOverride: "" + +# Fluent-bit configurations +fluentbit: {} +# Fluentd configurations +fluentd: {} +# fluentdPvcSpec: +# accessModes: +# - ReadWriteOnce +# resources: +# requests: +# storage: 40Gi +# storageClassName: fast + +# Enable secure connection between fluentd and fluent-bit +tls: + enabled: true + # Shared key for fluentd authentication + sharedKey: "" + fluentbitSecretName: "" + fluentdSecretName: "" + +# Limit namespaces from where to read Flow and Output specs +watchNamespaces: [] +# Control namespace that contains ClusterOutput and ClusterFlow resources +controlNamespace: "" \ No newline at end of file diff --git a/charts/logging-operator/Chart.yaml b/charts/logging-operator/Chart.yaml index 361b7fc6c..aa6cc149d 100644 --- a/charts/logging-operator/Chart.yaml +++ b/charts/logging-operator/Chart.yaml @@ -1,18 +1,5 @@ apiVersion: v1 -description: Logging operator for Kubernetes based on Fluentd and Fluent-bit. +appVersion: "2.0.0" +description: A Helm chart for Kubernetes name: logging-operator -version: 0.3.3 -appVersion: 0.2.2 -home: https://github.com/banzaicloud/logging-operator -icon: https://banzaicloud.com/img/banzai-cloud-logo.png -keywords: - - logging - - monitoring - - fluentd - - fluenbit - - operator -sources: -- https://github.com/banzaicloud/logging-operator -maintainers: -- name: Banzai Cloud - email: info@banzaicloud.com +version: 2.0.0 diff --git a/charts/logging-operator/README.md b/charts/logging-operator/README.md index cad88b1dd..0a54a1747 100644 --- a/charts/logging-operator/README.md +++ b/charts/logging-operator/README.md @@ -12,7 +12,7 @@ $ helm install banzaicloud-stable/logging-operator ## Introduction -This chart bootstraps an [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +This chart bootstraps an [Logging Operator](https://github.com/banzaicloud/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
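For reference, with the default values.yaml above (tls.enabled true, empty fluentd and fluentbit blocks), the logging.yaml template shown earlier renders a Logging object roughly like the sketch below; the object name comes from the chart name and the sharedKey is produced by derivePassword at install time, so both are shown as placeholders.

```yaml
# Approximate render of charts/logging-operator-logging/templates/logging.yaml
# with default values; secret names follow the <name>-fluentd-tls /
# <name>-fluentbit-tls pattern generated by templates/secret.yaml.
apiVersion: logging.banzaicloud.com/v1alpha2
kind: Logging
metadata:
  name: logging-operator-logging
spec:
  controlNamespace: default            # falls back to the release namespace
  fluentd:
    tls:
      enabled: true
      secretName: logging-operator-logging-fluentd-tls
      sharedKey: "<derived-at-install-time>"
  fluentbit:
    tls:
      enabled: true
      secretName: logging-operator-logging-fluentbit-tls
      sharedKey: "<derived-at-install-time>"
```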
## Prerequisites @@ -45,12 +45,11 @@ The following tables lists the configurable parameters of the logging-operator c | Parameter | Description | Default | | --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ | | `image.repository` | Container image repository | `banzaicloud/logging-operator` | -| `image.tag` | Container image tag | `0.2.2` | +| `image.tag` | Container image tag | `2.0.0` | | `image.pullPolicy` | Container pull policy | `IfNotPresent` | | `nameOverride` | Override name of app | `` | | `fullnameOverride` | Override full name of app | `` | | `watchNamespace` | Namespace to watch fot LoggingOperator CRD | `` | -| `grafana.dashboard.enabled` | Install grafana logging-operator dashboard | `true` | | `rbac.enabled` | Create rbac service account and roles | `true` | | `rbac.psp.enabled` | Must be used with `rbac.enabled` true. If true, creates & uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. | `false` | | `affinity` | Node Affinity | `{}` | @@ -68,31 +67,46 @@ $ helm install --name my-release -f values.yaml banzaicloud-stable/logging-opera > **Tip**: You can use the default [values.yaml](values.yaml) -## Installing Fluentd and Fluent-bit +## Installing Fluentd and Fluent-bit via logging -The previous chart does **not** install Fluentd or Fluent-bit custom resource. To install them please use the [Logging Operator Fluent](https://github.com/banzaicloud/banzai-charts/logging-operator-fluent) chart. +The previous chart does **not** install `logging` resource to deploy Fluentd and Fluent-bit on luster. To install them please use the [Logging Operator Logging](https://github.com/banzaicloud/logging-operator/tree/master/charts/logging-operator-logging) chart. ## tl;dr: ```bash $ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com $ helm repo update -$ helm install banzaicloud-stable/logging-operator-fluent +$ helm install banzaicloud-stable/logging-operator-logging ``` +## Configuration + +The following tables lists the configurable parameters of the logging-operator-logging chart and their default values. +## tl;dr: + +```bash +$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com +$ helm repo update +$ helm install banzaicloud-stable/logging-operator-logging +``` + +## Configuration + +The following tables lists the configurable parameters of the logging-operator-logging chart and their default values. + | Parameter | Description | Default | | --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ | | `tls.enabled` | Enabled TLS communication between components | true | -| `tls.secretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. | +| `tls.fluentdSecretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. | +| `tls.fluentbitSecretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. 
| | `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] | | `fluentbit.enabled` | Install fluent-bit | true | | `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace | -| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` | +| `fluentbit.image.tag` | Fluentbit container image tag | `1.1.3` | | `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` | | `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` | | `fluentd.enabled` | Install fluentd | true | -| `fluentd.namespace` | Specified fluentd installation namespace | same as operator namespace | -| `fluentd.image.tag` | Fluentd container image tag | `v1.5.0` | +| `fluentd.image.tag` | Fluentd container image tag | `v1.6.3-alpine` | | `fluentd.image.repository` | Fluentd container image repository | `banzaicloud/fluentd` | | `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` | | `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` | @@ -103,3 +117,4 @@ $ helm install banzaicloud-stable/logging-operator-fluent | `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` | | `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` | | `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` | +| `fluentd.fluentdPvcSpec.resources.storageClassName` | Fluentd persistence volume storageclass | `"""` | \ No newline at end of file diff --git a/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json b/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json deleted file mode 100644 index 676fa21a7..000000000 --- a/charts/logging-operator/grafana-dashboards/logging-dashboard_rev1.json +++ /dev/null @@ -1,1069 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.1.3" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": 7752, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#d44a3a", - "rgba(237, 129, 40, 0.89)", - "#299c46" - ], - "datasource": "Prometheus", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 2, - "x": 0, - "y": 0 - }, - "id": 4, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - 
"prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(kube_node_status_condition{condition=\"Ready\"})", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "0,1", - "title": "Active Nodes", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#d44a3a", - "rgba(237, 129, 40, 0.89)", - "#299c46" - ], - "datasource": "Prometheus", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 2, - "x": 2, - "y": 0 - }, - "id": 6, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(kube_pod_info{pod=~\"fluent-bit.*\"})", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Active Fluent-bit", - "refId": "A" - } - ], - "thresholds": "0,1", - "title": "Fluent-bit", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#d44a3a", - "rgba(237, 129, 40, 0.89)", - "#299c46" - ], - "datasource": "Prometheus", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 2, - "x": 4, - "y": 0 - }, - "id": 8, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(kube_pod_info{pod=~\"fluentd.*\"})", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "0,1", - "title": "Fluentd", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 5, - "w": 9, - "x": 6, - "y": 0 - }, - 
"id": 2, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(fluentbit_input_bytes_total[1m])", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{ kubernetes_pod_name }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluent-bit input bytes/s", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 5, - "w": 9, - "x": 15, - "y": 0 - }, - "id": 9, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(fluentbit_output_proc_bytes_total[1m])", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{ kubernetes_pod_name }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluent-bit output bytes/s", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 5, - "w": 8, - "x": 0, - "y": 5 - }, - "id": 10, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(fluentbit_output_errors_total[1m])", - "format": "time_series", - "hide": false, - 
"intervalFactor": 1, - "legendFormat": "{{ kubernetes_pod_name }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluent-bit error/s", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 5, - "w": 8, - "x": 8, - "y": 5 - }, - "id": 12, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(fluentd_output_status_emit_count[1m])", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{ type }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluentd output emit/s by Plugin", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ops", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 5 - }, - "id": 15, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "fluentd_output_status_buffer_queue_length", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{ type }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluentd output buffer queue", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - 
"yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 10 - }, - "id": 11, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(fluentbit_output_retries_total[1m])", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "Retries {{ kubernetes_pod_name }}", - "refId": "A" - }, - { - "expr": "rate(fluentbit_output_retries_failed_total[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Failed {{ kubernetes_pod_name }}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluent-bit retries/fails", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 6, - "w": 8, - "x": 8, - "y": 10 - }, - "id": 14, - "legend": { - "alignAsTable": false, - "avg": false, - "current": true, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(fluentd_output_status_retry_count[1m]))", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "Retry rate", - "refId": "A" - }, - { - "expr": "sum(rate(fluentd_output_status_num_errors[1m]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Error rate", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluentd output error/retry rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ops", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, 
- "y": 10 - }, - "id": 13, - "legend": { - "alignAsTable": false, - "avg": false, - "current": true, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "fluentd_output_status_buffer_total_bytes", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{ type }}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Fluentd output buffer size", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "30s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Logging Dashboard", - "uid": "bNn5LUtiz", - "version": 10, - "description": "This is a simple dashboard for: https://github.com/banzaicloud/logging-operator utilising Fluent-bit and Fluentd" -} \ No newline at end of file diff --git a/charts/logging-operator/templates/NOTES.txt b/charts/logging-operator/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/logging-operator/templates/_helpers.tpl b/charts/logging-operator/templates/_helpers.tpl index a58c97189..a5e197e22 100644 --- a/charts/logging-operator/templates/_helpers.tpl +++ b/charts/logging-operator/templates/_helpers.tpl @@ -30,3 +30,16 @@ Create chart name and version as used by the chart label. {{- define "logging-operator.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} + +{{/* +Common labels +*/}} +{{- define "logging-operator.labels" -}} +app.kubernetes.io/name: {{ include "logging-operator.name" . }} +helm.sh/chart: {{ include "logging-operator.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/charts/logging-operator/templates/configmap.yaml b/charts/logging-operator/templates/configmap.yaml deleted file mode 100644 index 127e65b30..000000000 --- a/charts/logging-operator/templates/configmap.yaml +++ /dev/null @@ -1,16 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ template "logging-operator.fullname" . }} - labels: - app.kubernetes.io/name: {{ include "logging-operator.name" . }} - helm.sh/chart: {{ include "logging-operator.chart" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - -data: - config.toml: |- - # This is the config for the logging operator - - [logging-operator] - rbac = {{ .Values.rbac.enabled }} \ No newline at end of file diff --git a/charts/logging-operator/templates/crd.yaml b/charts/logging-operator/templates/crd.yaml deleted file mode 100644 index f15d981c7..000000000 --- a/charts/logging-operator/templates/crd.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: plugins.logging.banzaicloud.com - annotations: - "helm.sh/hook": crd-install - "helm.sh/hook-delete-policy": "before-hook-creation" -spec: - group: logging.banzaicloud.com - names: - kind: Plugin - listKind: PluginList - plural: plugins - singular: plugin - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - status: - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: fluentbits.logging.banzaicloud.com - annotations: - "helm.sh/hook": crd-install - "helm.sh/hook-delete-policy": "before-hook-creation" -spec: - group: logging.banzaicloud.com - names: - kind: Fluentbit - listKind: FluentbitList - plural: fluentbits - singular: fluentbit - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - status: - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: fluentds.logging.banzaicloud.com - annotations: - "helm.sh/hook": crd-install - "helm.sh/hook-delete-policy": "before-hook-creation" -spec: - group: logging.banzaicloud.com - names: - kind: Fluentd - listKind: FluentdList - plural: fluentds - singular: fluentd - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - status: - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true \ No newline at end of file diff --git a/charts/logging-operator/templates/deployment.yaml b/charts/logging-operator/templates/deployment.yaml index b4b29ebc8..97a16394b 100644 --- a/charts/logging-operator/templates/deployment.yaml +++ b/charts/logging-operator/templates/deployment.yaml @@ -1,12 +1,9 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ template "logging-operator.fullname" . }} + name: {{ include "logging-operator.fullname" . }} labels: - app.kubernetes.io/name: {{ include "logging-operator.name" . }} - helm.sh/chart: {{ include "logging-operator.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include "logging-operator.labels" . 
| indent 4 }} spec: replicas: {{ .Values.replicaCount }} selector: @@ -15,56 +12,35 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} template: metadata: - annotations: - scheduler.alpha.kubernetes.io/tolerations: {{ toJson .Values.tolerations | quote }} labels: app.kubernetes.io/name: {{ include "logging-operator.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} spec: - containers: - - name: {{ template "logging-operator.name" . }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - command: - - logging-operator - env: - - name: WATCH_NAMESPACE - value: {{ .Values.watchNamespace | quote }} - - name: KUBERNETES_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: {{ include "logging-operator.fullname" . | quote }} - volumeMounts: - - mountPath: /logging-operator/config - name: config - - {{- if .Values.securityContext }} - securityContext: {{ toYaml .Values.securityContext | nindent 10 }} - {{- end }} - resources: {{ toYaml .Values.resources | nindent 10 }} - {{- if .Values.podSecurityContext }} - securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }} - {{- end }} - {{- if .Values.rbac.enabled }} - serviceAccountName: {{ template "logging-operator.fullname" . }} - {{- end }} - volumes: - - configMap: - name: {{ template "logging-operator.fullname" . }} - name: config - - {{- with .Values.nodeSelector }} - nodeSelector: {{ toYaml . | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + ports: + - name: http + containerPort: {{ .Values.http.port }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.affinity }} - affinity: {{ toYaml . | nindent 8 }} + affinity: + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} - tolerations: {{ toYaml . | nindent 8 }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.rbac.enabled }} + serviceAccountName: {{ include "logging-operator.fullname" . }} {{- end }} \ No newline at end of file diff --git a/charts/logging-operator/templates/grafana-dashboard-logging.yaml b/charts/logging-operator/templates/grafana-dashboard-logging.yaml deleted file mode 100644 index 353e96db7..000000000 --- a/charts/logging-operator/templates/grafana-dashboard-logging.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.grafana.dashboard.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "logging-operator.fullname" . 
}}-grafana-dashboard-logging - labels: - pipeline_grafana_dashboard: "1" -data: - logging.json: |-2 - -{{.Files.Get "grafana-dashboards/logging-dashboard_rev1.json"| indent 4}} -{{- end }} \ No newline at end of file diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml new file mode 100644 index 000000000..ac7040257 --- /dev/null +++ b/charts/logging-operator/templates/logging.banzaicloud.com_clusterflows.yaml @@ -0,0 +1,139 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: clusterflows.logging.banzaicloud.com +spec: + group: logging.banzaicloud.com + names: + kind: ClusterFlow + listKind: ClusterFlowList + plural: clusterflows + singular: clusterflow + scope: "" + validation: + openAPIV3Schema: + description: ClusterFlow is the Schema for the clusterflows API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Name of the logging cluster to be attached + properties: + filters: + items: + properties: + parser: + description: https://docs.fluentd.org/filter/parser + properties: + emit_invalid_record_to_error: + description: 'Emit invalid record to @ERROR label. Invalid + cases are: key not exist, format is not matched, unexpected + error' + type: boolean + hash_value_fiel: + description: Store parsed values as a hash value in a field. + type: string + inject_key_prefix: + description: Store parsed values with specified key name prefix. + type: string + key_name: + description: Specify field name in the record to parse. + type: string + parsers: + items: + properties: + estimate_current_event: + description: If true, use Fluent::EventTime.now(current + time) as a timestamp when time_key is specified. + type: boolean + expression: + description: Regexp expression to evaluate + type: string + keep_time_key: + description: If true, keep time field in the record. + type: boolean + null_empty_string: + description: If true, empty string field is replaced + with nil + type: boolean + null_value_pattern: + description: ' Specify null value pattern.' + type: string + time_key: + description: Specify time field for event time. If the + event doesn't have this field, current time is used. + type: string + type: + description: 'Parse type: apache2, apache_error, nginx, + syslog, csv, tsv, ltsv, json, multiline, none' + type: string + type: object + type: array + remove_key_name_field: + description: Remove key_name field when parsing is succeeded + type: boolean + replace_invalid_sequence: + description: If true, invalid string is replaced with safe + characters and re-parse it. + type: boolean + reserve_data: + description: Keep original key-value pair in parsed result. + type: boolean + reserve_time: + description: Keep original event time in parsed result. 
+ type: boolean + required: + - key_name + type: object + stdout: + type: object + tag_normaliser: + properties: + format: + description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser) + type: string + type: object + type: object + type: array + loggingRef: + type: string + outputRefs: + items: + type: string + type: array + selectors: + additionalProperties: + type: string + type: object + required: + - outputRefs + - selectors + type: object + status: + description: FlowStatus defines the observed state of Flow + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml new file mode 100644 index 000000000..a094d1c7b --- /dev/null +++ b/charts/logging-operator/templates/logging.banzaicloud.com_clusteroutputs.yaml @@ -0,0 +1,1795 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: clusteroutputs.logging.banzaicloud.com +spec: + group: logging.banzaicloud.com + names: + kind: ClusterOutput + listKind: ClusterOutputList + plural: clusteroutputs + singular: clusteroutput + scope: "" + validation: + openAPIV3Schema: + description: ClusterOutput is the Schema for the clusteroutputs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + azurestorage: + properties: + auto_create_container: + description: 'Automatically create container if not exists(default: + true)' + type: boolean + azure_container: + description: Your azure storage container + type: string + azure_object_key_format: + description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})' + type: string + azure_storage_access_key: + description: Your azure storage access key + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + azure_storage_account: + description: Your azure storage account + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + azure_storage_type: + description: 'Azure storage type currently only "blob" supported + (default: blob)' + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. 
+ type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. + type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. 
(default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + format: + description: 'Compat format type: out_file, json, ltsv (default: + out_file)' + type: string + path: + description: Path prefix of the files on Azure + type: string + store_as: + description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)' + type: string + required: + - azure_container + - azure_storage_access_key + - azure_storage_account + type: object + elasticsearch: + description: Send your logs to Elasticsearch + properties: + application_name: + description: 'Specify the application name for the rollover index + to be created.(default: default)' + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. 
+ type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. + type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. 
(default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + content_type: + description: 'With content_type application/x-ndjson, elasticsearch + plugin adds application/x-ndjson as Content-Type in payload. (default: + application/json)' + type: string + customize_template: + description: Specify the string and its value to be replaced in + form of hash. Can contain multiple key value pair that would be + replaced in the specified template_file. This setting only creates + template and to add rollover index please check the rollover_index + configuration. + type: string + deflector_alias: + description: Specify the deflector alias which would be assigned + to the rollover index created. This is useful in case of using + the Elasticsearch rollover API + type: string + fail_on_putting_template_retry_exceed: + description: 'Indicates whether to fail when max_retry_putting_template + is exceeded. If you have multiple output plugin, you could use + this property to do not fail on fluentd statup.(default: true)' + type: boolean + host: + description: You can specify Elasticsearch host by this parameter. + (default:localhost) + type: string + hosts: + description: You can specify multiple Elasticsearch hosts with separator + ",". If you specify hosts option, host and port options are ignored. + type: string + http_backend: + description: 'With http_backend typhoeus, elasticsearch plugin uses + typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. + (default: excon)' + type: string + id_key: + description: https://github.com/uken/fluent-plugin-elasticsearch#id_key + type: string + include_index_in_url: + description: With this option set to true, Fluentd manifests the + index name in the request URL (rather than in the request body). + You can use this option to enforce an URL-based access control. + type: boolean + include_tag_key: + description: 'This will add the Fluentd tag in the JSON record.(default: + false)' + type: boolean + include_timestamp: + description: Adds a @timestamp field to the log, following all settings + logstash_format does, except without the restrictions on index_name. + This allows one to log to an alias in Elasticsearch and utilize + the rollover API. + type: boolean + index_date_pattern: + description: 'Specify this to override the index date pattern for + creating a rollover index.(default: now/d)' + type: string + index_prefix: + description: Specify the index prefix for the rollover index to + be created. 
+ type: string + logstash_dateformat: + description: 'Set the Logstash date format.(default: %Y.%m.%d)' + type: string + logstash_format: + description: 'Enable Logstash log format.(default: false)' + type: boolean + logstash_prefix: + description: 'Set the Logstash prefix.(default: true)' + type: string + logstash_prefix_separator: + description: 'Set the Logstash prefix separator.(default: -)' + type: string + max_retry_get_es_version: + description: 'You can specify times of retry obtaining Elasticsearch + version.(default: 15)' + type: string + max_retry_putting_template: + description: 'You can specify times of retry putting template.(default: + 10)' + type: string + password: + description: Password for HTTP Basic authentication. + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + path: + description: Path for HTTP Basic authentication. + type: string + pipeline: + description: This param is to set a pipeline id of your elasticsearch + to be added into the request, you can configure ingest node. + type: string + port: + description: 'You can specify Elasticsearch port by this parameter.(default: + 9200)' + type: string + prefer_oj_serializer: + description: 'With default behavior, Elasticsearch client uses Yajl + as JSON encoder/decoder. Oj is the alternative high performance + JSON encoder/decoder. When this parameter sets as true, Elasticsearch + client uses Oj as JSON encoder/decoder. (default: fqlse)' + type: boolean + reconnect_on_error: + description: 'Indicates that the plugin should reset connection + on any error (reconnect on next send). By default it will reconnect + only on "host unreachable exceptions". We recommended to set this + true in the presence of elasticsearch shield.(default: false)' + type: boolean + reload_connections: + description: 'You can tune how the elasticsearch-transport host + reloading feature works.(default: true)' + type: boolean + reload_on_failure: + description: 'Indicates that the elasticsearch-transport will try + to reload the nodes addresses if there is a failure while making + the request, this can be useful to quickly remove a dead node + from the list of addresses.(default: false)' + type: boolean + remove_keys_on_update: + description: Remove keys on update will not update the configured + keys in elasticsearch when a record is being updated. This setting + only has any effect if the write operation is update or upsert. + type: string + remove_keys_on_update_key: + description: This setting allows remove_keys_on_update to be configured + with a key in each record, in much the same way as target_index_key + works. + type: string + request_timeout: + description: 'You can specify HTTP request timeout.(default: 5s)' + type: string + resurrect_after: + description: 'You can set in the elasticsearch-transport how often + dead connections from the elasticsearch-transport''s pool will + be resurrected.(default: 60s)' + type: string + retry_tag: + description: This setting allows custom routing of messages in response + to bulk request failures. The default behavior is to emit failed + records using the same tag that was provided. 
+ type: string + rollover_index: + description: 'Specify this as true when an index with rollover capability + needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index' + type: boolean + routing_key: + description: Similar to parent_key config, will add _routing into + elasticsearch command if routing_key is set and the field does + exist in input event. + type: string + scheme: + description: 'Scheme for HTTP Basic authentication.(default: true)' + type: boolean + tag_key: + description: 'This will add the Fluentd tag in the JSON record.(default: + tag)' + type: string + target_index_key: + description: Tell this plugin to find the index name to write to + in the record under this key in preference to other mechanisms. + Key can be specified as path to nested record using dot ('.') + as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key + type: string + target_type_key: + description: 'Similar to target_index_key config, find the type + name to write to in the record under this key (or nested record). + If key not found in record - fallback to type_name.(default: true)' + type: string + template_file: + description: The path to the file containing the template to install. + type: string + template_name: + description: The name of the template to define. If a template by + the name given is already present, it will be left unchanged, + unless template_overwrite is set, in which case the template will + be updated. + type: string + template_overwrite: + description: 'Always update the template, even if it already exists.(default: + false)' + type: boolean + templates: + description: Specify index templates in form of hash. Can contain + multiple templates. + type: string + time_key: + description: By default, when inserting records in Logstash format, + @timestamp is dynamically created with the time at log ingestion. + If you'd like to use a custom time, include an @timestamp with + your record. + type: string + time_key_format: + description: The format of the time stamp field (@timestamp or what + you specify with time_key). This parameter only has an effect + when logstash_format is true as it only affects the name of the + index we write to. + type: string + time_parse_error_tag: + description: With logstash_format true, elasticsearch plugin parses + timestamp field for generating index name. If the record has invalid + timestamp value, this plugin emits an error event to @ERROR label + with time_parse_error_tag configured tag. + type: string + time_precision: + description: Should the record not include a time_key, define the + degree of sub-second time precision to preserve from the time + portion of the routed event. + type: string + user: + description: User for HTTP Basic authentication. This plugin will + escape required URL encoded characters within %{} placeholders. + e.g. %{demo+} + type: string + utc_index: + description: 'By default, the records inserted into index logstash-YYMMDD + with UTC (Coordinated Universal Time). This option allows to use + local time if you describe utc_index to false.(default: true)' + type: boolean + with_transporter_log: + description: 'This is debugging purpose option to enable to obtain + transporter layer log. 
(default: false)' + type: boolean + write_operation: + description: 'The write_operation can be any of: (index,create,update,upsert)(default: + index)' + type: string + type: object + enabledNamespaces: + items: + type: string + type: array + gcs: + properties: + acl: + description: 'Permission for the object in GCS: auth_read owner_full + owner_read private project_private public_read' + type: string + auto_create_bucket: + description: 'Create GCS bucket if it does not exists (default: + true)' + type: boolean + bucket: + description: Name of a GCS bucket + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. + type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. 
+ type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. (default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. 
+ type: string + required: + - timekey + type: object + client_retries: + description: Number of times to retry requests on server error + type: integer + client_timeout: + description: Default timeout to use in requests + type: integer + credentials_json: + description: GCS service account credentials in JSON format + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + encryption_key: + description: Customer-supplied, AES-256 encryption key + type: string + format: + properties: + type: + description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value' + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + description: 'Max length of `%{hex_random}` placeholder(4-16) (default: + 4)' + type: integer + keyfile: + description: Path of GCS service account credentials JSON file + type: string + object_key_format: + description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})' + type: string + object_metadata: + description: User provided web-safe keys and arbitrary string values + that will returned with requests for the file as "x-goog-meta-" + response headers. + items: + properties: + key: + description: Key + type: string + value: + description: Value + type: string + required: + - key + - value + type: object + type: array + overwrite: + description: 'Overwrite already existing path (default: false)' + type: boolean + path: + description: Path prefix of the files on GCS + type: string + project: + description: Project identifier for GCS + type: string + storage_class: + description: 'Storage class of the file: dra nearline coldline multi_regional + regional standard' + type: string + store_as: + description: 'Archive format on GCS: gzip json text (default: gzip)' + type: string + transcoding: + description: Enable the decompressive form of transcoding + type: boolean + required: + - bucket + - project + type: object + loggingRef: + type: string + loki: + description: Fluentd output plugin to ship logs to a Loki server. + properties: + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. 
+ type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. + type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. 
(default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + extra_labels: + description: 'Set of labels to include with every Loki stream.(default: + nil)' + type: boolean + password: + description: Specify password if the Loki server requires authentication. + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + tenant: + description: Loki is a multi-tenant log storage platform and all + requests sent must include a tenant. + type: string + url: + description: The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net) + type: string + username: + description: Specify a username if the Loki server requires authentication. + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + type: object + nullout: + type: object + oss: + properties: + aaccess_key_secret: + description: Your access secret key + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + access_key_id: + description: Your access key id + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + auto_create_bucket: + description: 'desc ''Create OSS bucket if it does not exists (default: + false)' + type: boolean + bucket: + description: Your bucket name + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. 
+ output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. + type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. 
+ type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. (default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. 
+ type: string + required: + - timekey + type: object + check_bucket: + description: 'Check bucket if exists or not (default: true)' + type: boolean + check_object: + description: 'Check object before creation (default: true)' + type: boolean + download_crc_enable: + description: 'Download crc enabled (default: true)' + type: boolean + endpoint: + description: OSS endpoint to connect to' + type: string + format: + properties: + type: + description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value' + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + description: 'The length of `%{hex_random}` placeholder(4-16) (default: + 4)' + type: integer + index_format: + description: '`sprintf` format for `%{index}` (default: %d)' + type: string + key_format: + description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})' + type: string + open_timeout: + description: 'Timeout for open connections (default: 10)' + type: integer + oss_sdk_log_dir: + description: 'OSS SDK log directory (default: /var/log/td-agent)' + type: string + overwrite: + description: 'Overwrite already existing path (default: false)' + type: boolean + path: + description: 'Path prefix of the files on OSS (default: fluent/logs)' + type: string + read_timeout: + description: 'Timeout for read response (default: 120)' + type: integer + store_as: + description: 'Archive format on OSS: gzip, json, text, lzo, lzma2 + (default: gzip)' + type: string + upload_crc_enable: + description: 'Upload crc enabled (default: true)' + type: boolean + warn_for_delay: + description: Given a threshold to treat events as delay, output + warning logs if delayed events were put into OSS + type: string + required: + - aaccess_key_secret + - access_key_id + - bucket + - endpoint + type: object + s3: + properties: + acl: + description: Permission for the object in S3 + type: string + assume_role_credentials: + description: assume_role_credentials + properties: + duration_seconds: + description: The duration, in seconds, of the role session (900-3600) + type: string + external_id: + description: A unique identifier that is used by third parties + when assuming roles in their customers' accounts. + type: string + policy: + description: An IAM policy in JSON format + type: string + role_arn: + description: The Amazon Resource Name (ARN) of the role to assume + type: string + role_session_name: + description: An identifier for the assumed role session + type: string + required: + - role_arn + - role_session_name + type: object + auto_create_bucket: + description: Create S3 bucket if it does not exists + type: string + aws_iam_retries: + description: The number of attempts to load instance profile credentials + from the EC2 metadata service using IAM role + type: string + aws_key_id: + description: AWS access key id + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + aws_sec_key: + description: AWS secret key. 
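+              # aws_key_id and aws_sec_key use the same secret-reference shape as the other
+              # credential fields in this CRD: either a literal value or a valueFrom pointing
+              # at a Kubernetes secret. A hedged sketch (the secret name "logging-s3" and its
+              # key are illustrative assumptions, not values defined by this schema):
+              #
+              #   aws_sec_key:
+              #     valueFrom:
+              #       secretKeyRef:
+              #         name: logging-s3
+              #         key: awsSecretAccessKey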
+ properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. + type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. 
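+              # Putting the s3 fields of this section together, a minimal ClusterOutput manifest
+              # might look like the sketch below (field names come from this schema; the bucket,
+              # region, path and secret names are illustrative assumptions):
+              #
+              #   apiVersion: logging.banzaicloud.com/v1alpha2
+              #   kind: ClusterOutput
+              #   metadata:
+              #     name: s3-output-sample
+              #   spec:
+              #     s3:
+              #       aws_key_id:
+              #         valueFrom:
+              #           secretKeyRef: {name: logging-s3, key: awsAccessKeyId}
+              #       aws_sec_key:
+              #         valueFrom:
+              #           secretKeyRef: {name: logging-s3, key: awsSecretAccessKey}
+              #       s3_bucket: example-logging-bucket
+              #       s3_region: eu-west-1
+              #       path: logs/
+              #       buffer:
+              #         timekey: 10m
+              #         timekey_wait: 30s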
+ type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. (default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + check_apikey_on_start: + description: Check AWS key on start + type: string + check_bucket: + description: Check bucket if exists or not + type: string + check_object: + description: Check object before creation + type: string + compute_checksums: + description: AWS SDK uses MD5 for API request/response by default + type: string + enable_transfer_acceleration: + description: 'If true, S3 Transfer Acceleration will be enabled + for uploads. 
IMPORTANT: You must first enable this feature on + your destination S3 bucket' + type: string + force_path_style: + description: If true, the bucket name is always left in the request + URI and never moved to the host as a sub-domain + type: string + format: + properties: + type: + description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value' + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + grant_full_control: + description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions + on the object + type: string + grant_read: + description: Allows grantee to read the object data and its metadata + type: string + grant_read_acp: + description: Allows grantee to read the object ACL + type: string + grant_write_acp: + description: Allows grantee to write the ACL for the applicable + object + type: string + hex_random_length: + description: The length of `%{hex_random}` placeholder(4-16) + type: string + index_format: + description: '`sprintf` format for `%{index}`' + type: string + instance_profile_credentials: + description: instance_profile_credentials + properties: + http_open_timeout: + description: Number of seconds to wait for the connection to + open + type: string + http_read_timeout: + description: Number of seconds to wait for one block to be read + type: string + ip_address: + description: IP address (default:169.254.169.254) + type: string + port: + description: Port number (default:80) + type: string + retries: + description: Number of times to retry when retrieving credentials + type: string + type: object + overwrite: + description: Overwrite already existing path + type: string + path: + description: Path prefix of the files on S3 + type: string + proxy_uri: + description: URI of proxy environment + type: string + s3_bucket: + description: S3 bucket name + type: string + s3_endpoint: + description: Custom S3 endpoint (like minio) + type: string + s3_metadata: + description: Arbitrary S3 metadata headers to set for the object + type: string + s3_object_key_format: + description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})' + type: string + s3_region: + description: S3 region name + type: string + shared_credentials: + description: shared_credentials + properties: + path: + description: 'Path to the shared file. (default: $HOME/.aws/credentials)' + type: string + profile_name: + description: Profile name. 
Default to 'default' or ENV['AWS_PROFILE'] + type: string + type: object + signature_version: + description: Signature version for API Request (s3,v4) + type: string + sse_customer_algorithm: + description: Specifies the algorithm to use to when encrypting the + object + type: string + sse_customer_key: + description: Specifies the customer-provided encryption key for + Amazon S3 to use in encrypting data + type: string + sse_customer_key_md5: + description: Specifies the 128-bit MD5 digest of the encryption + key according to RFC 1321 + type: string + ssekms_key_id: + description: Specifies the AWS KMS key ID to use for object encryption + type: string + ssl_verify_peer: + description: If false, the certificate of endpoint will not be verified + type: string + storage_class: + description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA) + type: string + store_as: + description: Archive format on S3 + type: string + use_bundled_cert: + description: Use aws-sdk-ruby bundled cert + type: string + use_server_side_encryption: + description: The Server-side encryption algorithm used when storing + this object in S3 (AES256, aws:kms) + type: string + warn_for_delay: + description: Given a threshold to treat events as delay, output + warning logs if delayed events were put into s3 + type: string + required: + - s3_bucket + type: object + type: object + status: + description: OutputStatus defines the observed state of Output + type: object + required: + - spec + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml new file mode 100644 index 000000000..b68f6efec --- /dev/null +++ b/charts/logging-operator/templates/logging.banzaicloud.com_flows.yaml @@ -0,0 +1,142 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: flows.logging.banzaicloud.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.loggingRef + name: Logging + type: string + group: logging.banzaicloud.com + names: + kind: Flow + listKind: FlowList + plural: flows + singular: flow + scope: "" + subresources: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + filters: + items: + properties: + parser: + description: https://docs.fluentd.org/filter/parser + properties: + emit_invalid_record_to_error: + description: 'Emit invalid record to @ERROR label. Invalid + cases are: key not exist, format is not matched, unexpected + error' + type: boolean + hash_value_fiel: + description: Store parsed values as a hash value in a field. 
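+              # A hedged sketch of how the parser filter above might appear in a Flow spec
+              # (the key and parser type are illustrative assumptions; a complete Flow also
+              # needs the outputRefs and selectors fields required later in this schema):
+              #
+              #   filters:
+              #     - parser:
+              #         key_name: message
+              #         remove_key_name_field: true
+              #         reserve_data: true
+              #         parsers:
+              #           - type: nginx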
+ type: string + inject_key_prefix: + description: Store parsed values with specified key name prefix. + type: string + key_name: + description: Specify field name in the record to parse. + type: string + parsers: + items: + properties: + estimate_current_event: + description: If true, use Fluent::EventTime.now(current + time) as a timestamp when time_key is specified. + type: boolean + expression: + description: Regexp expression to evaluate + type: string + keep_time_key: + description: If true, keep time field in the record. + type: boolean + null_empty_string: + description: If true, empty string field is replaced + with nil + type: boolean + null_value_pattern: + description: 'Specify null value pattern.' + type: string + time_key: + description: Specify time field for event time. If the + event doesn't have this field, current time is used. + type: string + type: + description: 'Parse type: apache2, apache_error, nginx, + syslog, csv, tsv, ltsv, json, multiline, none' + type: string + type: object + type: array + remove_key_name_field: + description: Remove key_name field when parsing succeeds + type: boolean + replace_invalid_sequence: + description: If true, invalid strings are replaced with safe + characters and re-parsed. + type: boolean + reserve_data: + description: Keep original key-value pair in parsed result. + type: boolean + reserve_time: + description: Keep original event time in parsed result. + type: boolean + required: + - key_name + type: object + stdout: + type: object + tag_normaliser: + properties: + format: + description: Re-tag log messages. More info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser) + type: string + type: object + type: object + type: array + loggingRef: + type: string + outputRefs: + items: + type: string + type: array + selectors: + additionalProperties: + type: string + type: object + required: + - outputRefs + - selectors + type: object + status: + description: FlowStatus defines the observed state of Flow + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml new file mode 100644 index 000000000..adcf63897 --- /dev/null +++ b/charts/logging-operator/templates/logging.banzaicloud.com_loggings.yaml @@ -0,0 +1,332 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: loggings.logging.banzaicloud.com +spec: + group: logging.banzaicloud.com + names: + kind: Logging + listKind: LoggingList + plural: loggings + singular: logging + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: Logging is the Schema for the loggings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LoggingSpec defines the desired state of Logging + properties: + controlNamespace: + type: string + flowConfigCheckDisabled: + type: boolean + flowConfigOverride: + type: string + fluentbit: + description: FluentbitSpec defines the desired state of Fluentbit + properties: + annotations: + additionalProperties: + type: string + type: object + image: + description: ImageSpec struct hold information about image specification + properties: + pullPolicy: + type: string + repository: + type: string + tag: + type: string + required: + - pullPolicy + - repository + - tag + type: object + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + targetHost: + type: string + targetPort: + format: int32 + type: integer + tls: + description: FluentbitTLS defines the TLS configs + properties: + enabled: + type: boolean + secretName: + type: string + sharedKey: + type: string + required: + - enabled + - secretName + type: object + type: object + fluentd: + description: FluentdSpec defines the desired state of Fluentd + properties: + annotations: + additionalProperties: + type: string + type: object + configReloaderImage: + description: ImageSpec struct hold information about image specification + properties: + pullPolicy: + type: string + repository: + type: string + tag: + type: string + required: + - pullPolicy + - repository + - tag + type: object + disablePvc: + type: boolean + fluentdPvcSpec: + description: PersistentVolumeClaimSpec describes the common attributes + of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner can + support VolumeSnapshot data source, it will create a new volume + and data will be restored to the volume at the same time. + If the provisioner does not support VolumeSnapshot data source, + volume will not be created and the failure will be reported + as an event. In the future, we plan to support more data source + types and the behavior of the provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. 
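+          # The fluentbit and fluentd blocks above form the LoggingSpec. A minimal, illustrative
+          # Logging resource might look like the sketch below (the resource name and the
+          # controlNamespace value are assumptions; only controlNamespace is required by this
+          # schema, and empty fluentd/fluentbit objects are accepted by it):
+          #
+          #   apiVersion: logging.banzaicloud.com/v1alpha2
+          #   kind: Logging
+          #   metadata:
+          #     name: default-logging
+          #   spec:
+          #     controlNamespace: logging
+          #     fluentd: {}
+          #     fluentbit: {}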
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the + volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not included + in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + image: + description: ImageSpec struct hold information about image specification + properties: + pullPolicy: + type: string + repository: + type: string + tag: + type: string + required: + - pullPolicy + - repository + - tag + type: object + port: + format: int32 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + tls: + description: FluentdTLS defines the TLS configs + properties: + enabled: + type: boolean + secretName: + type: string + sharedKey: + type: string + required: + - enabled + - secretName + type: object + volumeModImage: + description: ImageSpec struct hold information about image specification + properties: + pullPolicy: + type: string + repository: + type: string + tag: + type: string + required: + - pullPolicy + - repository + - tag + type: object + type: object + loggingRef: + type: string + watchNamespaces: + items: + type: string + type: array + required: + - controlNamespace + type: object + status: + description: LoggingStatus defines the observed state of Logging + properties: + configCheckResults: + additionalProperties: + type: boolean + type: object + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml new file mode 100644 index 000000000..dff3427aa --- /dev/null +++ b/charts/logging-operator/templates/logging.banzaicloud.com_outputs.yaml @@ -0,0 +1,1790 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: outputs.logging.banzaicloud.com +spec: + group: logging.banzaicloud.com + names: + kind: Output + listKind: OutputList + plural: outputs + singular: output + scope: "" + validation: + openAPIV3Schema: + description: Output is the Schema for the outputs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OutputSpec defines the desired state of Output + properties: + azurestorage: + properties: + auto_create_container: + description: 'Automatically create container if not exists(default: + true)' + type: boolean + azure_container: + description: Your azure storage container + type: string + azure_object_key_format: + description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})' + type: string + azure_storage_access_key: + description: Your azure storage access key + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + azure_storage_account: + description: Your azure storage account + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + azure_storage_type: + description: 'Azure storage type currently only "blob" supported + (default: blob)' + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. 
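+              # Combining the azurestorage fields of this section, a minimal Output manifest
+              # might look like the sketch below (the container, secret name and secret keys
+              # are illustrative assumptions, not values defined by this schema):
+              #
+              #   apiVersion: logging.banzaicloud.com/v1alpha2
+              #   kind: Output
+              #   metadata:
+              #     name: azure-output-sample
+              #   spec:
+              #     azurestorage:
+              #       azure_container: logs
+              #       azure_storage_account:
+              #         valueFrom:
+              #           secretKeyRef: {name: azure-credentials, key: storageAccountName}
+              #       azure_storage_access_key:
+              #         valueFrom:
+              #           secretKeyRef: {name: azure-credentials, key: storageAccountKey}
+              #       buffer:
+              #         timekey: 1h
+              #         timekey_wait: 10m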
+ type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. + type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. 
(default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + format: + description: 'Compat format type: out_file, json, ltsv (default: + out_file)' + type: string + path: + description: Path prefix of the files on Azure + type: string + store_as: + description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)' + type: string + required: + - azure_container + - azure_storage_access_key + - azure_storage_account + type: object + elasticsearch: + description: Send your logs to Elasticsearch + properties: + application_name: + description: 'Specify the application name for the rollover index + to be created.(default: default)' + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. 
+ type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. + type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. 
(default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + content_type: + description: 'With content_type application/x-ndjson, elasticsearch + plugin adds application/x-ndjson as Content-Type in payload. (default: + application/json)' + type: string + customize_template: + description: Specify the string and its value to be replaced in + form of hash. Can contain multiple key value pair that would be + replaced in the specified template_file. This setting only creates + template and to add rollover index please check the rollover_index + configuration. + type: string + deflector_alias: + description: Specify the deflector alias which would be assigned + to the rollover index created. This is useful in case of using + the Elasticsearch rollover API + type: string + fail_on_putting_template_retry_exceed: + description: 'Indicates whether to fail when max_retry_putting_template + is exceeded. If you have multiple output plugin, you could use + this property to do not fail on fluentd statup.(default: true)' + type: boolean + host: + description: You can specify Elasticsearch host by this parameter. + (default:localhost) + type: string + hosts: + description: You can specify multiple Elasticsearch hosts with separator + ",". If you specify hosts option, host and port options are ignored. + type: string + http_backend: + description: 'With http_backend typhoeus, elasticsearch plugin uses + typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. + (default: excon)' + type: string + id_key: + description: https://github.com/uken/fluent-plugin-elasticsearch#id_key + type: string + include_index_in_url: + description: With this option set to true, Fluentd manifests the + index name in the request URL (rather than in the request body). + You can use this option to enforce an URL-based access control. + type: boolean + include_tag_key: + description: 'This will add the Fluentd tag in the JSON record.(default: + false)' + type: boolean + include_timestamp: + description: Adds a @timestamp field to the log, following all settings + logstash_format does, except without the restrictions on index_name. + This allows one to log to an alias in Elasticsearch and utilize + the rollover API. + type: boolean + index_date_pattern: + description: 'Specify this to override the index date pattern for + creating a rollover index.(default: now/d)' + type: string + index_prefix: + description: Specify the index prefix for the rollover index to + be created. 
+ type: string + logstash_dateformat: + description: 'Set the Logstash date format.(default: %Y.%m.%d)' + type: string + logstash_format: + description: 'Enable Logstash log format.(default: false)' + type: boolean + logstash_prefix: + description: 'Set the Logstash prefix.(default: true)' + type: string + logstash_prefix_separator: + description: 'Set the Logstash prefix separator.(default: -)' + type: string + max_retry_get_es_version: + description: 'You can specify times of retry obtaining Elasticsearch + version.(default: 15)' + type: string + max_retry_putting_template: + description: 'You can specify times of retry putting template.(default: + 10)' + type: string + password: + description: Password for HTTP Basic authentication. + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + path: + description: Path for HTTP Basic authentication. + type: string + pipeline: + description: This param is used to set a pipeline id of your elasticsearch + to be added into the request, so that you can configure an ingest node. + type: string + port: + description: 'You can specify Elasticsearch port by this parameter.(default: + 9200)' + type: string + prefer_oj_serializer: + description: 'With default behavior, Elasticsearch client uses Yajl + as JSON encoder/decoder. Oj is the alternative high performance + JSON encoder/decoder. When this parameter is set to true, Elasticsearch + client uses Oj as JSON encoder/decoder. (default: false)' + type: boolean + reconnect_on_error: + description: 'Indicates that the plugin should reset connection + on any error (reconnect on next send). By default it will reconnect + only on "host unreachable exceptions". We recommend setting this + to true in the presence of elasticsearch shield.(default: false)' + type: boolean + reload_connections: + description: 'You can tune how the elasticsearch-transport host + reloading feature works.(default: true)' + type: boolean + reload_on_failure: + description: 'Indicates that the elasticsearch-transport will try + to reload the nodes addresses if there is a failure while making + the request, this can be useful to quickly remove a dead node + from the list of addresses.(default: false)' + type: boolean + remove_keys_on_update: + description: Remove keys on update will not update the configured + keys in elasticsearch when a record is being updated. This setting + only has any effect if the write operation is update or upsert. + type: string + remove_keys_on_update_key: + description: This setting allows remove_keys_on_update to be configured + with a key in each record, in much the same way as target_index_key + works. + type: string + request_timeout: + description: 'You can specify HTTP request timeout.(default: 5s)' + type: string + resurrect_after: + description: 'You can set in the elasticsearch-transport how often + dead connections from the elasticsearch-transport''s pool will + be resurrected.(default: 60s)' + type: string + retry_tag: + description: This setting allows custom routing of messages in response + to bulk request failures. The default behavior is to emit failed + records using the same tag that was provided.
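+              # Drawing on the elasticsearch fields of this section, a minimal Output manifest
+              # might look like the sketch below (the host, port and index settings are
+              # illustrative assumptions; note that port is declared as a string in this schema):
+              #
+              #   apiVersion: logging.banzaicloud.com/v1alpha2
+              #   kind: Output
+              #   metadata:
+              #     name: es-output-sample
+              #   spec:
+              #     elasticsearch:
+              #       host: elasticsearch.logging.svc
+              #       port: "9200"
+              #       logstash_format: true
+              #       buffer:
+              #         timekey: 1m
+              #         timekey_wait: 30s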
+ type: string + rollover_index: + description: 'Specify this as true when an index with rollover capability + needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index' + type: boolean + routing_key: + description: Similar to parent_key config, will add _routing into + elasticsearch command if routing_key is set and the field does + exist in input event. + type: string + scheme: + description: 'Scheme for HTTP Basic authentication.(default: true)' + type: boolean + tag_key: + description: 'This will add the Fluentd tag in the JSON record.(default: + tag)' + type: string + target_index_key: + description: Tell this plugin to find the index name to write to + in the record under this key in preference to other mechanisms. + Key can be specified as path to nested record using dot ('.') + as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key + type: string + target_type_key: + description: 'Similar to target_index_key config, find the type + name to write to in the record under this key (or nested record). + If key not found in record - fallback to type_name.(default: true)' + type: string + template_file: + description: The path to the file containing the template to install. + type: string + template_name: + description: The name of the template to define. If a template by + the name given is already present, it will be left unchanged, + unless template_overwrite is set, in which case the template will + be updated. + type: string + template_overwrite: + description: 'Always update the template, even if it already exists.(default: + false)' + type: boolean + templates: + description: Specify index templates in form of hash. Can contain + multiple templates. + type: string + time_key: + description: By default, when inserting records in Logstash format, + @timestamp is dynamically created with the time at log ingestion. + If you'd like to use a custom time, include an @timestamp with + your record. + type: string + time_key_format: + description: The format of the time stamp field (@timestamp or what + you specify with time_key). This parameter only has an effect + when logstash_format is true as it only affects the name of the + index we write to. + type: string + time_parse_error_tag: + description: With logstash_format true, elasticsearch plugin parses + timestamp field for generating index name. If the record has invalid + timestamp value, this plugin emits an error event to @ERROR label + with time_parse_error_tag configured tag. + type: string + time_precision: + description: Should the record not include a time_key, define the + degree of sub-second time precision to preserve from the time + portion of the routed event. + type: string + user: + description: User for HTTP Basic authentication. This plugin will + escape required URL encoded characters within %{} placeholders. + e.g. %{demo+} + type: string + utc_index: + description: 'By default, the records inserted into index logstash-YYMMDD + with UTC (Coordinated Universal Time). This option allows to use + local time if you describe utc_index to false.(default: true)' + type: boolean + with_transporter_log: + description: 'This is debugging purpose option to enable to obtain + transporter layer log. 
(default: false)' + type: boolean + write_operation: + description: 'The write_operation can be any of: (index,create,update,upsert)(default: + index)' + type: string + type: object + gcs: + properties: + acl: + description: 'Permission for the object in GCS: auth_read owner_full + owner_read private project_private public_read' + type: string + auto_create_bucket: + description: 'Create GCS bucket if it does not exists (default: + true)' + type: boolean + bucket: + description: Name of a GCS bucket + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. + type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. 
+ type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. (default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. 
+ type: string + required: + - timekey + type: object + client_retries: + description: Number of times to retry requests on server error + type: integer + client_timeout: + description: Default timeout to use in requests + type: integer + credentials_json: + description: GCS service account credentials in JSON format + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + encryption_key: + description: Customer-supplied, AES-256 encryption key + type: string + format: + properties: + type: + description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value' + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + description: 'Max length of `%{hex_random}` placeholder(4-16) (default: + 4)' + type: integer + keyfile: + description: Path of GCS service account credentials JSON file + type: string + object_key_format: + description: 'Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})' + type: string + object_metadata: + description: User provided web-safe keys and arbitrary string values + that will returned with requests for the file as "x-goog-meta-" + response headers. + items: + properties: + key: + description: Key + type: string + value: + description: Value + type: string + required: + - key + - value + type: object + type: array + overwrite: + description: 'Overwrite already existing path (default: false)' + type: boolean + path: + description: Path prefix of the files on GCS + type: string + project: + description: Project identifier for GCS + type: string + storage_class: + description: 'Storage class of the file: dra nearline coldline multi_regional + regional standard' + type: string + store_as: + description: 'Archive format on GCS: gzip json text (default: gzip)' + type: string + transcoding: + description: Enable the decompressive form of transcoding + type: boolean + required: + - bucket + - project + type: object + loggingRef: + type: string + loki: + description: Fluentd output plugin to ship logs to a Loki server. + properties: + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. 
+ type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. + type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. 
(default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + extra_labels: + description: 'Set of labels to include with every Loki stream.(default: + nil)' + type: boolean + password: + description: Specify password if the Loki server requires authentication. + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + tenant: + description: Loki is a multi-tenant log storage platform and all + requests sent must include a tenant. + type: string + url: + description: The URL of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net) + type: string + username: + description: Specify a username if the Loki server requires authentication. + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + type: object + nullout: + type: object + oss: + properties: + aaccess_key_secret: + description: Your access secret key + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + access_key_id: + description: Your access key id + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + auto_create_bucket: + description: 'Create OSS bucket if it does not exist (default: + false)' + type: boolean + bucket: + description: Your bucket name + type: string + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. 
+ output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. + type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. 
+ type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. (default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. 
+ type: string + required: + - timekey + type: object + check_bucket: + description: 'Check if the bucket exists or not (default: true)' + type: boolean + check_object: + description: 'Check object before creation (default: true)' + type: boolean + download_crc_enable: + description: 'Download crc enabled (default: true)' + type: boolean + endpoint: + description: OSS endpoint to connect to + type: string + format: + properties: + type: + description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value' + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + description: 'The length of `%{hex_random}` placeholder(4-16) (default: + 4)' + type: integer + index_format: + description: '`sprintf` format for `%{index}` (default: %d)' + type: string + key_format: + description: 'The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension})' + type: string + open_timeout: + description: 'Timeout for open connections (default: 10)' + type: integer + oss_sdk_log_dir: + description: 'OSS SDK log directory (default: /var/log/td-agent)' + type: string + overwrite: + description: 'Overwrite already existing path (default: false)' + type: boolean + path: + description: 'Path prefix of the files on OSS (default: fluent/logs)' + type: string + read_timeout: + description: 'Timeout for read response (default: 120)' + type: integer + store_as: + description: 'Archive format on OSS: gzip, json, text, lzo, lzma2 + (default: gzip)' + type: string + upload_crc_enable: + description: 'Upload crc enabled (default: true)' + type: boolean + warn_for_delay: + description: Given a threshold to treat events as delay, output + warning logs if delayed events were put into OSS + type: string + required: + - aaccess_key_secret + - access_key_id + - bucket + - endpoint + type: object + s3: + properties: + acl: + description: Permission for the object in S3 + type: string + assume_role_credentials: + description: assume_role_credentials + properties: + duration_seconds: + description: The duration, in seconds, of the role session (900-3600) + type: string + external_id: + description: A unique identifier that is used by third parties + when assuming roles in their customers' accounts. + type: string + policy: + description: An IAM policy in JSON format + type: string + role_arn: + description: The Amazon Resource Name (ARN) of the role to assume + type: string + role_session_name: + description: An identifier for the assumed role session + type: string + required: + - role_arn + - role_session_name + type: object + auto_create_bucket: + description: Create S3 bucket if it does not exist + type: string + aws_iam_retries: + description: The number of attempts to load instance profile credentials + from the EC2 metadata service using IAM role + type: string + aws_key_id: + description: AWS access key id + properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + aws_sec_key: + description: AWS secret key. 
+ properties: + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + description: Secret key for the value + type: string + name: + description: Name of the kubernetes secret + type: string + required: + - key + - name + type: object + type: object + required: + - valueFrom + type: object + buffer: + properties: + chunk_full_threshold: + description: The percentage of chunk size threshold for flushing. + output plugin will flush the chunk when actual size reaches + chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in + default) + type: string + chunk_limit_records: + description: The max number of events that each chunks can store + in it + type: integer + chunk_limit_size: + description: 'The max size of each chunks: events will be written + into chunks until the size of chunks become this size' + type: string + compress: + description: If you set this option to gzip, you can get Fluentd + to compress data records before writing to buffer chunks. + type: string + delayed_commit_timeout: + description: The timeout seconds until output plugin decides + that async write operation fails + type: string + disable_chunk_backup: + description: Instead of storing unrecoverable chunks in the + backup directory, just discard them. This option is new in + Fluentd v1.2.6. + type: boolean + flush_at_shutdown: + description: The value to specify to flush/write all buffer + chunks at shutdown, or not + type: boolean + flush_interval: + description: 'Default: 60s' + type: string + flush_mode: + description: 'Default: default (equals to lazy if time is specified + as chunk key, interval otherwise) lazy: flush/write chunks + once per timekey interval: flush/write chunks per specified + time via flush_interval immediate: flush/write chunks immediately + after events are appended into chunks' + type: string + flush_thread_burst_interval: + description: The sleep interval seconds of threads between flushes + when output plugin flushes waiting chunks next to next + type: string + flush_thread_count: + description: The number of threads of output plugins, which + is used to write chunks in parallel + type: integer + flush_thread_interval: + description: The sleep interval seconds of threads to wait next + flush trial (when no chunks are waiting) + type: string + overflow_action: + description: 'How output plugin behaves when its buffer queue + is full throw_exception: raise exception to show this error + in log block: block processing of input plugin to emit events + into that buffer drop_oldest_chunk: drop/purge oldest chunk + to accept newly incoming chunk' + type: string + path: + description: The path where buffer chunks are stored. The '*' + is replaced with random characters. This parameter is required. + type: string + queue_limit_length: + description: The queue length limitation of this buffer plugin + instance + type: integer + queued_chunks_limit_size: + description: Limit the number of queued chunks. If you set smaller + flush_interval, e.g. 1s, there are lots of small queued chunks + in buffer. This is not good with file buffer because it consumes + lots of fd resources when output destination has a problem. + This parameter mitigates such situations. 
+ type: integer + retry_exponential_backoff_base: + description: The base number of exponential backoff for retries + type: string + retry_forever: + description: If true, plugin will ignore retry_timeout and retry_max_times + options and retry flushing forever + type: boolean + retry_max_interval: + description: The maximum interval seconds for exponential backoff + between retries while failing + type: string + retry_max_times: + description: The maximum number of times to retry to flush while + failing + type: integer + retry_randomize: + description: If true, output plugin will retry after randomized + interval not to do burst retries + type: boolean + retry_secondary_threshold: + description: The ratio of retry_timeout to switch to use secondary + while failing (Maximum valid value is 1.0) + type: string + retry_timeout: + description: The maximum seconds to retry to flush while failing, + until plugin discards buffer chunks + type: string + retry_type: + description: 'exponential_backoff: wait seconds will become + large exponentially per failures periodic: output plugin will + retry periodically with fixed intervals (configured via retry_wait)' + type: string + retry_wait: + description: Seconds to wait before next retry to flush, or + constant factor of exponential backoff + type: string + tags: + description: 'When tag is specified as buffer chunk key, output + plugin writes events into chunks separately per tags. (default: + tag,time)' + type: string + timekey: + description: Output plugin will flush chunks per specified time + (enabled when time is specified in chunk keys) + type: string + timekey_use_utc: + description: Output plugin decides to use UTC or not to format + placeholders using timekey + type: boolean + timekey_wait: + description: Output plugin writes chunks after timekey_wait + seconds later after timekey expiration + type: string + timekey_zone: + description: The timezone (-0700 or Asia/Tokyo) string for formatting + timekey placeholders + type: string + total_limit_size: + description: The size limitation of this buffer plugin instance. + Once the total size of stored buffer reached this threshold, + all append operations will fail with error (and data will + be lost) + type: string + type: + description: Fluentd core bundles memory and file plugins. 3rd + party plugins are also available when installed. + type: string + required: + - timekey + type: object + check_apikey_on_start: + description: Check AWS key on start + type: string + check_bucket: + description: Check bucket if exists or not + type: string + check_object: + description: Check object before creation + type: string + compute_checksums: + description: AWS SDK uses MD5 for API request/response by default + type: string + enable_transfer_acceleration: + description: 'If true, S3 Transfer Acceleration will be enabled + for uploads. 
IMPORTANT: You must first enable this feature on + your destination S3 bucket' + type: string + force_path_style: + description: If true, the bucket name is always left in the request + URI and never moved to the host as a sub-domain + type: string + format: + properties: + type: + description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value' + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + grant_full_control: + description: Allows grantee READ, READ_ACP, and WRITE_ACP permissions + on the object + type: string + grant_read: + description: Allows grantee to read the object data and its metadata + type: string + grant_read_acp: + description: Allows grantee to read the object ACL + type: string + grant_write_acp: + description: Allows grantee to write the ACL for the applicable + object + type: string + hex_random_length: + description: The length of `%{hex_random}` placeholder(4-16) + type: string + index_format: + description: '`sprintf` format for `%{index}`' + type: string + instance_profile_credentials: + description: instance_profile_credentials + properties: + http_open_timeout: + description: Number of seconds to wait for the connection to + open + type: string + http_read_timeout: + description: Number of seconds to wait for one block to be read + type: string + ip_address: + description: IP address (default:169.254.169.254) + type: string + port: + description: Port number (default:80) + type: string + retries: + description: Number of times to retry when retrieving credentials + type: string + type: object + overwrite: + description: Overwrite already existing path + type: string + path: + description: Path prefix of the files on S3 + type: string + proxy_uri: + description: URI of proxy environment + type: string + s3_bucket: + description: S3 bucket name + type: string + s3_endpoint: + description: Custom S3 endpoint (like minio) + type: string + s3_metadata: + description: Arbitrary S3 metadata headers to set for the object + type: string + s3_object_key_format: + description: 'The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})' + type: string + s3_region: + description: S3 region name + type: string + shared_credentials: + description: shared_credentials + properties: + path: + description: 'Path to the shared file. (default: $HOME/.aws/credentials)' + type: string + profile_name: + description: Profile name. 
Default to 'default' or ENV['AWS_PROFILE'] + type: string + type: object + signature_version: + description: Signature version for API Request (s3,v4) + type: string + sse_customer_algorithm: + description: Specifies the algorithm to use to when encrypting the + object + type: string + sse_customer_key: + description: Specifies the customer-provided encryption key for + Amazon S3 to use in encrypting data + type: string + sse_customer_key_md5: + description: Specifies the 128-bit MD5 digest of the encryption + key according to RFC 1321 + type: string + ssekms_key_id: + description: Specifies the AWS KMS key ID to use for object encryption + type: string + ssl_verify_peer: + description: If false, the certificate of endpoint will not be verified + type: string + storage_class: + description: The type of storage to use for the object(STANDARD,REDUCED_REDUNDANCY,STANDARD_IA) + type: string + store_as: + description: Archive format on S3 + type: string + use_bundled_cert: + description: Use aws-sdk-ruby bundled cert + type: string + use_server_side_encryption: + description: The Server-side encryption algorithm used when storing + this object in S3 (AES256, aws:kms) + type: string + warn_for_delay: + description: Given a threshold to treat events as delay, output + warning logs if delayed events were put into s3 + type: string + required: + - s3_bucket + type: object + type: object + status: + description: OutputStatus defines the observed state of Output + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/logging-operator/templates/psp.yaml b/charts/logging-operator/templates/psp.yaml index 25be0f127..515d29448 100644 --- a/charts/logging-operator/templates/psp.yaml +++ b/charts/logging-operator/templates/psp.yaml @@ -26,6 +26,6 @@ spec: seLinux: rule: RunAsAny volumes: - - secret - - configMap -{{ end }} + - secret + - configMap +{{ end }} \ No newline at end of file diff --git a/charts/logging-operator/templates/rbac.yaml b/charts/logging-operator/templates/rbac.yaml index b49e8f3ae..6167ae844 100644 --- a/charts/logging-operator/templates/rbac.yaml +++ b/charts/logging-operator/templates/rbac.yaml @@ -20,90 +20,73 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} rules: + - apiGroups: + - logging.banzaicloud.com + resources: + - loggings + - flows + - clusterflows + - outputs + - clusteroutputs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - logging.banzaicloud.com + resources: + - loggings/status + verbs: + - get + - patch + - update + - apiGroups: + - "" + - apps + - batch + - extensions + - policy + - rbac.authorization.k8s.io + resources: + - namespaces + - nodes + - persistentvolumeclaims + - pods + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + - persistentvolumes + - endpoints + - secrets + - configmaps + - serviceaccounts + - roles + - rolebindings + - clusterroles + - clusterrolebindings + - daemonsets + - deployments + - replicasets + - statefulsets + - jobs + verbs: + - "*" + {{- if .Values.rbac.psp.enabled }} - apiGroups: - - logging.banzaicloud.com + - extensions resources: - - plugins - - fluentds - - fluentbits - verbs: - - "*" -- apiGroups: - - "" - - apps - - autoscaling - - batch - - extensions - - policy - - rbac.authorization.k8s.io - 
resources: - - namespaces - - nodes - - persistentvolumeclaims - - pods - - services - - resourcequotas - - replicationcontrollers - - limitranges - - persistentvolumeclaims - - persistentvolumes - - endpoints - - secrets - - configmaps - - serviceaccounts - - clusterroles - - clusterrolebindings - verbs: - - "*" -- apiGroups: - - apps - resources: - - daemonsets - - deployments - - replicasets - verbs: - - "*" -- apiGroups: - - extensions - resources: - - daemonsets - - deployments - - replicasets - verbs: - - "*" -- apiGroups: - - apps - resources: - - statefulsets - verbs: - - get - - list - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - list - - watch -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - list - - watch -{{- if .Values.rbac.psp.enabled }} -- apiGroups: - - extensions - resources: - - podsecuritypolicies + - podsecuritypolicies resourceNames: - - psp.logging-operator + - psp.logging-operator verbs: - - use -{{- end }} + - use + {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding @@ -115,12 +98,12 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} subjects: -- kind: ServiceAccount - name: {{ template "logging-operator.fullname" . }} - namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: {{ template "logging-operator.fullname" . }} + namespace: {{ .Release.Namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: {{ template "logging-operator.fullname" . }} -{{- end }} + {{- end }} \ No newline at end of file diff --git a/charts/logging-operator/templates/service.yaml b/charts/logging-operator/templates/service.yaml new file mode 100644 index 000000000..b1aee6fca --- /dev/null +++ b/charts/logging-operator/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "logging-operator.fullname" . }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.http.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "logging-operator.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query \ No newline at end of file diff --git a/charts/logging-operator/values.yaml b/charts/logging-operator/values.yaml index cb60d8e0c..e4fcbb248 100644 --- a/charts/logging-operator/values.yaml +++ b/charts/logging-operator/values.yaml @@ -6,39 +6,18 @@ replicaCount: 1 image: repository: banzaicloud/logging-operator - tag: 0.2.2 + tag: 2.0.0 pullPolicy: IfNotPresent +imagePullSecrets: [] nameOverride: "" fullnameOverride: "" -# Namespace to watch fot LoggingOperator CRD -watchNamespace: "" - -grafana: - dashboard: - enabled: true - - -## Role Based Access -## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ -## -rbac: - enabled: true - ## Pod Security Policy - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - psp: - enabled: false - -## Define resources requests and limits for single Pods. -## ref: https://kubernetes.io/docs/user-guide/compute-resources/ -## We usually recommend not to specify default resources and to leave this as a conscious -## choice for the user. This also increases chances charts run on environments with little -## resources, such as Minikube. 
If you do want to specify resources, uncomment the following -## lines, adjust them as necessary, and remove the curly braces after 'resources:'. -## resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi @@ -46,46 +25,24 @@ resources: {} # cpu: 100m # memory: 128Mi - -## Define which Nodes the Pods are scheduled on. -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## nodeSelector: {} - -## If specified, the pod's tolerations. -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## tolerations: [] -# - key: "key" -# operator: "Equal" -# value: "value" -# effect: "NoSchedule" -## Assign the Logging operator to run on specific nodes -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ -## affinity: {} -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: kubernetes.io/e2e-az-name -# operator: In -# values: -# - e2e-az1 -# - e2e-az2 +http: + # http listen port number + port: 8080 + # Service definition for query http service + service: + type: ClusterIP + # Annotations to query http service + annotations: {} + # Labels to query http service + labels: {} -## SecurityContext holds pod-level security attributes and common container settings. -## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## -podSecurityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 -securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - # capabilities: - # drop: ["ALL"] +rbac: + enabled: true + psp: + enabled: false \ No newline at end of file diff --git a/charts/nginx-logging-demo/Chart.yaml b/charts/nginx-logging-demo/Chart.yaml deleted file mode 100644 index 92515dcaa..000000000 --- a/charts/nginx-logging-demo/Chart.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -appVersion: "1.0" -description: A Demo application for the logging-operator -name: nginx-logging-demo -version: 0.1.2 -maintainers: -- name: Banzai Cloud - email: info@banzaicloud.com diff --git a/charts/nginx-logging-demo/README.md b/charts/nginx-logging-demo/README.md deleted file mode 100644 index a40dc5cf8..000000000 --- a/charts/nginx-logging-demo/README.md +++ /dev/null @@ -1,63 +0,0 @@ - -# Logging Operator Nginx demonstration Chart - -[Logging Operator](https://github.com/banzaicloud/logging-operator) is a managed centralized logging component based on fluentd and fluent-bit. -## tl;dr: - -```bash -$ helm repo add banzaicloud-stable http://kubernetes-charts.banzaicloud.com/branch/master -$ helm repo update -$ helm install banzaicloud-stable/nginx-logging-demo -``` - -## Introduction - -This chart demonstrates the use of the [Logging Operator](https://github.com/banzaicloud/banzai-charts/logging-operator) with an nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
- -## Prerequisites - -- [Logging Operator](https://github.com/banzaicloud/logging-operator) available on the cluster - - -## Installing the Chart - -To install the chart with the release name `log-test-nginx`: - -```bash -$ helm install --name log-test-nginx banzaicloud-stable/nginx-logging-demo -``` -## Uninstalling the Chart - -To uninstall/delete the `log-test-nginx` deployment: - -```bash -$ helm delete log-test-nginx -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Configuration - -The following tables lists the configurable parameters of the nginx-logging-demo chart and their default values. - -| Parameter | Description | Default | -| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ | -| `image.repository` | Container image repository | `nginx` | -| `image.tag` | Container image tag | `stable` | -| `image.pullPolicy` | Container pull policy | `IfNotPresent` | -| `nameOverride` | Override name of app | `` | -| `fullnameOverride` | Override full name of app | `` | -| `affinity` | Node Affinity | `{}` | -| `resources` | CPU/Memory resource requests/limits | `{}` | -| `tolerations` | Node Tolerations | `[]` | -| `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` | - - -Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example: - -```bash -$ helm install --name my-release -f values.yaml banzaicloud-stable/nginx-logging-demo -``` - -> **Tip**: You can use the default [values.yaml](values.yaml) - diff --git a/charts/nginx-logging-demo/templates/NOTES.txt b/charts/nginx-logging-demo/templates/NOTES.txt deleted file mode 100644 index 46c4f729a..000000000 --- a/charts/nginx-logging-demo/templates/NOTES.txt +++ /dev/null @@ -1,24 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-logging-demo.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-logging-demo.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-logging-demo.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "nginx-logging-demo.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl port-forward $POD_NAME 8080:80 -{{- end }} - - - diff --git a/charts/nginx-logging-demo/templates/deployment.yaml b/charts/nginx-logging-demo/templates/deployment.yaml deleted file mode 100644 index bf70c6fba..000000000 --- a/charts/nginx-logging-demo/templates/deployment.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "nginx-logging-demo.fullname" . }} - labels: - app: {{ include "nginx-logging-demo.name" . }} - chart: {{ include "nginx-logging-demo.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ include "nginx-logging-demo.name" . }} - release: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ include "nginx-logging-demo.name" . }} - release: {{ .Release.Name }} - spec: - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 80 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/charts/nginx-logging-demo/templates/ingress.yaml b/charts/nginx-logging-demo/templates/ingress.yaml deleted file mode 100644 index b64548268..000000000 --- a/charts/nginx-logging-demo/templates/ingress.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "nginx-logging-demo.fullname" . -}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - app: {{ include "nginx-logging-demo.name" . }} - chart: {{ include "nginx-logging-demo.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - backend: - serviceName: {{ $fullName }} - servicePort: http - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/nginx-logging-demo/templates/logging.yaml b/charts/nginx-logging-demo/templates/logging.yaml deleted file mode 100644 index a0bfb0837..000000000 --- a/charts/nginx-logging-demo/templates/logging.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: "logging.banzaicloud.com/v1alpha1" -kind: "Plugin" -metadata: - name: {{ include "nginx-logging-demo.fullname" . }} - labels: - app: {{ include "nginx-logging-demo.name" . }} - chart: {{ include "nginx-logging-demo.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - input: - label: - app: {{ include "nginx-logging-demo.name" . 
}} -{{- if .Values.forwarding.enabled }} - output: - - type: forward - name: forward - parameters: - - name: host - value: {{ .Values.forwarding.targetHost | quote }} - - name: port - value: {{ .Values.forwarding.targetPort | quote }} - - name: name - value: {{ .Values.forwarding.targetHost | quote }} -{{- if .Values.forwarding.tlsSharedKey }} - - name: tlsSharedKey - value: {{ .Values.forwarding.tlsSharedKey | b64enc | quote }} -{{- end }} -{{- end }} - filter: - - type: parser - name: parser-nginx - parameters: - - name: format - value: '/^(?[^ ]*) (?[^ ]*) (?[^ ]*) \[(?