diff --git a/.github/renovate.json b/.github/renovate.json new file mode 100644 index 0000000..24e2047 --- /dev/null +++ b/.github/renovate.json @@ -0,0 +1,14 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:base", + ":dependencyDashboard" + ], + "hostRules": [ + { + "matchHost": "https://quay.io", + "enabled": false + } + ], + "pinDigests": true +} \ No newline at end of file diff --git a/.github/workflows/integration-testing.yml b/.github/workflows/integration-testing.yml new file mode 100644 index 0000000..e6f7a7f --- /dev/null +++ b/.github/workflows/integration-testing.yml @@ -0,0 +1,30 @@ +name: 'Integration testing' +on: + push: + +jobs: + integration-testing: + runs-on: ubuntu-latest-4-cores + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + - name: Install Helm + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3 + - name: Install kind + uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0 + with: + install_only: true + - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4 + with: + java-version: '17' + distribution: 'adopt' + - name: Check + working-directory: integration-testing + run: ./gradlew check -i + - name: Upload Artifact + if: success() || failure() + uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4 + with: + name: test-result + path: /home/runner/work/**/build/reports + retention-days: 2 \ No newline at end of file diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..91cbda3 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,31 @@ +name: 'Publication' +on: + push: + paths: + - 'xenit-alfresco/**' + tags: + - v* +jobs: + continuousIntegration: + runs-on: ubuntu-latest + steps: + - name: Parse version + run: | + echo "version=${GITHUB_REF#refs/tags/v}" >> "$GITHUB_ENV" + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + - name: Install Helm + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3 + - name: Package Helm Chart + run: | + helm package xenit-alfresco --version "${{ env.version }}" + - name: publish to cloudsmith + id: push + uses: cloudsmith-io/action@master + with: + api-key: ${{ secrets.CLOUDSMITH_API_KEY }} + command: 'push' + format: 'helm' + owner: 'xenit' + repo: 'open-source' + file: 'alfresco-${{ env.version }}.tgz' \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ff655c0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +.idea +.iml + +.gradle/ +build/ +classes/* diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..eef0a2a --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,87 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), with additional info about the +chronology things are added/fixed/changed and - where possible - links to the PRs involved. 
+ +### Changes +* make acs liveliness and readiness completely overridable +* make transformation services components each enable configurable +* remove mq config in other deployments when mq is disabled + +[EUIAUDIT23-7] +* correct cilium ooi connections to sharepoint online + +* fix the location of service account in solr and remove volume mounts when pvc is disabled for solr +* make init container for mq and sfs configurable for file permission creation + +[XENOPS-1123] + +* remove share-file-store cleanup interval from configmap +* fix liveness probe share and transform-core-aio + +[XM2C-158] + +* Share probe the same as acs probe when they are in the same pod +* Update share readiness check + +[XM2C-156] + +* make podManagementPolicy editable in solr statefulset + +[XM2C-155] + +* Remove service type from Solr service + + +* remove init containers of the shared file store container + + +* Disable cpu autoscaling by default +* Option to merge Share and ACS into 1 pod (sticky session workaround) + +[XM2C-127] + +* make default backend in ingress in helm alfresco settable by values + + +* Add HorizontalPodAutoscaler for ACS +* Add -pvc to the additionalClaim in the deployment +* Enable solr readiness probe by default +* Add support for envFrom values or all deployments + +[XM2C-91] + +* Split SOLR Backup from normal mount + +[XM2C-89] + +* make repository and registry and tag the only way to set up image +* add alfresco-port to solr-config +* make solr readiness probe disabled by default +* fix transform-router-to-shared-file-store network policy + +[XM2C-90] + +* add support for custom alfresco mounts and split image values to registry , repository tag for renovate bot +* add renovate + +[XM2C-68] + +* remove default pull secrets + +[XM2C-55] + +* Rename all `replicaCount` properties to `replicas` + +[XM2C-54] + +* Rename all `image.repository` properties to `image` +* Rename all `image.pullPolicy` properties to `imagePullPolicy` + +[XM2C-30] + +* added checksum of configmap and secrets for deployment for **Automatically Roll Deployments** +* changed mq default strategy from RollingUpdate to Recreate because of conflict in updating + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8405e89 --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..9ca16cf --- /dev/null +++ b/README.md @@ -0,0 +1,2237 @@ +# Xenit Alfresco Helm Chart + +[![Latest version of 'alfresco' @ Cloudsmith](https://api-prd.cloudsmith.io/v1/badges/version/xenit/open-source/helm/alfresco/latest/x/?render=true&show_latest=true)](https://cloudsmith.io/~xenit/repos/open-source/packages/detail/helm/alfresco/latest/) + +This is a helm chart for installing Alfresco + +## Helm + +[![Hosted By: Cloudsmith](https://img.shields.io/badge/OSS%20hosting%20by-cloudsmith-blue?logo=cloudsmith&style=for-the-badge)](https://cloudsmith.com) + +Package repository hosting is graciously provided by [Cloudsmith](https://cloudsmith.com). Cloudsmith is the only fully +hosted, cloud-native, universal package management solution, that enables your organization to create, store and share +packages in any format, to any place, with total confidence. + +You can install this helm chart on you K8s cluster. 
Keep in mind that you will need to add some `--set` statements for +this to work: + +```bash +helm install alfresco \ + --repo 'https://repo.xenit.eu/public/open-source/helm/charts/' +``` + +Or you can use it as a dependency in your `requirements.yaml` in your own chart. + +```yaml +dependencies: + - name: alfresco + version: 0.1.2 + repository: https://repo.xenit.eu/public/open-source/helm/charts/ +``` + +## Dev Requirements + +Make sure you have the following installed: + +* Kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl +* docker: https://www.docker.com/get-started/ +* Helm: https://helm.sh/docs/intro/install/ +* kind: https://kind.sigs.k8s.io/docs/user/quick-start/ +* skaffold: https://skaffold.dev/docs/install/ + +## Start Local Cluster + +* To start the cluster you have to create one using kind with the config file as a parameter that is under the directory + kind: + ```bash + kind create cluster --config=kind/config.yaml + ``` +* switch to kind-kind context : + ```bash + kubectl config use-context kind-kind + ``` +* Add An ingress controller by running this command after starting the cluster: + ```bash + kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml + ``` + +* set up the image pull secrets like in the example and add them to the ```general.imagePullSecrets``` + Example : + +``` +apiVersion: v1 +kind: Secret +metadata: + name: secretName + namespace: {{ .Release.Namespace | quote }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"%s\",\"auth\":\"%s\"}}}" <> <> <> (printf "%s:%s" .username .password | b64enc) | b64enc }} +``` + +* wait for the ingress controller to be ready you can check by running this command : + ```bash + kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s + ``` +* some the services are disabled by default to minimize the resource usage such as : + - solr + - transformServices + - digitalWorkspace + + to enable them modify the values inside local-values.yaml +* finally, run skaffold (instead of helm) and wait for the ingress controller to be ready first: + ```bash + skaffold dev + ``` + +## Image Requirements + +This helm chart supports a lot of features like share and desktop sync. You are however yourself responsible to provide +an ACS image with the correct amps installed to support these features. Please note that this helm chart is build to +support the xenit open source images. These are build on the official Alfresco Images but have additional K8S support. +The deployments that rely on Xenit Images are the following: + +* acs +* share +* postgresql +* solr + +For more information take a look at + +* https://hub.docker.com/u/xenit +* https://github.com/xenit-eu + +## Configuration + +### General + +#### `general.strategy` + +* Required: false +* Default: + ```yaml + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + ``` +* Description: You can overwrite here the rollout strategy of deployments. 
This will be effective on ALL deployments in + the helm chart that have strategy type RollingUpdate (default) + +#### `general.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to ALL deployments and statefullSets + +#### `general.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. The secrets will be referenced in all Deployments and StatefullSets. + +#### `general.serviceType` + +* Required: false +* Default: None +* Description: will set a serviceType on the services that are exposed via an ingress. This might be useful for example + when you are working on AWS infra with an AWS ALB which requires NodePort services + +#### `general.db.username` + +* Required: false +* Default: None +* Description: Used in the ACS and SyncService pod to access the Database and to set the username of the rootuser of the + postgres (if enabled) +* Note: If not specified the helm chart will try to reuse the value used in previous deployments. If these are not there + a random user will be used. + +#### `general.db.password` + +* Required: false +* Default: None +* Description: Used in the ACS and SyncService pod to access the Database and to set the password of the rootuser of the + postgres (if enabled) +* Note: If not specified the helm chart will try to reuse the value used in previous deployments. If these are not there + a random password will be used. + +#### `general.networkPolicies.enabled` + +* Required: false +* Default: true +* Description: A field to enabled/disable network policies. + +#### `general.cni` + +* Required: false +* Default: cilium +* Description: A field to tell the helm chart what cni provider your cluster is using. By default we assume cilium. If + this is not the case you will need to add a network policy to allow the following +* Alfresco to access heartbeat + +#### `general.secrets.acs.selfManaged` + +* Required: false +* Default: false +* Description: Whether or not you want to provide secrets for the helm chart yourself. This is useful when working on a + prod environment and you want a secure secret solution (for example Bitnami' Sealed secrets) +* Please note that when you enable this you are yourself responsible to provide a secret acs-secret in the namespace + that you will install this chart in. +* Secret data expected: + +``` + GLOBAL_objectstorage.store.myS3ContentStore.value.accessKey + GLOBAL_objectstorage.store.myS3ContentStore.value.secretKey +``` + +#### `general.secrets.mq.selfManaged` + +* Required: false +* Default: false +* Description: Whether or not you want to provide secrets for the helm chart yourself. This is useful when working on a + prod environment and you want a secure secret solution (for example Bitnami' Sealed secrets) +* Please note that when you enable this you are yourself responsible to provide a secret mq-secret in the namespace that + you will install this chart in. 
+* Secret data expected: + +``` + ACTIVEMQ_ADMIN_LOGIN + ACTIVEMQ_ADMIN_PASSWORD + GLOBAL_messaging.broker.username + GLOBAL_messaging.broker.password +``` + +#### `general.secrets.db.selfManaged` + +* Required: false +* Default: false +* Description: Whether or not you want to provide secrets for the helm chart yourself. This is useful when working on a + prod environment and you want a secure secret solution (for example Bitnami' Sealed secrets) +* Please note that when you enable this you are yourself responsible to provide a secret db-secret in the namespace that + you will install this chart in. +* Secret data expected: + +``` + DB_USERNAME + DB_PASSWORD + POSTGRES_USER + POSTGRES_PASSWORD +``` + +### Ingress + +#### `ingress.host` + +* Required: true +* Default: None +* Description: The host that points to the alfresco cluster for all services besides the syncService service + +#### `ingress.syncServiceHost` + +* Required: when `syncService.enabled` is `true` +* Default: None +* Description: The host that points to the alfresco cluster for the syncService service + +#### `ingress.ingressAnnotations` + +* Required: false +* Default: + ``` + kubernetes.io/ingress.class: "nginx" + cert-manager.io/cluster-issuer: "letsencrypt-production" + ``` +* Description: Annotations for ingress + +#### `ingress.additionalPaths` + +* Required: false +* Default: None +* Example: + +```yaml +- path: /service-path + pathType: Prefix + backend: + service: + name: service-name + port: + number: service-port +``` + +* Description: used to add more path to ingress under the same host name for new services + +#### `ingress.defaultBackend.service` + +* Required: true +* Default: acs-service +* Description: the default service name that ingress will point to + +#### `ingress.defaultBackend.port` + +* Required: true +* Default: 30000 +* Description: the default service port that ingress will point to + +#### `ingress.blockAcsSolrApi.enabled` + +* Required: false +* Default: `true` +* Description: Enable 403 handler for alfresco api solr endpoints +#### `ingress.blockAcsSolrApi.paths` + +* Required: false +* Default: +```yaml +- /alfresco/s/api/solr +- /alfresco/service/api/solr +- /alfresco/service/api/solr +- /alfresco/wcservice/api/solr +``` +* Description: List of paths that are blocked +### ACS + +#### `acs.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `acs.image.registry` + +* Required: false +* Default: `docker.io` +* Description: The registry where the docker container can be found in + +#### `acs.image.repository` + +* Required: false +* Default: `xenit/alfresco-repository-community` +* Description: The repository of the docker image that will be used + +#### `acs.image.tag` + +* Required: false +* Default: `7.2.0` +* Description: The tag of the docker image that will be used + +#### `acs.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `acs.livenessProbe` + +* Required: false +* Default: +``` + httpGet: + path: /alfresco/api/-default-/public/alfresco/versions/1/probes/-live- + port: 8080 + scheme: HTTP + failureThreshold: 1 + initialDelaySeconds: 130 + periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 10 +``` +* Description: Specify the livenessProbe configuration for acs + +#### `acs.readinessProbe` + +* Required: false +* Default: +``` + httpGet: + path: /alfresco/api/-default-/public/alfresco/versions/1/probes/-ready- + 
port: 8080 + scheme: HTTP + failureThreshold: 6 + initialDelaySeconds: 60 + periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 10 +``` +* Description: Specify the readinessProbe configuration for acs + +#### `acs.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `acs.dbUrl` + +* Required: false +* Default: `jdbc:postgresql://postgresql:5432/alfresco` +* Description: Must be overwritten to point to your DB if you are not using the provided postgresql + +#### `acs.dbDriver` + +* Required: false +* Default: `org.postgresql.Driver` +* Description: If you use another kind of DB then postgres you must specify the driver that needs to be used here + +#### `acs.sharePort` + +* Required: false +* Default: `443` +* Description: Set to overwrite the share port + +#### `acs.shareProtocol` + +* Required: false +* Default: `https` +* Description: Set to overwrite the share protocol + +#### `acs.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. These will be stored in a config and are hence not safe for sensitive information + +#### `acs.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the acs-container envFrom section. This was added to allow to integrate secrets + that are not added by this helm chart. +* Example: + +```yaml +- secretRef: + name: s3-secret +``` + +#### `acs.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the acs deployment + +#### `acs.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the acs service + +#### `acs.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. 
Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `acs.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "2Gi" + cpu: "2" + ``` +* Description: The resources a node should keep reserved for your pod + +#### `acs.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `acs.hpa.enabled` + +* Required: false +* Default: false +* Description: Whether the ACS deployment should autoscale + +#### `acs.hpa.minReplicas` + +* Required: false +* Default: 1 +* Description: The min ammount of replicas will run when autoscaling + +#### `acs.hpa.maxReplicas` + +* Required: false +* Default: 10 +* Description: The max ammount of replicas will run when autoscaling + +#### `acs.hpa.cpu.enabled` + +* Required: false +* Default: false +* Description: whether horizontal scaling should trigger on cpu load + +#### `acs.hpa.cpu.utilization` + +* Required: false +* Default: 70 +* Description: The CPU cutover percentage + +#### `acs.hpa.memory.enabled` + +* Required: false +* Default: true +* Description: whether horizontal scaling should trigger on memory load + +#### `acs.hpa.memory.utilization` + +* Required: false +* Default: 70 +* Description: The Memory cutover percentage + +#### `acs.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. + +#### `acs.additionalVolumeMounts` + +* Required: false +* Default: None +* Description: A list of configMaps that need to be mounted as volumes to the alfresco pods. Make sure the configMap specified exists. Layout should be as follows: + +```yaml + - mountPath: >- + /usr/local/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap-ad/oup-ad1 + name: ldap1-ad-auth-volume + readOnly: true + - mountPath: >- + /usr/local/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap-ad/oup-ad2 + name: ldap2-ad-auth-volume + readOnly: true + - mountPath: >- + /usr/local/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap-ad/oup-ad3 + name: ldap3-ad-auth-volume + readOnly: true +``` + +#### `acs.additionalVolumes` + +* Required: false +* Default: None +* Description: A list of configMaps that need to be mounted as volumes to the alfresco pods. Make sure the configMap specified exists. 
Layout should be as follows: + +```yaml + - configMap: + defaultMode: 420 + items: + - key: ldap-ad-authentication.properties + path: ldap-ad-authentication.properties + name: ldap1-ad-auth-config + name: ldap1-ad-auth-volume + - configMap: + defaultMode: 420 + items: + - key: ldap-ad-authentication.properties + path: ldap-ad-authentication.properties + name: ldap2-ad-auth-config + name: ldap2-ad-auth-volume + - configMap: + defaultMode: 420 + items: + - key: ldap-ad-authentication.properties + path: ldap-ad-authentication.properties + name: ldap3-ad-auth-config + name: ldap3-ad-auth-volume +``` + + +### Digital Workspace + +#### `digitalWorkspace.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the digital workspace + +#### `digitalWorkspace.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `digitalWorkspace.image.registry` + +* Required: false +* Default: `quay.io` +* Description: The registry where the docker container can be found in + +#### `digitalWorkspace.image.repository` + +* Required: false +* Default: `alfresco/alfresco-digital-workspace` +* Description: The repository of the docker image that will be used + +#### `digitalWorkspace.image.tag` + +* Required: false +* Default: `2.4.2-adw` +* Description: The tag of the docker image that will be used + +#### `digitalWorkspace.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `digitalWorkspace.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `digitalWorkspace.basePath` + +* Required: false +* Default: `/workspace` +* Description: Specify the basepath where the digital workspace can be reached + +#### `digitalWorkspace.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. These will be stored in a config and are hence not safe for sensitive information + +#### `digitalWorkspace.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the digitalWorkspace-container envFrom section. This was added to allow to + integrate secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `digitalWorkspace.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Digital Workspace deployment + +#### `digitalWorkspace.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Digital Workspace service + +#### `digitalWorkspace.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. 
Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `digitalWorkspace.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "256Mi" + cpu: "150m" + ``` +* Description: The resources a node should keep reserved for your pod + +#### `digitalWorkspace.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `digitalWorkspace.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. + +### Share + +#### `share.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the share + +#### `share.mergeAcsShare` + +* Required: false +* Default: false +* Description: If set to `true` the Share container will be installed inside the ACS pod. + +#### `share.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `share.image.registry` + +* Required: false +* Default: `docker.io` +* Description: The registry where the docker container can be found in + +#### `share.image.repository` + +* Required: false +* Default: `alfresco-share-community` +* Description: The repository of the docker image that will be used + +#### `share.image.tag` + +* Required: false +* Default: `7.2.0` +* Description: The tag of the docker image that will be used + +#### `share.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `share.livenessProbe.failureThreshold` + +* Required: false +* Default: 1 +* Description: Specify the livenessProbe failure thresh hold fp how many consecutive failure before it stops probing + +#### `share.livenessProbe.initialDelaySeconds` + +* Required: false +* Default: 130 +* Description: Specify the livenessProbe initial delay before it starts probing + +#### `share.livenessProbe.periodSeconds` + +* Required: false +* Default: 20 +* Description: Specify the livenessProbe period between probes + +#### `share.livenessProbe.successThreshold` + +* Required: false +* Default: 1 +* Description: Specify the livenessProbe success thresh hold for how many consecutive successes for the probe to be + considered successful after having failed + +#### `share.livenessProbe.timeoutSeconds` + +* Required: false +* Default: 10 +* Description: Specify the livenessProbe timeout for probes to be considered as failure + +#### `share.readinessProbe.failureThreshold` + +* Required: false +* Default: 6 +* Description: Specify the readinessProbe failure thresh hold fp how many consecutive failure before it stops probing + +#### `share.readinessProbe.initialDelaySeconds` + +* Required: false +* Default: 60 +* Description: Specify the readinessProbe initial delay before it starts probing + +#### `share.readinessProbe.periodSeconds` + +* Required: false +* Default: 20 +* Description: Specify the readinessProbe period between probes + +#### `share.readinessProbe.successThreshold` + +* Required: false +* Default: 1 +* Description: Specify the readinessProbe success thresh hold for how many consecutive successes for the probe to be + considered successful after having failed + +#### `share.readinessProbe.timeoutSeconds` + +* 
Required: false +* Default: 10 +* Description: Specify the readinessProbe timeout for probes to be considered as failure + +#### `share.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `share.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. These will be stored in a config and are hence not safe for sensitive information + +#### `share.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the share-container envFrom section. This was added to allow to integrate + secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `share.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Share deployment + +#### `share.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Share service + +#### `share.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `share.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "256Mi" + cpu: "0.5" + ``` +* Description: The resources a node should keep reserved for your pod +* + +#### `share.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `share.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. + +### Active MQ + +#### `mq.adminLogin` + +* Required: false +* Default: None +* Description: Sets the username of the admin user of the MQ +* Note: If not specified the helm chart will try to reuse the value used in previous deployments. If these are not there + a random login will be used. + +#### `mq.adminPassword` + +* Required: false +* Default: None +* Description: Sets the password of the admin user of the MQ +* Note: If not specified the helm chart will try to reuse the value used in previous deployments. If these are not there + a random password will be used. 
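+
+As an illustration, a minimal values sketch that pins both MQ admin credentials instead of relying on reused or
+generated ones (the values shown are placeholders, not defaults shipped with the chart):
+
+```yaml
+mq:
+  adminLogin: admin        # placeholder - choose your own admin login
+  adminPassword: change-me # placeholder - choose your own admin password
+```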
+ +#### `mq.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the Active MQ + +#### `mq.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `mq.image.registry` + +* Required: false +* Default: `docker.io` +* Description: The registry where the docker container can be found in + +#### `mq.image.repository` + +* Required: false +* Default: `alfresco/alfresco-activemq` +* Description: The repository of the docker image that will be used + +#### `mq.image.tag` + +* Required: false +* Default: `5.16.1` +* Description: The tag of the docker image that will be used + +#### `mq.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `mq.strategy.type` + +* Required: false +* Default: `Recreate` +* Description: Can be set to `RollingUpdate` if you want to create pod before killing existing pod + +#### `mq.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. These will be stored in a config and are hence not safe for sensitive information + +#### `mq.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the mq-container envFrom section. This was added to allow to integrate secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `mq.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Active MQ deployment + +#### `mq.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Active MQ service + +#### `mq.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `mq.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "512Mi" + cpu: "0.5" + ``` +* Description: The resources a node should keep reserved for your pod +* + +#### `mq.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `mq.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. 
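+
+As an illustration, such a dockerconfigjson secret could be created with `kubectl` and then referenced by name in the
+values (the secret name, registry, credentials and namespace below are placeholders):
+
+```bash
+# Creates a kubernetes.io/dockerconfigjson secret in the release namespace
+kubectl create secret docker-registry privateDockerRepo1Secret \
+  --docker-server=registry.example.com \
+  --docker-username=my-user \
+  --docker-password=my-password \
+  --namespace my-alfresco-namespace
+```
+
+The same secret name can also be listed under `general.imagePullSecrets` to reference it from all deployments at once.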
+ +### Postgresql + +#### `postgresql.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the PostgresQl + +#### `postgresql.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `postgresql.image.registry` + +* Required: false +* Default: `docker.io` +* Description: The registry where the docker container can be found in + +#### `postgresql.image.repository` + +* Required: false +* Default: `xenit/postgres` +* Description: The repository of the docker image that will be used + +#### `postgresql.image.tag` + +* Required: false +* Default: `latest` +* Description: The tag of the docker image that will be used + +#### `postgresql.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `postgresql.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `postgresql.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. These will be stored in a config and are hence not safe for sensitive information + +#### `postgresql.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the postgresql-container envFrom section. This was added to allow to integrate + secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `postgresql.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the PostgresQl deployment + +#### `postgresql.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the PostgresQl service + +#### `postgresql.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `postgresql.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "1Gi" + cpu: "1" + ``` +* Description: The resources a node should keep reserved for your pod + +#### `postgresql.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `postgresql.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. 
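+
+For example, to use an external PostgreSQL instead of the bundled one, a values sketch could combine the keys
+documented above (the JDBC URL and credentials below are placeholders for your own database):
+
+```yaml
+postgresql:
+  enabled: false                # do not deploy the bundled postgresql
+general:
+  db:
+    username: alfresco          # placeholder - credentials of the external database
+    password: change-me
+acs:
+  dbUrl: jdbc:postgresql://my-external-db:5432/alfresco  # placeholder JDBC URL
+  dbDriver: org.postgresql.Driver
+```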
+
+### SOLR
+
+#### `solr.enabled`
+
+* Required: false
+* Default: `true`
+* Description: Enable or disable Solr
+
+#### `solr.replicas`
+
+* Required: false
+* Default: `2`
+* Description: The number of pods that will be running
+
+#### `solr.podManagementPolicy`
+
+* Required: false
+* Default: `Parallel`
+* Description: The way multiple pods are launched or terminated. Possible values are `Parallel` or `OrderedReady`; find
+  more information in the [Docs](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management)
+
+#### `solr.image.registry`
+
+* Required: false
+* Default: `docker.io`
+* Description: The registry where the docker container can be found in
+
+#### `solr.image.repository`
+
+* Required: false
+* Default: `xenit/alfresco-solr6-xenit`
+* Description: The repository of the docker image that will be used
+
+#### `solr.image.tag`
+
+* Required: false
+* Default: `2.0.6`
+* Description: The tag of the docker image that will be used
+
+#### `solr.imagePullPolicy`
+
+* Required: false
+* Default: `IfNotPresent`
+* Description: Specify when the pods should pull the image from the repositories
+
+#### `solr.strategy.type`
+
+* Required: false
+* Default: `RollingUpdate`
+* Description: Can be set to `OnDelete` if you want your statefulSet controller to not automatically update the pods
+
+#### `solr.additionalEnvironmentVariables`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  environmentVariable1Key: environmentVariable1Value
+  environmentVariable2Key: environmentVariable2Value
+  ```
+* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the
+  docker container. These will be stored in a config and are hence not safe for sensitive information
+
+#### `solr.podAnnotations`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  annotation1Key: annotation1Value
+  annotation2Key: annotation2Value
+  ```
+* Description: With this list of parameters you can add 1 or multiple annotations to the Solr deployment
+
+#### `solr.serviceAnnotations`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  annotation1Key: annotation1Value
+  annotation2Key: annotation2Value
+  ```
+* Description: With this list of parameters you can add 1 or multiple annotations to the Solr service
+
+#### `solr.serviceAccount`
+
+* Required: false
+* Default: None
+* Description: If your pods need to run with a service account you can specify that here. Please note that you are
+  yourself responsible to create the serviceAccount referenced in the namespace of this helm chart
+
+#### `solr.resources.requests`
+
+* Required: false
+* Default:
+  ```yaml
+  requests:
+    memory: "4Gi"
+    cpu: "1"
+  ```
+* Description: The resources a node should keep reserved for your pod
+
+#### `solr.resources.limits`
+
+* Required: false
+* Default: None
+* Description: The maximum resources a pod may consume from a node
+
+#### `solr.imagePullSecrets`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  - name: privateDockerRepo1Secret
+  - name: privateDockerRepo2Secret
+  ```
+* Description: If you use an image that is not public, you can create dockerconfigjson secrets on your cluster and
+  reference them here.
+
+#### `solr.autoBackup.enabled`
+
+* Required: false
+* Default: false
+* Description:
+  - Enable or disable the Solr auto backup job; it will create a cron job that calls Solr
+    to start a backup snapshot.
+  - backup repository environment variables need to be set if enabled:
+    ```yaml
+    - JAVA_OPTS_S3_ENDPOINT=-DS3_ENDPOINT=
+    - JAVA_OPTS_S3_REGION=-DS3_REGION=
+    - JAVA_OPTS_S3_BUCKET_NAME=-DS3_BUCKET_NAME=
+    - JAVA_OPTS_AWS_ACCESS_KEY_ID=-Daws.accessKeyId=
+    - JAVA_OPTS_AWS_SECRET_ACCESS_KEY=-Daws.secretKey=
+    ```
+
+#### `solr.autoBackup.cron`
+
+* Required: false
+* Default: `0 * * * *`
+* Description: if `solr.autoBackup.enabled` is true then a cron job will be created with this value as its schedule
+
+#### `solr.autoBackup.backupUrl`
+
+* Required: false
+* Default: `http://solr-service:30300/solr/alfresco/replication?command=backup&repository=s3&location=s3:///&numberToKeep=3`
+* Description: if `solr.autoBackup.enabled` is true then a cron job will be created that will curl this URL
+
+#### `solr.readinessProbe.enabled`
+
+* Required: false
+* Default: true
+* Description: Enable or disable the Solr readiness probe
+
+#### `solr.readinessProbe.failureThreshold`
+
+* Required: false
+* Default: 3
+* Description: Specify the readinessProbe failure threshold: how many consecutive failures before the probe is
+  considered failed
+
+#### `solr.readinessProbe.initialDelaySeconds`
+
+* Required: false
+* Default: 30
+* Description: Specify the readinessProbe initial delay before it starts probing
+
+#### `solr.readinessProbe.periodSeconds`
+
+* Required: false
+* Default: 10
+* Description: Specify the readinessProbe period between probes
+
+#### `solr.readinessProbe.successThreshold`
+
+* Required: false
+* Default: 1
+* Description: Specify the readinessProbe success threshold: how many consecutive successes are needed for the probe to
+  be considered successful after having failed
+
+#### `solr.readinessProbe.timeoutSeconds`
+
+* Required: false
+* Default: 10
+* Description: Specify the readinessProbe timeout after which a probe is considered failed
+
+### Transform Services
+
+#### `transformServices.enabled`
+
+* Required: false
+* Default: `true`
+* Description: Enable or disable the Transform Services
+
+#### `transformServices.imagePullSecrets`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  - name: privateDockerRepo1Secret
+  - name: privateDockerRepo2Secret
+  ```
+* Description: If you use an image that is not public, you can create dockerconfigjson secrets on your cluster and
+  reference them here. They will be referenced in all transform services Deployments.
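+
+The individual transform components documented below can be toggled independently; as an illustration, a values sketch
+(the component key paths follow the settings documented in the sections below, the chosen true/false values are only an
+example):
+
+```yaml
+transformServices:
+  enabled: true
+  sharedFileStore:
+    enabled: true
+  transformCoreAio:
+    enabled: true
+  transformRouter:
+    enabled: false   # example: run without the transform router
+```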
+ +### Shared File Store + +#### `transformServices.sharedFileStore.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the Shared File Store + +#### `transformServices.sharedFileStore.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `transformServices.sharedFileStore.image.registry` + +* Required: false +* Default: `docker.io` +* Description: The registry where the docker container can be found in + +#### `transformServices.sharedFileStore.image.repository` + +* Required: false +* Default: `alfresco/alfresco-shared-file-store` +* Description: The repository of the docker image that will be used + +#### `transformServices.sharedFileStore.image.tag` + +* Required: false +* Default: `0.16.1` +* Description: The tag of the docker image that will be used + +#### `transformServices.sharedFileStore.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `transformServices.sharedFileStore.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `transformServices.sharedFileStore.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + scheduler.cleanup.interval: "10800000" + scheduler.content.age.millis: "43200000" + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. These will be stored in a config and are hence not safe for sensitive information + +#### `transformServices.sharedFileStore.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the sharedFileStore-container envFrom section. This was added to allow to + integrate secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `transformServices.sharedFileStore.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Transform Services deployment + +#### `transformServices.sharedFileStore.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Transform Services service + +#### `transformServices.sharedFileStore.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. 
Please note that you are
+  responsible for creating the referenced serviceAccount in the namespace of this helm chart
+
+#### `transformServices.sharedFileStore.resources.requests`
+
+* Required: false
+* Default:
+  ```yaml
+  requests:
+    memory: "512Mi"
+    cpu: "200m"
+  ```
+* Description: The resources a node should keep reserved for your pod
+
+#### `transformServices.sharedFileStore.resources.limits`
+
+* Required: false
+* Default: None
+* Description: The maximum resources a pod may consume from a node
+
+#### `transformServices.sharedFileStore.imagePullSecrets`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  - name: privateDockerRepo1Secret
+  - name: privateDockerRepo2Secret
+  ```
+* Description: If you use an image that is not public, you can create dockerconfigjson secrets on your cluster and
+  reference them here.
+
+#### `transformServices.sharedFileStore.initVolumes`
+
+* Required: false
+* Default: `true`
+* Description: Enable or disable the init container that sets the owner of /tmp/Alfresco to the sfs user
+
+### Transform Core All In One
+
+#### `transformServices.transformCoreAio.enabled`
+
+* Required: false
+* Default: `true`
+* Description: Enable or disable the Transform Core All In One
+
+#### `transformServices.transformCoreAio.replicas`
+
+* Required: false
+* Default: `1`
+* Description: The number of pods that will be running
+
+#### `transformServices.transformCoreAio.image.registry`
+
+* Required: false
+* Default: `docker.io`
+* Description: The registry where the docker image can be found
+
+#### `transformServices.transformCoreAio.image.repository`
+
+* Required: false
+* Default: `alfresco/alfresco-transform-core-aio`
+* Description: The repository of the docker image that will be used
+
+#### `transformServices.transformCoreAio.image.tag`
+
+* Required: false
+* Default: `latest`
+* Description: The tag of the docker image that will be used
+
+#### `transformServices.transformCoreAio.imagePullPolicy`
+
+* Required: false
+* Default: `IfNotPresent`
+* Description: Specify when the pods should pull the image from the registry
+
+#### `transformServices.transformCoreAio.strategy.type`
+
+* Required: false
+* Default: `RollingUpdate`
+* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created
+
+#### `transformServices.transformCoreAio.additionalEnvironmentVariables`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  livenessPercent: "150"
+  livenessTransformPeriodSeconds: "600"
+  maxTransforms: "100000"
+  ```
+* Description: With this list of parameters you can add one or multiple environment variables that will be passed to
+  the docker container. These will be stored in a ConfigMap and are hence not safe for sensitive information
+
+#### `transformServices.transformCoreAio.envFrom`
+
+* Required: false
+* Default: None
+* Description: This allows you to add entries to the transformCoreAio container's envFrom section. It was added to
+  make it possible to integrate secrets that are not created by this helm chart.
+* Example:
+
+```yaml
+  - secretRef:
+      name: s3-secret
+```
+
+#### `transformServices.transformCoreAio.livenessProbe.enabled`
+
+* Required: false
+* Default: true
+* Description: Enables the liveness and readiness probes. Additional settings can be added through
+  `additionalEnvironmentVariables`, for example:
+```yaml + livenessPercent: "150" + livenessTransformPeriodSeconds: "600" + maxTransforms: "100000" + ``` + + +#### `transformServices.transformCoreAio.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + prometheus.io/path: actuator/prometheus + prometheus.io/scrape: 'true' + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Transform Core All In One + +#### `transformServices.transformCoreAio.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Transform Core All In One + service + +#### `transformServices.transformCoreAio.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `transformServices.transformCoreAio.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "256Mi" + cpu: "150m" + ``` +* Description: The resources a node should keep reserved for your pod +* + +#### `transformServices.transformCoreAio.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `transformServices.transformCoreAio.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. + +### Transform Router + +#### `transformServices.transformRouter.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the Transform Router + +#### `transformServices.transformRouter.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `transformServices.transformRouter.image.registry` + +* Required: false +* Default: `quay.io` +* Description: The registry where the docker container can be found in + +#### `transformServices.transformRouter.image.repository` + +* Required: false +* Default: `alfresco/alfresco-transform-router` +* Description: The repository of the docker image that will be used + +#### `transformServices.transformRouter.image.tag` + +* Required: false +* Default: `1.5.2` +* Description: The tag of the docker image that will be used + +#### `transformServices.transformRouter.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `transformServices.transformRouter.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `transformServices.transformRouter.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. 
These will be stored in a config and are hence not safe for sensitive information + +#### `transformServices.transformRouter.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the transformRouter-container envFrom section. This was added to allow to + integrate secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `transformServices.transformRouter.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Transform Router deployment + +#### `transformServices.transformRouter.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Transform Router service + +#### `transformServices.transformRouter.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `transformServices.transformRouter.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "128Mi" + cpu: "100m" + ``` +* Description: The resources a node should keep reserved for your pod +* + +#### `transformServices.transformRouter.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `transformServices.transformRouter.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. + +### Sync Service + +#### `syncService.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the Sync Service + +#### `syncService.replicas` + +* Required: false +* Default: `1` +* Description: The number of pods that will be running + +#### `syncService.image.registry` + +* Required: false +* Default: `quay.io` +* Description: The registry where the docker container can be found in + +#### `syncService.image.repository` + +* Required: false +* Default: `alfresco/service-sync` +* Description: The repository of the docker image that will be used + +#### `syncService.image.tag` + +* Required: false +* Default: `3.4.0` +* Description: The tag of the docker image that will be used + +#### `syncService.imagePullPolicy` + +* Required: false +* Default: `IfNotPresent` +* Description: Specify when the pods should pull the image from the repositories + +#### `syncService.strategy.type` + +* Required: false +* Default: `RollingUpdate` +* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created + +#### `syncService.additionalEnvironmentVariables` + +* Required: false +* Default: None +* Example: + ```yaml + environmentVariable1Key: environmentVariable1Value + environmentVariable2Key: environmentVariable2Value + ``` +* Description: With this list of parameters you can add 1 or multiple environment variables that will be passed to the + docker container. 
These will be stored in a ConfigMap and are hence not safe for sensitive information
+
+#### `syncService.envFrom`
+
+* Required: false
+* Default: None
+* Description: This allows you to add entries to the syncService container's envFrom section. It was added to make it
+  possible to integrate secrets that are not created by this helm chart.
+* Example:
+
+```yaml
+  - secretRef:
+      name: s3-secret
+```
+
+#### `syncService.podAnnotations`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  annotation1Key: annotation1Value
+  annotation2Key: annotation2Value
+  ```
+* Description: With this list of parameters you can add one or multiple annotations to the Sync Service deployment
+
+#### `syncService.serviceAnnotations`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  annotation1Key: annotation1Value
+  annotation2Key: annotation2Value
+  ```
+* Description: With this list of parameters you can add one or multiple annotations to the Sync Service service
+
+#### `syncService.serviceAccount`
+
+* Required: false
+* Default: None
+* Description: If your pods need to run with a service account you can specify that here. Please note that you are
+  responsible for creating the referenced serviceAccount in the namespace of this helm chart
+
+#### `syncService.resources.requests`
+
+* Required: false
+* Default:
+  ```yaml
+  requests:
+    memory: "512Mi"
+    cpu: "0.5"
+  ```
+* Description: The resources a node should keep reserved for your pod
+
+#### `syncService.resources.limits`
+
+* Required: false
+* Default: None
+* Description: The maximum resources a pod may consume from a node
+
+#### `syncService.imagePullSecrets`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  - name: privateDockerRepo1Secret
+  - name: privateDockerRepo2Secret
+  ```
+* Description: If you use an image that is not public, you can create dockerconfigjson secrets on your cluster and
+  reference them here.
+
+### Office Online Integration (OOI)
+
+#### `ooi.enabled`
+
+* Required: false
+* Default: `false`
+* Description: Enable or disable the Office Online Integration
+
+#### `ooi.replicas`
+
+* Required: false
+* Default: `1`
+* Description: The number of pods that will be running
+
+#### `ooi.image.registry`
+
+* Required: false
+* Default: `quay.io`
+* Description: The registry where the docker image can be found
+
+#### `ooi.image.repository`
+
+* Required: false
+* Default: `alfresco/alfresco-ooi-service`
+* Description: The repository of the docker image that will be used
+
+#### `ooi.image.tag`
+
+* Required: false
+* Default: `1.1.2`
+* Description: The tag of the docker image that will be used
+
+#### `ooi.imagePullPolicy`
+
+* Required: false
+* Default: `IfNotPresent`
+* Description: Specify when the pods should pull the image from the registry
+
+#### `ooi.strategy.type`
+
+* Required: false
+* Default: `RollingUpdate`
+* Description: Can be set to `Recreate` if you want all your pods to be killed before new ones are created
+
+#### `ooi.additionalEnvironmentVariables`
+
+* Required: false
+* Default: None
+* Example:
+  ```yaml
+  environmentVariable1Key: environmentVariable1Value
+  environmentVariable2Key: environmentVariable2Value
+  ```
+* Description: With this list of parameters you can add one or multiple environment variables that will be passed to
+  the docker container.
These will be stored in a config and are hence not safe for sensitive information + +#### `ooi.envFrom` + +* Required: false +* Default: None +* Description: This allows you to add to the ooi-container envFrom section. This was added to allow to integrate secrets + that are not added by this helm chart. +* Example: + +```yaml + - secretRef: + name: s3-secret +``` + +#### `ooi.podAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Office Online Integration + deployment + +#### `ooi.serviceAnnotations` + +* Required: false +* Default: None +* Example: + ```yaml + annotation1Key: annotation1Value + annotation2Key: annotation2Value + ``` +* Description: With this list of parameters you can add 1 or multiple annotations to the Office Online Integration + service + +#### `ooi.serviceAccount` + +* Required: false +* Default: None +* Description: If your pods need to run with a service account you can specify that here. Please note that you are + yourself responsible to create the serviceAccount referenced in the namespace of this helm chart + +#### `ooi.resources.requests` + +* Required: false +* Default: + ```yaml + requests: + memory: "128Mi" + cpu: "100m" + ``` +* Description: The resources a node should keep reserved for your pod +* + +#### `ooi.resources.limits` + +* Required: false +* Default: None +* Description: The maximum resources a pod may consume from a node + +#### `ooi.imagePullSecrets` + +* Required: false +* Default: None +* Example: + ```yaml + - name: privateDockerRepo1Secret + - name: privateDockerRepo2Secret + ``` +* Description: If you use an image that is not public. then you can create dockerconfigjson secrets on your cluster and + reference them here. + +### Persistent Storage + +### Alfresco + +#### `persistentStorage.alfresco.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the creation of a PV and PVC for the ACS pods + +#### `persistentStorage.alfresco.storageClassName` + +* Required: false +* Default: `scw-bssd` +* Description: Provide what storageClass should be used. For values other then `scw-bssd` `standard` + or `efs-storage-class` you will need to make sure that that storage class is created + +#### `persistentStorage.alfresco.storage` + +* Required: false +* Default: `3` +* Description: The size in GB of the volume that should be reserved + +#### `persistentStorage.alfresco.efs.volumeHandle` + +* Required: when `persistentStorage.alfresco.storageClassName` is `scw-bssd` +* Default: None +* Description: The volume handle pointing to the AWS EFS location + +#### `persistentStorage.alfresco.additionalClaims` + +* Required: false +* Default: None +* Description: A list of additional volume claims that can be added to the alfresco pods. Layout should be as follows: + +```yaml + - name: name1 + mountPath: /apps/example + subPath: subPath/example + storageClassName: "standard" + storage: 2 + efs: + volumeHandle: "efs-identifier" +``` + +### Postgres + +#### `persistentStorage.postgres.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the creation of a PV and PVC for the PostgresQL pods + +#### `persistentStorage.postgres.storageClassName` + +* Required: false +* Default: `scw-bssd` +* Description: Provide what storageClass should be used. 
For values other then `scw-bssd` `standard` + or `efs-storage-class` you will need to make sure that that storage class is created + +#### `persistentStorage.postgres.storage` + +* Required: false +* Default: `2` +* Description: The size in GB of the volume that should be reserved + +#### `persistentStorage.postgres.efs.volumeHandle` + +* Required: when `persistentStorage.postgres.storageClassName` is `scw-bssd` +* Default: None +* Description: The volume handle pointing to the AWS EFS location + +### SOLR + +#### `persistentStorage.solr.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the creation of a PV and PVC for the SOLR pods + +#### `persistentStorage.solr.storageClassName` + +* Required: false +* Default: `scw-bssd` +* Description: Provide what storageClass should be used. For values other then `scw-bssd` `standard` + or `efs-storage-class` you will need to make sure that that storage class is created + +#### `persistentStorage.solr.storage` + +* Required: false +* Default: `3` +* Description: The size in GB of the volume that should be reserved + +#### `persistentStorage.solr.efs.volumeHandle` + +* Required: when `persistentStorage.solr.storageClassName` is `scw-bssd` +* Default: None +* Description: The volume handle pointing to the AWS EFS location + +### SOLR BACKUP + +#### `persistentStorage.solrBackup.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the creation of a PV and PVC for the SOLR BACKUP for SOLR pods + +#### `persistentStorage.solrBackup.storageClassName` + +* Required: false +* Default: `scw-bssd` +* Description: Provide what storageClass should be used. For values other then `scw-bssd` `standard` + or `efs-storage-class` you will need to make sure that that storage class is created + +#### `persistentStorage.solrBackup.storage` + +* Required: false +* Default: `3` +* Description: The size in GB of the volume that should be reserved + +#### `persistentStorage.solrBackup.efs.volumeHandle` + +* Required: when `persistentStorage.solrBackup.storageClassName` is `scw-bssd` +* Default: None +* Description: The volume handle pointing to the AWS EFS location + +### Shared File Store + +#### `persistentStorage.sharedFileStore.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the creation of a PV and PVC for the Shared File Store pods + +#### `persistentStorage.sharedFileStore.storageClassName` + +* Required: false +* Default: `scw-bssd` +* Description: Provide what storageClass should be used. 
For values other then `scw-bssd` `standard` + or `efs-storage-class` you will need to make sure that that storage class is created + +#### `persistentStorage.sharedFileStore.storage` + +* Required: false +* Default: `3` +* Description: The size in GB of the volume that should be reserved + +#### `persistentStorage.sharedFileStore.efs.volumeHandle` + +* Required: when `persistentStorage.sharedFileStore.storageClassName` is `scw-bssd` +* Default: None +* Description: The volume handle pointing to the AWS EFS location + +### Active MQ + +#### `persistentStorage.mq.enabled` + +* Required: false +* Default: `true` +* Description: Enable or disable the creation of a PV and PVC for the Active MQ pods + + +#### `persistentStorage.mq.initVolumes` + +* Required: false +* Default: `true` +* Description: Enable or disable the setting of mq data owner to amq user + +#### `persistentStorage.mq.storageClassName` + +* Required: false +* Default: `scw-bssd` +* Description: Provide what storageClass should be used. For values other then `scw-bssd` `standard` + or `efs-storage-class` you will need to make sure that that storage class is created + +#### `persistentStorage.mq.storage` + +* Required: false +* Default: `3` +* Description: The size in GB of the volume that should be reserved + +#### `persistentStorage.mq.efs.volumeHandle` + +* Required: when `persistentStorage.mq.storageClassName` is `scw-bssd` +* Default: None +* Description: The volume handle pointing to the AWS EFS location diff --git a/integration-testing/build.gradle b/integration-testing/build.gradle new file mode 100644 index 0000000..3b93fe7 --- /dev/null +++ b/integration-testing/build.gradle @@ -0,0 +1,27 @@ +plugins { + id 'java-library' +} + +java { + toolchain { + languageVersion = JavaLanguageVersion.of(17) + } +} + +test { + useJUnitPlatform() +} + +repositories { + mavenCentral() +} + +dependencies { + implementation 'io.kubernetes:client-java:18.0.1' + implementation 'org.awaitility:awaitility:4.2.0' + implementation 'org.slf4j:slf4j-api:2.0.11' + implementation 'org.zeroturnaround:zt-exec:1.12' + testImplementation 'org.junit.jupiter:junit-jupiter:5.10.1' + testImplementation 'org.slf4j:slf4j-simple:2.0.11' +} + diff --git a/integration-testing/gradle/wrapper/gradle-wrapper.jar b/integration-testing/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..d64cd49 Binary files /dev/null and b/integration-testing/gradle/wrapper/gradle-wrapper.jar differ diff --git a/integration-testing/gradle/wrapper/gradle-wrapper.properties b/integration-testing/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..1af9e09 --- /dev/null +++ b/integration-testing/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/integration-testing/gradlew b/integration-testing/gradlew new file mode 100755 index 0000000..1aa94a4 --- /dev/null +++ b/integration-testing/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). 
+cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. 
+ +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/integration-testing/gradlew.bat b/integration-testing/gradlew.bat new file mode 100644 index 0000000..6689b85 --- /dev/null +++ b/integration-testing/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. 
+ +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/integration-testing/settings.gradle b/integration-testing/settings.gradle new file mode 100644 index 0000000..7a81f5c --- /dev/null +++ b/integration-testing/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'helm-alfresco-integration-testing' \ No newline at end of file diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/PodTests.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/PodTests.java new file mode 100644 index 0000000..3cdc792 --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/PodTests.java @@ -0,0 +1,97 @@ +package eu.xenit.testing.k8s; + +import static org.awaitility.Awaitility.await; + +import eu.xenit.testing.k8s.cluster.Cluster; +import io.kubernetes.client.openapi.ApiClient; +import io.kubernetes.client.openapi.ApiException; +import io.kubernetes.client.openapi.apis.CoreV1Api; +import io.kubernetes.client.openapi.models.V1PodList; +import io.kubernetes.client.util.Config; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.awaitility.core.ConditionTimeoutException; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PodTests { + + private static Logger logger = LoggerFactory.getLogger(PodTests.class); + + public static void checkPodsReady(Cluster cluster, String namespace, String labelSelector, int podCount, int timeoutSeconds) { + try { + await().atMost(timeoutSeconds, TimeUnit.SECONDS).until(() -> checkPodsReadyInternal(cluster, namespace, labelSelector, podCount)); + } catch (ConditionTimeoutException e) { + var podList = getV1PodList(cluster, namespace, labelSelector); + if (podList.getItems().isEmpty()) { + logger.error("Podlist is empty"); + } else { + logger.error("Number of pods for %s: %d".formatted(labelSelector, podList.getItems().size())); + var api = getCoreV1Api(cluster); + for (var pod: podList.getItems()) { + try { + var events = api.listNamespacedEvent(namespace, null, null, null, "involvedObject.name="+pod.getMetadata().getName(), null, null, null, null, null, null); + logger.error("Events for pod: "+pod.getMetadata().getName()); + for (var event: events.getItems()) { + logger.error("Type: %s, Reason:%s, First time:%s, Last time:%s, Count:%s, From:%s, Message:%s".formatted(event.getType(), event.getReason(), event.getFirstTimestamp(), event.getLastTimestamp(), event.getCount(), event.getSource(), event.getMessage())); + } + + } catch (ApiException ex) { + throw new RuntimeException(ex); + } + } + } + throw e; + } + } + + @NotNull + private static Boolean checkPodsReadyInternal( + Cluster cluster, String namespace, String labelSelector, int amount) throws ApiException { + V1PodList podList = getV1PodList(cluster, namespace, labelSelector); + if (podList.getItems().size() != 
amount) { + return false; + } + for (var pod: podList.getItems()) { + boolean ready = false; + if (pod.getStatus().getConditions() == null) { + return false; + } + for (var condition: pod.getStatus().getConditions()) { + if ("Ready".equals(condition.getType()) && "True".equals(condition.getStatus())) { + ready = true; + break; + } + } + if (!ready) { + return false; + } + } + return true; + } + + private static V1PodList getV1PodList(Cluster cluster, String namespace, String labelSelector) { + CoreV1Api api = getCoreV1Api(cluster); + V1PodList podList = null; + try { + podList = api.listNamespacedPod(namespace, null, null, null, null, labelSelector, null, null, null, null, null); + } catch (ApiException e) { + throw new RuntimeException(e); + } + return podList; + } + + @NotNull + private static CoreV1Api getCoreV1Api(Cluster cluster) { + ApiClient apiClient = null; + try { + apiClient = Config.fromConfig(cluster.getKubeConfig().toAbsolutePath().toString()); + } catch (IOException e) { + throw new RuntimeException(e); + } + CoreV1Api api = new CoreV1Api(apiClient); + return api; + } + +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/cluster/Cluster.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/cluster/Cluster.java new file mode 100644 index 0000000..7fca039 --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/cluster/Cluster.java @@ -0,0 +1,22 @@ +package eu.xenit.testing.k8s.cluster; + +import java.io.InputStream; +import java.nio.file.Path; + +public abstract class Cluster { + + public Cluster(String context) { + this.context = context; + } + + private String context; + + public String getContext() { + return context; + } + + public abstract void destroy(); + + public abstract Path getKubeConfig(); + +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/cluster/ClusterProvisioner.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/cluster/ClusterProvisioner.java new file mode 100644 index 0000000..de311a3 --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/cluster/ClusterProvisioner.java @@ -0,0 +1,7 @@ +package eu.xenit.testing.k8s.cluster; + +public interface ClusterProvisioner { + + Cluster provision(); + +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/command/CommandHelper.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/command/CommandHelper.java new file mode 100644 index 0000000..1c8d553 --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/command/CommandHelper.java @@ -0,0 +1,51 @@ +package eu.xenit.testing.k8s.command; + +import java.io.IOException; +import java.util.concurrent.TimeoutException; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.zeroturnaround.exec.ProcessExecutor; +import org.zeroturnaround.exec.stream.slf4j.Slf4jStream; + +public class CommandHelper { + + public static void executeAndPrintCommand(Logger logger, String... command) { + executeAndPrintCommand(logger, false, command); + } + + public static void executeAndPrintCommand(Logger logger, boolean stdErrAsInfo, String... 
command) { + try { + var executor = new ProcessExecutor().command(command) + .redirectOutput(Slf4jStream.of(logger).asInfo()); + if(!stdErrAsInfo) { + executor = executor.redirectError(Slf4jStream.of(logger).asError()); + } + assert executor.execute().getExitValue() == 0; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static String executeCommandAndReturnOutput(String... command) { + ProcessBuilder builder = new ProcessBuilder(); + builder.redirectError(); + builder.command(command); + try { + Process process = builder.start(); + String result = new String(process.getInputStream().readAllBytes()); + assert process.waitFor() == 0; + return result; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @NotNull + public static String[] prependArg(String first, String[] args) { + var newArgs = new String[args.length + 1]; + newArgs[0] = first; + System.arraycopy(args, 0, newArgs, 1, args.length); + return newArgs; + } + +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/helm/HelmCommander.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/helm/HelmCommander.java new file mode 100644 index 0000000..e343f96 --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/helm/HelmCommander.java @@ -0,0 +1,29 @@ +package eu.xenit.testing.k8s.helm; + +import eu.xenit.testing.k8s.cluster.Cluster; +import eu.xenit.testing.k8s.command.CommandHelper; +import java.util.ArrayList; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HelmCommander { + + private static Logger logger = LoggerFactory.getLogger(HelmCommander.class); + + private final Cluster cluster; + + public HelmCommander(Cluster cluster) { + this.cluster = cluster; + } + + public void commandAndPrint(String... 
args) { + var argList = new ArrayList(Arrays.stream(CommandHelper.prependArg("helm", args)).toList()); + argList.add("--kubeconfig"); + argList.add(cluster.getKubeConfig().toAbsolutePath().toString()); + String[] argArray = (String[]) argList.toArray(new String[argList.size()]); + CommandHelper.executeAndPrintCommand(logger, argArray); + } + + +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindCluster.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindCluster.java new file mode 100644 index 0000000..720958a --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindCluster.java @@ -0,0 +1,36 @@ +package eu.xenit.testing.k8s.kind; + +import eu.xenit.testing.k8s.cluster.Cluster; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +public class KindCluster extends Cluster { + + private KindCommander kindCommander; + private String name; + public KindCluster(String name, KindCommander kindCommander) { + super("kind-" + name); + this.name = name; + this.kindCommander = kindCommander; + } + + @Override + public void destroy() { + kindCommander.commandAndPrint("delete", "cluster", "--name", name); + } + + private Path kubeConfig; + @Override + public Path getKubeConfig() { + if (kubeConfig == null) { + try { + kubeConfig = Files.createTempFile("kubeconfig-"+name, ".yaml"); + Files.writeString(kubeConfig, kindCommander.commandReturn("get", "kubeconfig", "--name", name)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return kubeConfig; + } +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindClusterProvisioner.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindClusterProvisioner.java new file mode 100644 index 0000000..7e3911e --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindClusterProvisioner.java @@ -0,0 +1,62 @@ +package eu.xenit.testing.k8s.kind; + +import eu.xenit.testing.k8s.cluster.Cluster; +import eu.xenit.testing.k8s.cluster.ClusterProvisioner; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Random; +import java.util.Scanner; + +public class KindClusterProvisioner implements ClusterProvisioner { + private KindCommander kindCommander; + public KindClusterProvisioner(KindCommander kindCommander) { + this.kindCommander = kindCommander; + } + + public KindClusterProvisioner() { + this.kindCommander = new KindCommander(); + } + + private String configuration; + + public String getConfiguration() { + return configuration; + } + + public void setConfiguration(String configuration) { + this.configuration = configuration; + } + + private Random random = new Random(); + @Override + public Cluster provision() { + Path kindConfig = null; + try { + kindConfig = Files.createTempFile("kindconfig-", ".yaml"); + Files.writeString(kindConfig, configuration); + } catch (IOException e) { + throw new RuntimeException(e); + } + String name = random.ints(97, 123) + .limit(10) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString(); + + kindCommander.commandAndPrint("create", "cluster", "--name", name, "--config=%s".formatted(kindConfig.toAbsolutePath().toString())); + return new KindCluster(name, kindCommander); + } + + private static void inheritIO(final InputStream src, final PrintStream dest) { + new Thread(new Runnable() { + public void 
run() { + Scanner sc = new Scanner(src); + while (sc.hasNextLine()) { + dest.println(sc.nextLine()); + } + } + }).start(); + } +} diff --git a/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindCommander.java b/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindCommander.java new file mode 100644 index 0000000..04a7cfa --- /dev/null +++ b/integration-testing/src/main/java/eu/xenit/testing/k8s/kind/KindCommander.java @@ -0,0 +1,35 @@ +package eu.xenit.testing.k8s.kind; + +import eu.xenit.testing.k8s.command.CommandHelper; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KindCommander { + + private static Logger logger = LoggerFactory.getLogger(KindCommander.class); + + private String binaryPath; + + public KindCommander(String binaryPath) { + this.binaryPath = binaryPath; + } + + public KindCommander() { + this("kind"); + } + + public void commandAndPrint(String... args) { + CommandHelper.executeAndPrintCommand(logger, true, prependKind(args)); + } + + public String commandReturn(String... args) { + return CommandHelper.executeCommandAndReturnOutput(prependKind(args)); + } + + @NotNull + private String[] prependKind(String[] args) { + return CommandHelper.prependArg(binaryPath, args); + } + +} diff --git a/integration-testing/src/test/java/eu/xenit/testing/k8s/kind/HelmAlfrescoTest.java b/integration-testing/src/test/java/eu/xenit/testing/k8s/kind/HelmAlfrescoTest.java new file mode 100644 index 0000000..05104c7 --- /dev/null +++ b/integration-testing/src/test/java/eu/xenit/testing/k8s/kind/HelmAlfrescoTest.java @@ -0,0 +1,85 @@ +package eu.xenit.testing.k8s.kind; + +import eu.xenit.testing.k8s.PodTests; +import eu.xenit.testing.k8s.cluster.Cluster; +import eu.xenit.testing.k8s.helm.HelmCommander; +import java.io.IOException; +import java.nio.file.Files; +import org.junit.jupiter.api.Test; + + +class HelmAlfrescoTest { + + @Test + void smallSetup() throws IOException { + var kindConfiguration = """ + kind: Cluster + apiVersion: kind.x-k8s.io/v1alpha4 + nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 8099 + protocol: TCP + """; + + var values = """ + general: + cni: kindnetd + ingress: + host: test + protocol: http + kubernetes.io/ingress.class: {} + acs: + replicas: 1 + resources: + requests: + memory: "2Gi" + cpu: "0" + mq: + resources: + requests: + memory: "1Gi" + cpu: "0" + solr: + enabled: false + transformServices: + enabled: false + digitalWorkspace: + enabled: false + """; + + var clusterProvisioner = new KindClusterProvisioner(); + clusterProvisioner.setConfiguration(kindConfiguration); + + Cluster cluster = null; + + try { + cluster = clusterProvisioner.provision(); + + var tempFile = Files.createTempFile("values", ".yaml"); + Files.writeString(tempFile, values); + + var helmCommander = new HelmCommander(cluster); + var namespace = "mynamespace"; + helmCommander.commandAndPrint("install", + "testinstall", "../xenit-alfresco", + "-f", tempFile.toAbsolutePath().toString(), + "-n", namespace, "--create-namespace"); + + PodTests.checkPodsReady(cluster, namespace, "app = acs", 1, 300); + PodTests.checkPodsReady(cluster, namespace, "app = share", 1, 300); + } finally { + if (cluster != null) { + cluster.destroy(); + } + } + + } +} \ No newline at end of file diff --git 
a/integration-testing/src/test/resources/simplelogger.properties b/integration-testing/src/test/resources/simplelogger.properties new file mode 100644 index 0000000..9eb315d --- /dev/null +++ b/integration-testing/src/test/resources/simplelogger.properties @@ -0,0 +1,3 @@ +org.slf4j.simpleLogger.log.eu.xenit=INFO +org.slf4j.simpleLogger.showDateTime=true +org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss,SSS \ No newline at end of file diff --git a/local-values.yaml b/local-values.yaml new file mode 100644 index 0000000..2ca2f60 --- /dev/null +++ b/local-values.yaml @@ -0,0 +1,26 @@ +ingress: + host: test + protocol: http + kubernetes.io/ingress.class: {} +acs: + resources: + requests: + memory: "1Gi" + cpu: "1" +share: + mergeAcsShare: true" +mq: + resources: + requests: + memory: "1Gi" + cpu: "1" +solr: + enabled: false +transformServices: + enabled: false +digitalWorkspace: + enabled: false + +general: + networkPolicies: + enabled: false \ No newline at end of file diff --git a/skaffold.yaml b/skaffold.yaml new file mode 100644 index 0000000..0a44f9f --- /dev/null +++ b/skaffold.yaml @@ -0,0 +1,14 @@ +apiVersion: skaffold/v2beta10 +kind: Config +metadata: + name: alfresco +build: + local: + concurrency: 0 +deploy: + helm: + releases: + - name: alfresco + chartPath: xenit-alfresco + valuesFiles: + - local-values.yaml \ No newline at end of file diff --git a/xenit-aps/.gitignore b/xenit-aps/.gitignore new file mode 100644 index 0000000..ebf1d3d --- /dev/null +++ b/xenit-aps/.gitignore @@ -0,0 +1 @@ +charts diff --git a/xenit-aps/.helmignore b/xenit-aps/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/xenit-aps/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/xenit-aps/Chart.yaml b/xenit-aps/Chart.yaml new file mode 100644 index 0000000..f61fa39 --- /dev/null +++ b/xenit-aps/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: alfresco +description: A Helm chart for Alfresco managed by Xenit Solutions + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.5 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" \ No newline at end of file diff --git a/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-config.yaml b/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-config.yaml new file mode 100644 index 0000000..150878d --- /dev/null +++ b/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: alfresco-activity-admin-configmap + namespace: {{ .Release.Namespace | quote }} + labels: + app: alfresco-activity-admin +data: + DB_DRIVER: {{ quote .Values.acs.dbDriver }} //TODO + {{- if .Values.activityAdmin.additionalEnvironmentVariables }} + {{ toYaml .Values.activityAdmin.additionalEnvironmentVariables | nindent 2 }} + {{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-deployment.yaml b/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-deployment.yaml new file mode 100644 index 0000000..d72a7f2 --- /dev/null +++ b/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-deployment.yaml @@ -0,0 +1,108 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alfresco-activity-admin + namespace: {{ .Release.Namespace | quote }} + labels: + app: alfresco-activity-admin +spec: + replicas: {{ .Values.activityAdmin.replicas }} + selector: + matchLabels: + app: alfresco-activity-admin + strategy: + {{- if eq .Values.activityAdmin.strategy.type "Recreate" }} + type: {{ .Values.activityAdmin.strategy.type }} + {{- else }} + {{ toYaml .Values.general.strategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: alfresco-activity-admin + annotations: + checksum/alfresco-activity-admin-config: {{ include (print $.Template.BasePath "/alfresco-activity-admin/alfresco-activity-config.yaml") . | sha256sum }} + checksum/db-secret: {{ include (print $.Template.BasePath "/db-secret.yaml") . 
| sha256sum }} + spec: + {{- if .Values.activityAdmin.serviceAccount }} + serviceAccountName: {{ .Values.activityAdmin.serviceAccount }} + {{- end }} + containers: + - name: alfresco-activity-admin-container + image: {{ .Values.activityAdmin.image.registry }}/{{ .Values.activityAdmin.image.repository }}:{{ .Values.activityAdmin.image.tag }} + imagePullPolicy: {{ .Values.activityAdmin.imagePullPolicy | default "IfNotPresent" }} + {{- if .Values.activityAdmin.readinessProbe }} + readinessProbe: + {{ toYaml .Values.activityAdmin.readinessProbe | nindent 10 }} + {{- end }} + {{- if .Values.activityAdmin.livenessProbe }} + livenessProbe: + {{ toYaml .Values.activityAdmin.livenessProbe | nindent 10 }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - sleep 20 + envFrom: + - configMapRef: + name: alfresco-activity-admin-configmap + - secretRef: + name: db-secret + {{- if .Values.activityAdmin.envFrom }} + {{ toYaml .Values.activityAdmin.envFrom | nindent 10 }} + {{- end }} + ports: + - containerPort: 8080 + protocol: TCP + - containerPort: 5701 + protocol: TCP + - containerPort: 8443 + protocol: TCP + {{- if or (.Values.activityAdmin.resources.requests) ((.Values.activityAdmin.resources.limits)) }} + resources: + {{- if .Values.activityAdmin.resources.requests }} + requests: + {{ toYaml .Values.activityAdmin.resources.requests | nindent 14 }} + {{- end }} + {{- if .Values.activityAdmin.resources.limits }} + limits: + {{ toYaml .Values.activityAdmin.resources.limits | nindent 14 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.persistentStorage.alfresco.enabled }} + - name: data + mountPath: /opt/alfresco/alf_data + subPath: alfresco/data + {{- end }} + {{- if .Values.persistentStorage.alfresco.additionalClaims }} + {{- range .Values.persistentStorage.alfresco.additionalClaims }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- if .Values.activityAdmin.additionalVolumeMounts }} + {{- toYaml .Values.activityAdmin.additionalVolumeMounts | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.persistentStorage.activityAdmin.additionalClaims }} + {{- range .Values.persistentStorage.activityAdmin.additionalClaims }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .name }}-pvc + {{- end }} + {{- end }} + {{- if .Values.activityAdmin.additionalVolumes }} + {{ toYaml .Values.activityAdmin.additionalVolumes | nindent 8 }} + {{- end }} + imagePullSecrets: + {{- if .Values.general.imagePullSecrets}} + {{ toYaml .Values.general.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.activityAdmin.imagePullSecrets}} + {{ toYaml .Values.activityAdmin.imagePullSecrets | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-service.yaml b/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-service.yaml new file mode 100644 index 0000000..be39c5e --- /dev/null +++ b/xenit-aps/templates/alfresco-activity-admin/alfresco-activity-admin-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: alfresco-activity-admin-service + namespace: {{ .Release.Namespace | quote }} + annotations: + {{- if .Values.activityAdmin.serviceAnnotations }} + {{ toYaml .Values.activityAdmin.serviceAnnotations | nindent 4 }} + {{- end }} +spec: + selector: + app: alfresco-activity-admin + ports: + - name: 'alfresco-activity-admin' + protocol: TCP + port: 30000 + targetPort: 8080 + - name: 
'alfresco-activity-admin-ssl' + protocol: TCP + port: 8443 + targetPort: 8443 + {{- if .Values.general.serviceType }} + type: {{ .Values.general.serviceType }} + {{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/alfresco-activity/alfresco-activity-config.yaml b/xenit-aps/templates/alfresco-activity/alfresco-activity-config.yaml new file mode 100644 index 0000000..8f86843 --- /dev/null +++ b/xenit-aps/templates/alfresco-activity/alfresco-activity-config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: alfresco-activity-configmap + namespace: {{ .Release.Namespace | quote }} + labels: + app: alfresco-activity +data: + DB_DRIVER: {{ quote .Values.acs.dbDriver }} # TODO + {{- if .Values.activity.additionalEnvironmentVariables }} + {{ toYaml .Values.activity.additionalEnvironmentVariables | nindent 2 }} + {{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/alfresco-activity/alfresco-activity-deployment.yaml b/xenit-aps/templates/alfresco-activity/alfresco-activity-deployment.yaml new file mode 100644 index 0000000..2f7c22f --- /dev/null +++ b/xenit-aps/templates/alfresco-activity/alfresco-activity-deployment.yaml @@ -0,0 +1,108 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alfresco-activity + namespace: {{ .Release.Namespace | quote }} + labels: + app: alfresco-activity +spec: + replicas: {{ .Values.activity.replicas }} + selector: + matchLabels: + app: alfresco-activity + strategy: + {{- if eq .Values.activity.strategy.type "Recreate" }} + type: {{ .Values.activity.strategy.type }} + {{- else }} + {{ toYaml .Values.general.strategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: alfresco-activity + annotations: + checksum/alfresco-activity-config: {{ include (print $.Template.BasePath "/alfresco-activity/alfresco-activity-config.yaml") . | sha256sum }} + checksum/db-secret: {{ include (print $.Template.BasePath "/db-secret.yaml") . 
| sha256sum }} + spec: + {{- if .Values.activity.serviceAccount }} + serviceAccountName: {{ .Values.activity.serviceAccount }} + {{- end }} + containers: + - name: alfresco-activity-container + image: {{ .Values.activity.image.registry }}/{{ .Values.activity.image.repository }}:{{ .Values.activity.image.tag }} + imagePullPolicy: {{ .Values.activity.imagePullPolicy | default "IfNotPresent" }} + {{- if .Values.activity.readinessProbe }} + readinessProbe: + {{ toYaml .Values.activity.readinessProbe | nindent 10 }} + {{- end }} + {{- if .Values.activity.livenessProbe }} + livenessProbe: + {{ toYaml .Values.activity.livenessProbe | nindent 10 }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - sleep 20 + envFrom: + - configMapRef: + name: alfresco-activity-configmap + - secretRef: + name: db-secret + {{- if .Values.activity.envFrom }} + {{ toYaml .Values.activity.envFrom | nindent 10 }} + {{- end }} + ports: + - containerPort: 8080 + protocol: TCP + - containerPort: 5701 + protocol: TCP + - containerPort: 8443 + protocol: TCP + {{- if or (.Values.activity.resources.requests) ((.Values.activity.resources.limits)) }} + resources: + {{- if .Values.activity.resources.requests }} + requests: + {{ toYaml .Values.activity.resources.requests | nindent 14 }} + {{- end }} + {{- if .Values.activity.resources.limits }} + limits: + {{ toYaml .Values.activity.resources.limits | nindent 14 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.persistentStorage.alfresco.enabled }} + - name: data + mountPath: /opt/alfresco/alf_data + subPath: alfresco/data + {{- end }} + {{- if .Values.persistentStorage.alfresco.additionalClaims }} + {{- range .Values.persistentStorage.alfresco.additionalClaims }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- if .Values.activity.additionalVolumeMounts }} + {{- toYaml .Values.activity.additionalVolumeMounts | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.persistentStorage.activity.additionalClaims }} + {{- range .Values.persistentStorage.activity.additionalClaims }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .name }}-pvc + {{- end }} + {{- end }} + {{- if .Values.activity.additionalVolumes }} + {{ toYaml .Values.activity.additionalVolumes | nindent 8 }} + {{- end }} + imagePullSecrets: + {{- if .Values.general.imagePullSecrets}} + {{ toYaml .Values.general.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.activity.imagePullSecrets}} + {{ toYaml .Values.activity.imagePullSecrets | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/alfresco-activity/alfresco-activity-service.yaml b/xenit-aps/templates/alfresco-activity/alfresco-activity-service.yaml new file mode 100644 index 0000000..ab9983b --- /dev/null +++ b/xenit-aps/templates/alfresco-activity/alfresco-activity-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: alfresco-activity-service + namespace: {{ .Release.Namespace | quote }} + annotations: + {{- if .Values.activity.serviceAnnotations }} + {{ toYaml .Values.activity.serviceAnnotations | nindent 4 }} + {{- end }} +spec: + selector: + app: alfresco-activity + ports: + - name: 'alfresco-activity' + protocol: TCP + port: 30000 + targetPort: 8080 + - name: 'alfresco-activity-ssl' + protocol: TCP + port: 8443 + targetPort: 8443 + {{- if .Values.general.serviceType }} + type: {{ .Values.general.serviceType }} + {{- end }} \ No newline at end of file diff --git 
a/xenit-aps/templates/db-secret.yaml b/xenit-aps/templates/db-secret.yaml new file mode 100644 index 0000000..d570a73 --- /dev/null +++ b/xenit-aps/templates/db-secret.yaml @@ -0,0 +1,47 @@ +{{- if not .Values.general.secrets.db.selfManaged }} +{{- $db_secret_name := "db-secret" }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ $db_secret_name }} + +data: + # try to get the old secret and sync-service-configmap + # keep in mind, that a dry-run only returns an empty map + {{- $old_db_sec := lookup "v1" "Secret" .Release.Namespace $db_secret_name }} + + # check, if a secret isn't already set and no user is passed + {{- if and (not $old_db_sec) (not $old_db_sec.data) (not .Values.general.db.username) }} + {{- $generated_db_user := randAlphaNum 20 | b64enc -}} + # if not set and not passed, then generate a new user + DB_USERNAME: {{ $generated_db_user }} + # Setting the data to both be compatible with the Postgres Pods and the ACS Pods + POSTGRES_USER: {{ $generated_db_user }} + # check if the secret exists and no user is passed + {{- else if and ($old_db_sec) ($old_db_sec.data) (not .Values.general.db.username) }} + # if set and not passed, then use the old value + DB_USERNAME: {{ index $old_db_sec.data "DB_USERNAME" }} + POSTGRES_USER: {{ index $old_db_sec.data "POSTGRES_USER" }} + {{- else }} + DB_USERNAME: {{ .Values.general.db.username | b64enc }} + POSTGRES_USER: {{ .Values.general.db.username | b64enc }} + {{- end }} + + # check, if a secret isn't already set and no password is passed + {{- if and (not $old_db_sec) (not $old_db_sec.data) (not .Values.general.db.password) }} + {{- $generated_db_password := randAlphaNum 20 | b64enc -}} + # if not set and not passed, then generate a new password + DB_PASSWORD: {{ $generated_db_password }} + # Setting the data to both be compatible with the Postgres Pods and the ACS Pods + POSTGRES_PASSWORD: {{ $generated_db_password }} + # check if the secret exists and no password is passed + {{- else if and ($old_db_sec) ($old_db_sec.data) (not .Values.general.db.password) }} + # if set and not passed, then use the old value + DB_PASSWORD: {{ index $old_db_sec.data "DB_PASSWORD" }} + POSTGRES_PASSWORD: {{ index $old_db_sec.data "POSTGRES_PASSWORD" }} + {{- else }} + DB_PASSWORD: {{ .Values.general.db.password | b64enc }} + POSTGRES_PASSWORD: {{ .Values.general.db.password | b64enc }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/helpers/_globalAnnotations.tpl b/xenit-aps/templates/helpers/_globalAnnotations.tpl new file mode 100644 index 0000000..203fee9 --- /dev/null +++ b/xenit-aps/templates/helpers/_globalAnnotations.tpl @@ -0,0 +1,5 @@ +{{- define "globalPodAnnotations" }} +{{- if .Values.general.podAnnotations }} +{{ toYaml .Values.general.podAnnotations }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/helpers/_volume-helper.tpl b/xenit-aps/templates/helpers/_volume-helper.tpl new file mode 100644 index 0000000..6f4877c --- /dev/null +++ b/xenit-aps/templates/helpers/_volume-helper.tpl @@ -0,0 +1,75 @@ +{{- define "hepers.volumeHelper" }} +{{- $namespace := index . 0 }} +{{- $name := index . 1 }} +{{- $storageClassName := index . 2 }} +{{- $storage := index . 3 }} +{{- $efsVolumeHandle := index . 
4 }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ $name }}-pvc + namespace: {{ $namespace | quote }} +spec: + accessModes: + {{- if eq $storageClassName "efs-storage-class" }} + - ReadWriteMany + {{- else }} + - ReadWriteOnce + {{- end }} + resources: + requests: + storage: {{ $storage }}Gi + {{- if ne $storageClassName "" }} + storageClassName: {{ $storageClassName }} + {{- end }} +--- +{{- if eq $storageClassName "standard" }} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ $name }}-pv +spec: + capacity: + storage: {{ $storage }}Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + {{- if ne $storageClassName "" }} + storageClassName: {{ $storageClassName }} + {{- end }} + claimRef: + apiVersion: v1 + kind: PersistentVolumeClaim + name: {{ $name }}-pvc + namespace: {{ $namespace }} + hostPath: + path: /var/local-path-provisioner/shared-storage/alfresco-volume-claim + type: DirectoryOrCreate +{{- end }} +--- +{{- if eq $storageClassName "efs-storage-class" }} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ $name }}-pv +spec: + capacity: + storage: {{ $storage }}Gi + volumeMode: Filesystem + claimRef: + apiVersion: v1 + kind: PersistentVolumeClaim + name: {{ $name }}-pvc + namespace: {{ $namespace }} + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + {{- if ne $storageClassName "" }} + storageClassName: {{ $storageClassName }} + {{- end }} + csi: + driver: efs.csi.aws.com + volumeHandle: {{ $efsVolumeHandle }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/ingress/alfresco-ingress.yaml b/xenit-aps/templates/ingress/alfresco-ingress.yaml new file mode 100644 index 0000000..62e3dbb --- /dev/null +++ b/xenit-aps/templates/ingress/alfresco-ingress.yaml @@ -0,0 +1,91 @@ +# Defines the deployment for the alfresco content repository app +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: alfresco-ingress + namespace: {{ .Release.Namespace | quote }} + annotations: + {{- if .Values.ingress.ingressAnnotations}} + {{ toYaml .Values.ingress.ingressAnnotations | nindent 4 }} + {{- end }} +spec: + {{- if eq .Values.ingress.protocol "https" }} + tls: + - hosts: + # Provide the desired host + - {{ required "A host where your alfresco services can be reached on must be specified in values.ingress.host" .Values.ingress.host }} + {{- if .Values.syncService.enabled }} + - {{ required "If sync Services are enabled a specific host for sync services must be specified in values.ingress.syncServiceHost" .Values.ingress.syncServiceHost }} + {{- end }} + # Provide a unique secret to store the SSL credentials + secretName: tls-alfresco-{{ .Release.Name }}-secret + {{- end }} + defaultBackend: + service: + name: {{ .Values.ingress.defaultBackend.service }} + port: + number: {{ .Values.ingress.defaultBackend.port }} + rules: + - host: {{ required "A host where your alfresco services can be reached on must be specified in values.ingress.host" .Values.ingress.host }} + http: + paths: + - path: /alfresco + pathType: Prefix + backend: + service: + name: acs-service + port: + number: 30000 + {{- if .Values.share.enabled }} + - path: /share + pathType: Prefix + backend: + service: + name: share-service + port: + number: 30100 + {{- end }} + {{- if .Values.digitalWorkspace.enabled }} + - path: {{ .Values.digitalWorkspace.basePath }} + pathType: Prefix + backend: + service: + name: digital-workspace-service + port: + number: 30200 + {{- end }} + {{- if 
.Values.ooi.enabled }} + - path: /ooi-service + pathType: Prefix + backend: + service: + name: ooi-service + port: + number: 30500 + {{- end }} + {{- if .Values.ingress.blockAcsSolrApi.enabled }} + {{- range .Values.ingress.blockAcsSolrApi.paths }} + - path: {{ . }} + pathType: Prefix + backend: + service: + name: nginx-403-service + port: + number: 30403 + {{- end }} + {{- end }} + {{- if .Values.ingress.additionalPaths }} + {{ toYaml .Values.ingress.additionalPaths | nindent 6 }} + {{- end }} + {{- if .Values.syncService.enabled }} + - host: {{ required "If sync Services are enabled a specific host for sync services must be specified in values.ingress.syncServiceHost" .Values.ingress.syncServiceHost }} + http: + paths: + - path: /alfresco + pathType: Prefix + backend: + service: + name: sync-service-service + port: + number: 30400 + {{- end }} diff --git a/xenit-aps/templates/ingress/nginx-403-config.yaml b/xenit-aps/templates/ingress/nginx-403-config.yaml new file mode 100644 index 0000000..f9d2e2a --- /dev/null +++ b/xenit-aps/templates/ingress/nginx-403-config.yaml @@ -0,0 +1,26 @@ +{{- if .Values.ingress.blockAcsSolrApi.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-403-configmap + namespace: {{ .Release.Namespace | quote }} + labels: + app: nginx-403 +data: + nginx.conf: | + worker_processes 1; + + events { + worker_connections 1024; + } + http { + server { + listen 80; + server_name _; + + location / { + return 403 'Forbidden'; + } + } + } +{{- end }} diff --git a/xenit-aps/templates/ingress/nginx-403-deployment.yaml b/xenit-aps/templates/ingress/nginx-403-deployment.yaml new file mode 100644 index 0000000..268d285 --- /dev/null +++ b/xenit-aps/templates/ingress/nginx-403-deployment.yaml @@ -0,0 +1,32 @@ +{{- if .Values.ingress.blockAcsSolrApi.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-403 + namespace: {{ .Release.Namespace | quote }} + labels: + app: nginx-403 +spec: + replicas: 1 + selector: + matchLabels: + app: nginx-403 + template: + metadata: + labels: + app: nginx-403 + spec: + containers: + - name: nginx + image: nginx:alpine + ports: + - containerPort: 80 + volumeMounts: + - name: config-volume + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + volumes: + - name: config-volume + configMap: + name: nginx-403-configmap +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/ingress/nginx-403-service.yaml b/xenit-aps/templates/ingress/nginx-403-service.yaml new file mode 100644 index 0000000..1f72ad4 --- /dev/null +++ b/xenit-aps/templates/ingress/nginx-403-service.yaml @@ -0,0 +1,17 @@ +{{- if .Values.ingress.blockAcsSolrApi.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: nginx-403-service + namespace: {{ .Release.Namespace | quote }} +spec: + {{- if .Values.general.serviceType }} + type: {{ .Values.general.serviceType }} + {{- end }} + selector: + app: nginx-403 + ports: + - port: 30403 + targetPort: 80 + protocol: TCP +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/postgres/network-policy.yaml b/xenit-aps/templates/postgres/network-policy.yaml new file mode 100644 index 0000000..2ef2ef6 --- /dev/null +++ b/xenit-aps/templates/postgres/network-policy.yaml @@ -0,0 +1,42 @@ +{{- if .Values.general.networkPolicies.enabled }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + namespace: {{ .Release.Namespace }} + name: postgresql-from-acs +spec: + podSelector: + matchLabels: + app: postgresql + policyTypes: + - Ingress + ingress: + - from: + - podSelector: 
matchLabels: + app: acs + ports: + - protocol: TCP + port: 5432 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + namespace: {{ .Release.Namespace }} + name: postgresql-from-sync-service +spec: + podSelector: + matchLabels: + app: postgresql + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: sync-service + ports: + - protocol: TCP + port: 5432 +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/postgres/postgresql-config.yaml b/xenit-aps/templates/postgres/postgresql-config.yaml new file mode 100644 index 0000000..672c9d5 --- /dev/null +++ b/xenit-aps/templates/postgres/postgresql-config.yaml @@ -0,0 +1,15 @@ +{{- if .Values.postgresql.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgresql-configmap + namespace: {{ .Release.Namespace | quote }} + labels: + app: postgresql +data: + POSTGRES_DB: 'alfresco' + RELEASE_NAME: postgresql + {{- if .Values.postgresql.additionalEnvironmentVariables }} + {{ toYaml .Values.postgresql.additionalEnvironmentVariables | nindent 2 }} + {{- end }} +{{- end }} diff --git a/xenit-aps/templates/postgres/postgresql-deployment.yaml b/xenit-aps/templates/postgres/postgresql-deployment.yaml new file mode 100644 index 0000000..93a3fc7 --- /dev/null +++ b/xenit-aps/templates/postgres/postgresql-deployment.yaml @@ -0,0 +1,80 @@ +{{- if .Values.postgresql.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgresql + namespace: {{ .Release.Namespace | quote }} + labels: + app: postgresql +spec: + replicas: 1 + selector: + matchLabels: + app: postgresql + strategy: + {{- if eq .Values.postgresql.strategy.type "Recreate" }} + type: {{ .Values.postgresql.strategy.type }} + {{- else }} + {{ toYaml .Values.general.strategy | nindent 4 }} + {{- end }} + {{- if .Values.postgresql.serviceAccount }} + serviceAccountName: {{ .Values.postgresql.serviceAccount }} + {{- end }} + template: + metadata: + labels: + app: postgresql + annotations: + checksum/postgresql-config: {{ include (print $.Template.BasePath "/postgres/postgresql-config.yaml") . | sha256sum }} + checksum/db-secret: {{ include (print $.Template.BasePath "/db-secret.yaml") . | sha256sum }} + {{- include "globalPodAnnotations" . 
| indent 8 }} + {{- if .Values.postgresql.podAnnotations }} + {{ toYaml .Values.postgresql.podAnnotations | nindent 8 }} + {{- end }} + spec: + containers: + - name: postgresql-container + image: {{ .Values.postgresql.image.registry }}/{{ .Values.postgresql.image.repository }}:{{ .Values.postgresql.image.tag }} + imagePullPolicy: {{ .Values.postgresql.imagePullPolicy | default "IfNotPresent" }} + envFrom: + - configMapRef: + name: postgresql-configmap + - secretRef: + name: db-secret + {{- if .Values.postgresql.envFrom }} + {{ toYaml .Values.postgresql.envFrom | nindent 10 }} + {{- end }} + ports: + - containerPort: 5432 + protocol: TCP + {{- if .Values.persistentStorage.postgres.enabled }} + volumeMounts: + - name: data + mountPath: /var/lib/postgresql/data + subPath: postgres/data + {{- end }} + {{- if or (.Values.postgresql.resources.requests) ((.Values.postgresql.resources.limits)) }} + resources: + {{- if .Values.postgresql.resources.requests }} + requests: + {{ toYaml .Values.postgresql.resources.requests | nindent 12 }} + {{- end }} + {{- if .Values.postgresql.resources.limits }} + limits: + {{ toYaml .Values.postgresql.resources.limits | nindent 12 }} + {{- end }} + {{- end }} + imagePullSecrets: + {{- if .Values.general.imagePullSecrets}} + {{ toYaml .Values.general.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.postgresql.imagePullSecrets}} + {{ toYaml .Values.postgresql.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.persistentStorage.postgres.enabled }} + volumes: + - name: data + persistentVolumeClaim: + claimName: postgres-pvc + {{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/postgres/postgresql-service.yaml b/xenit-aps/templates/postgres/postgresql-service.yaml new file mode 100644 index 0000000..98cdac7 --- /dev/null +++ b/xenit-aps/templates/postgres/postgresql-service.yaml @@ -0,0 +1,18 @@ +{{- if .Values.postgresql.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: postgresql-service + namespace: {{ .Release.Namespace | quote }} + annotations: + {{- if .Values.postgresql.serviceAnnotations }} + {{ toYaml .Values.postgresql.serviceAnnotations | nindent 4 }} + {{- end }} +spec: + selector: + app: postgresql + ports: + - protocol: TCP + port: 5432 + targetPort: 5432 +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/storage/alfresco-volumes.yaml b/xenit-aps/templates/storage/alfresco-volumes.yaml new file mode 100644 index 0000000..5a292be --- /dev/null +++ b/xenit-aps/templates/storage/alfresco-volumes.yaml @@ -0,0 +1,19 @@ +{{- $namespace := .Release.Namespace -}} +{{- with .Values.persistentStorage.alfresco }} +{{- if .enabled}} +{{- $name := "alfresco" -}} +{{- $storageClassName := .storageClassName -}} +{{- $storage := .storage -}} +{{- $efsVolumeHandle := .efs.volumeHandle -}} +{{- include "hepers.volumeHelper" (list $namespace $name $storageClassName $storage $efsVolumeHandle) }} +{{- end }} +{{- if .additionalClaims }} +{{- range .additionalClaims }} +{{- $name := .name -}} +{{- $storageClassName := .storageClassName -}} +{{- $storage := .storage -}} +{{- $efsVolumeHandle := .efs.volumeHandle -}} +{{- include "hepers.volumeHelper" (list $namespace $name $storageClassName $storage $efsVolumeHandle) }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/storage/mq-volumes.yaml b/xenit-aps/templates/storage/mq-volumes.yaml new file mode 100644 index 0000000..75a676a --- /dev/null +++ b/xenit-aps/templates/storage/mq-volumes.yaml 
@@ -0,0 +1,12 @@ +{{- if .Values.mq.enabled}} +{{- $namespace := .Release.Namespace -}} +{{- with .Values.persistentStorage.mq }} +{{- if .enabled}} +{{- $name := "mq" -}} +{{- $storageClassName := .storageClassName -}} +{{- $storage := .storage -}} +{{- $efsVolumeHandle := .efs.volumeHandle -}} +{{- include "hepers.volumeHelper" (list $namespace $name $storageClassName $storage $efsVolumeHandle) }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/storage/postgress-volumes.yaml b/xenit-aps/templates/storage/postgress-volumes.yaml new file mode 100644 index 0000000..328705e --- /dev/null +++ b/xenit-aps/templates/storage/postgress-volumes.yaml @@ -0,0 +1,10 @@ +{{- $namespace := .Release.Namespace -}} +{{- with .Values.persistentStorage.postgres }} +{{- if .enabled}} +{{- $name := "postgres" -}} +{{- $storageClassName := .storageClassName -}} +{{- $storage := .storage -}} +{{- $efsVolumeHandle := .efs.volumeHandle -}} +{{- include "hepers.volumeHelper" (list $namespace $name $storageClassName $storage $efsVolumeHandle) }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/storage/shared-file-store-volumes.yaml b/xenit-aps/templates/storage/shared-file-store-volumes.yaml new file mode 100644 index 0000000..e040afd --- /dev/null +++ b/xenit-aps/templates/storage/shared-file-store-volumes.yaml @@ -0,0 +1,10 @@ +{{- $namespace := .Release.Namespace -}} +{{- with .Values.persistentStorage.sharedFileStore }} +{{- if .enabled}} +{{- $name := "shared-file-store" -}} +{{- $storageClassName := .storageClassName -}} +{{- $storage := .storage -}} +{{- $efsVolumeHandle := .efs.volumeHandle -}} +{{- include "hepers.volumeHelper" (list $namespace $name $storageClassName $storage $efsVolumeHandle) }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/xenit-aps/templates/storage/storage-class.yaml b/xenit-aps/templates/storage/storage-class.yaml new file mode 100644 index 0000000..9ea1a80 --- /dev/null +++ b/xenit-aps/templates/storage/storage-class.yaml @@ -0,0 +1,7 @@ +{{- if or (eq .Values.persistentStorage.alfresco.storageClassName "efs-storage-class") (eq .Values.persistentStorage.postgres.storageClassName "efs-storage-class") (eq .Values.persistentStorage.solr.storageClassName "efs-storage-class") (eq .Values.persistentStorage.sharedFileStore.storageClassName "efs-storage-class") -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: efs-storage-class +provisioner: efs.csi.aws.com +{{- end }} \ No newline at end of file diff --git a/xenit-aps/values.yaml b/xenit-aps/values.yaml new file mode 100644 index 0000000..08f3022 --- /dev/null +++ b/xenit-aps/values.yaml @@ -0,0 +1,322 @@ +general: + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + db: {} + cni: cilium + networkPolicies: + enabled: true + secrets: + acs: + selfManaged: false + mq: + selfManaged: false + db: + selfManaged: false + imageCredentials: + selfManaged: false + +ingress: + host: "test" + protocol: 'https' + ingressAnnotations: + kubernetes.io/ingress.class: "nginx" + cert-manager.io/cluster-issuer: "letsencrypt-production" + defaultBackend: + service: acs-service + port: 30000 + blockAcsSolrApi: + enabled: true + paths: + - /alfresco/s/api/solr + - /alfresco/service/api/solr + - /alfresco/service/api/solr + - /alfresco/wcservice/api/solr +acs: + replicas: 1 + image: + registry: 'docker.io' + repository: 'xenit/alfresco-repository-community' + tag: '7.3.0' + strategy: + type: 
RollingUpdate + resources: + requests: + memory: "2Gi" + cpu: "2" + hpa: + enabled: false + minReplicas: 1 + maxReplicas: 10 + cpu: + enabled: false + utilization: 70 + memory: + enabled: true + utilization: 70 + dbUrl: 'jdbc:postgresql://postgresql-service:5432/alfresco' + dbDriver: 'org.postgresql.Driver' + sharePort: '443' + shareProtocol: 'https' + volumeMounts: + - mountPath: >- + /usr/local/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap-ad/oup-ad1 + name: ldap1-ad-auth-volume + readOnly: true + - mountPath: >- + /usr/local/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap-ad/oup-ad2 + name: ldap2-ad-auth-volume + readOnly: true + - mountPath: >- + /usr/local/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap-ad/oup-ad3 + name: ldap3-ad-auth-volume + readOnly: true + volumes: + - configMap: + defaultMode: 420 + items: + - key: ldap-ad-authentication.properties + path: ldap-ad-authentication.properties + name: ldap1-ad-auth-config + name: ldap1-ad-auth-volume + - configMap: + defaultMode: 420 + items: + - key: ldap-ad-authentication.properties + path: ldap-ad-authentication.properties + name: ldap2-ad-auth-config + name: ldap2-ad-auth-volume + - configMap: + defaultMode: 420 + items: + - key: ldap-ad-authentication.properties + path: ldap-ad-authentication.properties + name: ldap3-ad-auth-config + name: ldap3-ad-auth-volume + livenessProbe: + httpGet: + path: /alfresco/api/-default-/public/alfresco/versions/1/probes/-live- + port: 8080 + scheme: HTTP + failureThreshold: 1 + initialDelaySeconds: 130 + periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: /alfresco/api/-default-/public/alfresco/versions/1/probes/-ready- + port: 8080 + scheme: HTTP + failureThreshold: 6 + initialDelaySeconds: 60 + periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 10 +digitalWorkspace: + enabled: true + replicas: 1 + image: + registry: 'quay.io' + repository: 'alfresco/alfresco-digital-workspace' + tag: '3.0.0' + strategy: + type: RollingUpdate + resources: + requests: + memory: "256Mi" + cpu: "150m" + basePath: "/workspace" + +share: + enabled: true + mergeAcsShare: false + replicas: 1 + image: + registry: 'docker.io' + repository: 'xenit/alfresco-share-community' + tag: '7.3' + strategy: + type: RollingUpdate + resources: + requests: + memory: "256Mi" + cpu: "0.5" + livenessProbe: + failureThreshold: 1 + initialDelaySeconds: 130 + periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 60 + periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 10 + +mq: + enabled: true + replicas: 1 + image: + registry: 'docker.io' + repository: 'alfresco/alfresco-activemq' + tag: '5.18.3-jre17-rockylinux8@sha256:25386b20263b7e838605e07fea2713fb65762010c1c677cf82aecefbaed5d227' + strategy: + type: Recreate + resources: + requests: + memory: "512Mi" + cpu: "0.5" + +postgresql: + enabled: true + image: + registry: 'docker.io' + repository: 'xenit/postgres' + tag: 'latest' + strategy: + type: RollingUpdate + resources: + requests: + memory: "1Gi" + cpu: "1" + +solr: + enabled: true + replicas: 2 + podManagementPolicy: Parallel + image: + registry: 'docker.io' + repository: 'xenit/alfresco-solr6-xenit' + tag: '2.0.6' + strategy: + type: RollingUpdate + resources: + requests: + memory: "4Gi" + cpu: "1" + autoBackup: + enabled: false + cron: "0 * * * *" + backupUrl: 
"http://solr-service:30300/solr/alfresco/replication?command=backup&repository=s3&location=s3:///&numberToKeep=3&wt=json" + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 +transformServices: + enabled: true + sharedFileStore: + enabled: true + replicas: 1 + image: + registry: 'quay.io' + repository: 'alfresco/alfresco-shared-file-store' + tag: '3.0.0' + strategy: + type: RollingUpdate + resources: + requests: + memory: "512Mi" + cpu: "200m" + transformCoreAio: + enabled: true + replicas: 1 + image: + registry: 'docker.io' + repository: 'alfresco/alfresco-transform-core-aio' + tag: 'latest' + strategy: + type: RollingUpdate + resources: + requests: + memory: "1600Mi" + cpu: "150m" + livenessProbe: + enabled: true + transformRouter: + enabled: true + replicas: 1 + image: + registry: 'quay.io' + repository: 'alfresco/alfresco-transform-router' + tag: '1.5.2' + strategy: + type: RollingUpdate + resources: + requests: + memory: "128Mi" + cpu: "100m" + +syncService: + enabled: false + replicas: 1 + image: + registry: 'quay.io' + repository: 'alfresco/service-sync' + tag: '3.4.0' + strategy: + type: RollingUpdate + resources: + requests: + memory: "512Mi" + cpu: "0.5" + +ooi: + enabled: false + replicas: 1 + image: + registry: 'quay.io' + repository: 'alfresco/alfresco-ooi-service' + tag: '1.1.2' + strategy: + type: RollingUpdate + resources: + requests: + memory: "128Mi" + cpu: "100m" + +persistentStorage: + alfresco: + enabled: true + storageClassName: "" # Standard for local / scw-bssd for scaleway / efs-storage-class for efs + storage: 3 + efs: + volumeHandle: "" + postgres: + enabled: true + storageClassName: "" + storage: 2 + efs: + volumeHandle: "" + solr: + enabled: true + storageClassName: "" + storage: 3 + efs: + volumeHandle: "" + solrBackup: + enabled: true + storageClassName: "" + storage: 3 + efs: + volumeHandle: "" + sharedFileStore: + enabled: true + initVolumes: true + storageClassName: "" + storage: 2 + efs: + volumeHandle: "" + mq: + enabled: true + initVolumes: true + storageClassName: "" + storage: 1 + efs: + volumeHandle: "" \ No newline at end of file