diff --git a/.github/workflows/build-test.yaml b/.github/workflows/build-test.yaml index c5e4c3d15b..2e3cead6e5 100644 --- a/.github/workflows/build-test.yaml +++ b/.github/workflows/build-test.yaml @@ -12,4002 +12,3963 @@ concurrency: cancel-in-progress: true jobs: - test-build-release: + can-run-ci: runs-on: ubuntu-20.04 + # if the event is pull_request and: + # - it is not a fork OR + # - it has the label '@actions/safe-to-test' + # + # The 'pull_request_target' workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. + # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + if: > + ( github.event_name == 'pull_request' && + ( github.event.pull_request.head.repo.full_name == github.repository || + contains(github.event.pull_request.labels.*.name, '@actions/safe-to-test' ) + ) + ) steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Run bundle registry - run: | - mkdir -p kotsadm-bundle/images - mkdir -p kotsadm-bundle-nominio/images - docker run -d -p 5000:5000 --restart=always --name registry -v "$(pwd)/kotsadm-bundle/images":/var/lib/registry registry:2 - docker run -d -p 5001:5000 --restart=always --name registry-nominio -v "$(pwd)/kotsadm-bundle-nominio/images":/var/lib/registry registry:2 - - - name: Build kotsadm bundle - env: - BUNDLE_DIR: kotsadm-bundle - BUNDLE_REGISTRY: localhost:5000 - GIT_TAG: ${{ needs.generate-tag.outputs.tag }} - run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make kotsadm-bundle - - - name: Build kotsadm bundle without minio - env: - BUNDLE_DIR: kotsadm-bundle-nominio - BUNDLE_REGISTRY: localhost:5001 - GIT_TAG: ${{ needs.generate-tag.outputs.tag }} - run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make kotsadm-bundle-nominio - - - name: Upload kotsadm bundle - uses: actions/upload-artifact@v4 - with: - name: kotsadm-bundle - path: 
kotsadm-bundle - - - name: Upload kotsadm bundle without minio - uses: actions/upload-artifact@v4 - with: - name: kotsadm-bundle-nominio - path: kotsadm-bundle-nominio - - # can-run-ci: - # runs-on: ubuntu-20.04 - # # if the event is pull_request and: - # # - it is not a fork OR - # # - it has the label '@actions/safe-to-test' - # # - # # The 'pull_request_target' workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. - # # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - # if: > - # ( github.event_name == 'pull_request' && - # ( github.event.pull_request.head.repo.full_name == github.repository || - # contains(github.event.pull_request.labels.*.name, '@actions/safe-to-test' ) - # ) - # ) - # steps: - # - name: ok - # run: echo "yes" - - # # Use this to disable tests when iteratig on a specific test to save time - # enable-tests: - # runs-on: ubuntu-20.04 - # steps: - # - name: ok - # # change 0 to a positive interger to prevent all tests from running - # run: exit 0 - - - # generate-tag: - # runs-on: ubuntu-20.04 - # outputs: - # tag: ${{ steps.get_tag.outputs.GIT_TAG }} - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: Get tags - # id: get_tag - # uses: ./.github/actions/version-tag - - - # deps-web: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: Setup Node.js environment - # uses: actions/setup-node@v4 - # with: - # node-version: '18.x' - # cache: yarn - # cache-dependency-path: web/yarn.lock - - # - name: Cache node_modules - # uses: actions/cache@v4 - # with: - # path: ${{ github.workspace }}/web/node_modules - # key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} - - # - 
name: Install web deps - # run: make -C web deps - - - # lint-web: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, deps-web ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: Setup Node.js environment - # uses: actions/setup-node@v4 - # with: - # node-version: '18.x' - # cache: yarn - # cache-dependency-path: web/yarn.lock - - # - name: Cache node_modules - # uses: actions/cache@v4 - # with: - # path: ${{ github.workspace }}/web/node_modules - # key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} - - # - name: Lint - # run: make -C web lint - - - # unit-test-web: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, deps-web ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: Setup Node.js environment - # uses: actions/setup-node@v4 - # with: - # node-version: '18.x' - # cache: yarn - # cache-dependency-path: web/yarn.lock - - # - name: Cache node_modules - # uses: actions/cache@v4 - # with: - # path: ${{ github.workspace }}/web/node_modules - # key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} - - # - name: Unit test - # run: make -C web test-unit - - - # build-web: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, deps-web, generate-tag ] - # steps: - # # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. 
- # # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - # # this action checks out the remote branch and runs CI - # - name: Checkout - # uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: Setup Node.js environment - # uses: actions/setup-node@v4 - # with: - # node-version: '18.x' - # cache: yarn - # cache-dependency-path: web/yarn.lock - - # - name: Cache node_modules - # uses: actions/cache@v4 - # with: - # path: ${{ github.workspace }}/web/node_modules - # key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} - - # - name: Build web - # env: - # GIT_TAG: ${{ needs.generate-tag.outputs.tag }} - # run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make -C web build-kotsadm - - # - name: Upload web artifact - # uses: actions/upload-artifact@v4 - # with: - # name: web - # path: ./web/dist - - # deps-kots: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - uses: actions/setup-go@v5 - # with: - # go-version: '^1.20.0' - # cache: true - - # - run: go mod download - - # vet-kots: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, deps-kots ] - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - uses: actions/setup-go@v5 - # with: - # go-version: '^1.20.0' - # cache: true - - # - name: vet - # run: make vet - - # ci-test-kots: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, deps-kots ] - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} 
- - # - uses: actions/setup-go@v5 - # with: - # go-version: '^1.20.0' - # cache: true - - # - name: test - # run: make ci-test - - - # build-kots: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, build-web, deps-kots, generate-tag ] - - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - uses: actions/setup-go@v5 - # with: - # go-version: '^1.20.0' - # cache: true - - # - name: Download web artifact - # uses: actions/download-artifact@v4 - # with: - # name: web - # path: ./web/dist - - # - name: Build kots - # env: - # GIT_TAG: ${{ needs.generate-tag.outputs.tag }} - # run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make kots - - # - uses: actions/upload-artifact@v4 - # with: - # name: kots - # path: ./bin/kots - - - # build-kotsadm-melange-packages: - # needs: [ can-run-ci, generate-tag ] - # strategy: - # fail-fast: true - # matrix: - # runner: [ - # {name: ubuntu-20.04, arch: amd64}, - # {name: arm64-runner-set, arch: arm64} - # ] - # runs-on: ${{ matrix.runner.name }} - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - # - uses: ./.github/actions/build-custom-melange-package - # with: - # context: deploy - # component: kotsadm - # git-tag: ${{ needs.generate-tag.outputs.tag }} - # arch: ${{ matrix.runner.arch }} - - # build-kotsadm: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, generate-tag, build-kotsadm-melange-packages ] - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - # - uses: ./.github/actions/build-custom-image-with-apko - # with: - # context: deploy - # component: kotsadm - # git-tag: ${{ needs.generate-tag.outputs.tag }} - # image-name: ttl.sh/automated-${{ 
github.run_id }}/kotsadm:24h - - - # build-kots-helm: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: main - # repository: replicatedhq/kots-helm - - # - name: Build KOTS Helm chart - # env: - # GIT_COMMIT: ${{ github.sha }} - # run: | - # curl -O -L "https://raw.githubusercontent.com/replicatedhq/kots/${GIT_COMMIT}/.image.env" - # mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" - - # export CHART_VERSION=0.0.${{ github.run_id }}-automated - # export KOTS_VERSION=24h - # export KOTS_TAG=24h - # export KOTSADM_REGISTRY=ttl.sh/automated-${{ github.run_id }} - - # envsubst < Chart.yaml.tmpl > Chart.yaml - # envsubst < values.yaml.tmpl > values.yaml - - # CHART_NAME=$(helm package . | rev | cut -d/ -f1 | rev) - # export CHART_NAME - # helm push "$CHART_NAME" oci://ttl.sh/automated-${{ github.run_id }} - - - # build-e2e: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - # steps: - # - uses: actions/setup-go@v5 - # with: - # go-version: '^1.20.0' - - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: Cache Go modules - # uses: actions/cache@v4 - # with: - # path: | - # ~/.cache/go-build - # ~/go/pkg/mod - # key: ${{ runner.os }}-go-e2e-${{ hashFiles('**/go.sum') }} - # restore-keys: | - # ${{ runner.os }}-go-e2e- - # - run: make -C e2e build deps - # - run: docker save e2e-deps -o e2e/bin/e2e-deps.tar - # - uses: actions/upload-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - - - # build-kurl-proxy-melange-packages: - # needs: [ can-run-ci, generate-tag ] - # strategy: - # fail-fast: true - # matrix: - # runner: [ - # {name: ubuntu-20.04, arch: amd64}, - # {name: arm64-runner-set, arch: arm64} - # ] - # runs-on: ${{ matrix.runner.name }} - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: 
${{github.event.pull_request.head.repo.full_name}} - # - uses: ./.github/actions/build-custom-melange-package - # with: - # context: kurl_proxy/deploy - # component: kurl-proxy - # git-tag: ${{ needs.generate-tag.outputs.tag }} - # arch: ${{ matrix.runner.arch }} - - # build-kurl-proxy: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, generate-tag, build-kurl-proxy-melange-packages ] - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - # - uses: ./.github/actions/build-custom-image-with-apko - # with: - # context: kurl_proxy/deploy - # component: kurl-proxy - # git-tag: ${{ needs.generate-tag.outputs.tag }} - # image-name: ttl.sh/automated-${{ github.run_id }}/kurl-proxy:24h - - - # build-migrations-melange-packages: - # needs: [ can-run-ci, generate-tag ] - # strategy: - # fail-fast: true - # matrix: - # runner: [ - # {name: ubuntu-20.04, arch: amd64}, - # {name: arm64-runner-set, arch: arm64} - # ] - # runs-on: ${{ matrix.runner.name }} - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - # - uses: ./.github/actions/build-custom-melange-package - # with: - # context: migrations/deploy - # component: kotsadm-migrations - # git-tag: ${{ needs.generate-tag.outputs.tag }} - # arch: ${{ matrix.runner.arch }} - - # build-migrations: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, generate-tag, build-migrations-melange-packages ] - # steps: - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - # - uses: ./.github/actions/build-custom-image-with-apko - # with: - # context: migrations/deploy - # component: kotsadm-migrations - # git-tag: ${{ needs.generate-tag.outputs.tag }} - # image-name: ttl.sh/automated-${{ github.run_id 
}}/kotsadm-migrations:24h - - # push-minio: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - # steps: - # # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. - # # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - # # this action creates a branch based on remote branch and runs the tests - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: load environment variables from .image.env - # uses: falti/dotenv-action@v1 - # id: dotenv - # with: - # path: .image.env - - # - name: push minio - # run: skopeo copy --all docker://kotsadm/minio:${{ steps.dotenv.outputs.MINIO_TAG }} docker://ttl.sh/automated-${{ github.run_id }}/minio:${{ steps.dotenv.outputs.MINIO_TAG }} - - - # push-rqlite: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - # steps: - # # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. 
- # # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - # # this action creates a branch based on remote branch and runs the tests - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: load environment variables from .image.env - # uses: falti/dotenv-action@v1 - # id: dotenv - # with: - # path: .image.env - - # - name: push rqlite - # run: skopeo copy --all docker://kotsadm/rqlite:${{ steps.dotenv.outputs.RQLITE_TAG }} docker://ttl.sh/automated-${{ github.run_id }}/rqlite:${{ steps.dotenv.outputs.RQLITE_TAG }} - - # push-dex: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci ] - # steps: - # # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. - # # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - # # this action creates a branch based on remote branch and runs the tests - # - uses: actions/checkout@v4 - # with: - # ref: ${{github.event.pull_request.head.ref}} - # repository: ${{github.event.pull_request.head.repo.full_name}} - - # - name: load environment variables from .image.env - # uses: falti/dotenv-action@v1 - # id: dotenv - # with: - # path: .image.env - - # - name: push dex - # run: skopeo copy --all docker://kotsadm/dex:${{ steps.dotenv.outputs.DEX_TAG }} docker://ttl.sh/automated-${{ github.run_id }}/dex:${{ steps.dotenv.outputs.DEX_TAG }} - - # # only run validate-kurl-addon if changes to "deploy/kurl/kotsadm/template/**" - # kurl-addon-changes-filter: - # runs-on: ubuntu-20.04 - # needs: [ can-run-ci, enable-tests ] - # outputs: - # ok-to-test: ${{ steps.filter.outputs.kurl-addon }} - # steps: - # - uses: actions/checkout@v4 - # - uses: dorny/paths-filter@v2 - # id: filter - # with: - # filters: | - # kurl-addon: - # - 'deploy/kurl/kotsadm/template/**' - # - 
'deploy/kurl/kotsadm/testgrid-os-spec.yaml' - # validate-kurl-addon: - # runs-on: ubuntu-20.04 - # if: ${{ needs.kurl-addon-changes-filter.outputs.ok-to-test == 'true' }} - # needs: [ can-run-ci, enable-tests, generate-tag, kurl-addon-changes-filter, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-dex ] - # steps: - # - name: checkout - # uses: actions/checkout@v4 - # - name: set outputs - # id: vars - # run: | - # addon_version=${{ needs.generate-tag.outputs.tag }} - # echo "addon_version=${addon_version#v}" >> "$GITHUB_OUTPUT" - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - name: prepare kots binary executable - # run: | - # chmod +x bin/* - # tar -C bin/ -czvf bin/kots.tar.gz kots - # - name: generate kurl add-on - # id: addon-generate - # uses: ./.github/actions/kurl-addon-kots-generate - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.KURL_ADDONS_AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.KURL_ADDONS_AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: us-east-1 - # with: - # addon_version: ${{ steps.vars.outputs.addon_version }} - # kotsadm_image_registry: ttl.sh - # kotsadm_image_namespace: automated-${{ github.run_id }} - # kotsadm_image_tag: 24h - # kotsadm_binary_override: bin/kots.tar.gz - # - name: test kurl add-on - # id: addon-test - # uses: ./.github/actions/kurl-addon-kots-test - # with: - # addon_version: ${{ steps.vars.outputs.addon_version }} - # addon_package_url: ${{ steps.addon-generate.outputs.addon_package_url }} - # testgrid_api_token: ${{ secrets.TESTGRID_PROD_API_TOKEN }} - # - name: comment testgrid url - # uses: mshick/add-pr-comment@v2 - # with: - # message: ${{ steps.addon-test.outputs.testgrid_run_message }} - # repo-token: ${{ secrets.GITHUB_TOKEN }} - # allow-repeats: false - - - # cmx-versions: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci ] - # steps: - # - uses: actions/checkout@v4 - # with: - # fetch-depth: 0 - # - 
name: get CMX versions to test - # id: cmx-versions-to-test - # uses: ./.github/actions/cmx-versions - # with: - # replicated-api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # distros: 'k3s,openshift' - # outputs: - # versions-to-test: ${{ steps.cmx-versions-to-test.outputs.versions-to-test }} - - - # validate-existing-online-install-minimal: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: | - # docker load -i e2e/bin/e2e-deps.tar - # chmod +x e2e/bin/* - # chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # kots-namespace: 'qakotsregression' - # test-focus: 'Regression' - # k8s-distribution: k3s - # k8s-version: v1.26 - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-smoke-test: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} - # continue-on-error: ${{ matrix.cluster.stage != 'stable' }} - # steps: - # - name: Checkout - # uses: 
actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'Smoke Test' - # kots-namespace: 'smoke-test' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-minimal-rbac: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0}, - # {distribution: openshift, version: 4.13.0-okd} - # ] - # env: - # APP_SLUG: minimal-rbac - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # 
export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.MINIMAL_RBAC_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h | tee output.txt - - # if ! grep -q "The Kubernetes RBAC policy that the Admin Console is running with does not have access to complete the Preflight Checks. It's recommended that you run these manually before proceeding." output.txt; then - # echo "Expected to see an RBAC error for preflight checks, but did not" - # exit 1 - # fi - - # if ! grep -q 'The app was not deployed.' output.txt; then - # printf "Expected to see message about app not being deployed, but did not\n" - # exit 1 - # fi - - # if grep FAIL output.txt | grep -q 'This application requires at least 100 nodes'; then - # printf "Did not expect to see a failure about number of nodes, but did\n" - # exit 1 - # fi - - # if ! kubectl get role -n "$APP_SLUG" | grep -q kotsadm; then - # echo "kotsadm role not found in namespace $APP_SLUG" - # kubectl get role -n "$APP_SLUG" - # exit 1 - # fi - - # if ! 
kubectl get rolebinding -n "$APP_SLUG" | grep -q kotsadm; then - # echo "kotsadm rolebinding not found in namespace $APP_SLUG" - # kubectl get rolebinding -n "$APP_SLUG" - # exit 1 - # fi - - # if kubectl get clusterrole | grep -q kotsadm; then - # echo "found kotsadm clusterrole in minimal RBAC install" - # kubectl get clusterrole - # exit 1 - # fi - - # if kubectl get clusterrolebinding | grep -q kotsadm; then - # echo "found kotsadm clusterrolebinding in minimal RBAC install" - # kubectl get clusterrolebinding - # exit 1 - # fi - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-backup-and-restore: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} - # continue-on-error: ${{ matrix.cluster.stage != 'stable' }} - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: 
./.github/actions/kots-e2e - # with: - # test-focus: 'Backup and Restore' - # kots-namespace: 'backup-and-restore' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-no-required-config: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'No Required Config' - # kots-namespace: 'no-required-config' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ 
secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-strict-preflight-checks: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: strict-preflight-checks - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.STRICT_PREFLIGHT_CHECKS_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --skip-preflights \ - # --kotsadm-tag 24h | tee 
output.txt - - # if ! grep -q 'The app was not deployed.' output.txt; then - # printf "Expected to see message about app not being deployed, but did not\n" - # exit 1 - # fi - - # if ! grep FAIL output.txt | grep -q 'The application requires a Kubernetes 2.0.0 or later.'; then - # printf "Expected to see a failure about kubernetes version, but did not\n" - # exit 1 - # fi - - # if ! kubectl logs deploy/kotsadm -n "$APP_SLUG" | grep -q "preflights will not be skipped, strict preflights are set to true"; then - # echo "Failed to find a log line about strict preflights not being skipped in kotsadm logs" - # echo "kotsadm logs:" - # kubectl logs deploy/kotsadm -n "$APP_SLUG" - # exit 1 - # fi - - # # disable the strict preflight check and app should deploy successfully - # ./bin/kots set config "$APP_SLUG" enable_failing_strict_analyzers="0" --namespace "$APP_SLUG" --deploy - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs deploy/kotsadm --tail=100 -n "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # printf "App is installed successfully and is ready\n\n" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ 
steps.create-cluster.outputs.cluster-id }} - - - # validate-config: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'Config Validation' - # kots-namespace: 'config-validation' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-version-history-pagination: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: k3s-local, version: v1.27.1-k3s1} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i 
e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'Version History Pagination' - # kots-namespace: 'version-history-pagination' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-change-license: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'Change License' - # kots-namespace: 'change-license' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ 
secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-minimal-rbac-override: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0}, - # {distribution: openshift, version: 4.13.0-okd} - # ] - # env: - # APP_SLUG: minimal-rbac - # APP_VERSION_LABEL: "0.0.1" - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: minimal rbac override on command line - # run: | - # set +e - # echo ${{ secrets.MINIMAL_RBAC_LICENSE }} 
| base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --app-version-label "$APP_VERSION_LABEL" \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h \ - # --use-minimal-rbac | tee output.txt - - # if ! grep -q "The Kubernetes RBAC policy that the Admin Console is running with does not have access to complete the Preflight Checks. It's recommended that you run these manually before proceeding." output.txt; then - # echo "Expected to see an RBAC error for preflight checks, but did not" - # exit 1 - # fi - - # if ! grep -q 'The app was not deployed.' output.txt; then - # printf "Expected to see message about app not being deployed, but did not\n" - # exit 1 - # fi - - # if grep FAIL output.txt | grep -q 'This application requires at least 100 nodes'; then - # printf "Did not expect to see a failure about number of nodes, but did\n" - # exit 1 - # fi - - # if ! kubectl get role -n "$APP_SLUG" | grep -q kotsadm; then - # echo "kotsadm role not found in namespace $APP_SLUG" - # kubectl get role -n "$APP_SLUG" - # exit 1 - # fi - - # if ! 
kubectl get rolebinding -n "$APP_SLUG" | grep -q kotsadm; then - # echo "kotsadm rolebinding not found in namespace $APP_SLUG" - # kubectl get rolebinding -n "$APP_SLUG" - # exit 1 - # fi - - # if kubectl get clusterrole | grep -q kotsadm; then - # echo "found kotsadm clusterrole in minimal RBAC install" - # kubectl get clusterrole - # exit 1 - # fi - - # if kubectl get clusterrolebinding | grep -q kotsadm; then - # echo "found kotsadm clusterrolebinding in minimal RBAC install" - # kubectl get clusterrolebinding - # exit 1 - # fi - - # - name: create namespace and dockerhub secret - # run: | - # kubectl delete ns "$APP_SLUG" --ignore-not-found - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: no minimal rbac override on command line - # run: | - # set +e - # echo ${{ secrets.MINIMAL_RBAC_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --app-version-label "$APP_VERSION_LABEL" \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h | tee output.txt - - # if grep -q "The Kubernetes RBAC policy that the Admin Console is running with does not have access to complete the Preflight Checks. It's recommended that you run these manually before proceeding." output.txt; then - # echo "Did not expect to see an RBAC error for preflight checks, but did" - # exit 1 - # fi - - # if ! grep -q 'The app was not deployed.' output.txt; then - # printf "Expected to see message about app not being deployed, but did not\n" - # exit 1 - # fi - - # if ! 
grep FAIL output.txt | grep -q 'This application requires at least 100 nodes'; then - # printf "Expected to see a failure about number of nodes, but did not\n" - # exit 1 - # fi - - # if kubectl get role -n "$APP_SLUG" | grep -q kotsadm; then - # echo "kotsadm role found in cluster scoped install" - # kubectl get role -n "$APP_SLUG" - # exit 1 - # fi - - # if kubectl get rolebinding -n "$APP_SLUG" | grep -q kotsadm; then - # echo "kotsadm rolebinding found in cluster scoped install" - # kubectl get rolebinding -n "$APP_SLUG" - # exit 1 - # fi - - # if ! kubectl get clusterrole | grep -q kotsadm; then - # echo "No kotsadm clusterrole found in cluster scoped install" - # kubectl get clusterrole - # exit 1 - # fi - - # if ! kubectl get clusterrolebinding | grep -q kotsadm; then - # echo "No kotsadm clusterrolebinding found in cluster scoped install" - # kubectl get clusterrolebinding - # exit 1 - # fi - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-multi-namespace: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} - # continue-on-error: ${{ matrix.cluster.stage != 'stable' }} - # env: - # APP_SLUG: 
multi-namespace-yeti - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - uses: azure/setup-helm@v3 - # with: - # token: ${{ secrets.GITHUB_TOKEN }} - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.MULTI_NAMESPACE_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 180 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # validate that helm charts installed using the native helm workflow were deployed via the helm CLI correctly - - # if ! helm ls -n postgres-test | awk 'NR>1{print $1}' | grep -q postgresql; then - # printf "postgresql helm release not found in postgres-test namespace\n\n" - # helm ls -n postgres-test - # exit 1 - # fi - - # if ! helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep -q private-chart; then - # printf "private-chart helm release not found in %s namespace\n\n" "$APP_SLUG" - # helm ls -n "$APP_SLUG" - # exit 1 - # fi - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + - name: ok + run: echo "yes" + + # Use this to disable tests when iteratig on a specific test to save time + enable-tests: + runs-on: ubuntu-20.04 + steps: + - name: ok + # change 0 to a positive interger to prevent all tests from running + run: exit 0 + + + generate-tag: + runs-on: ubuntu-20.04 + outputs: + tag: ${{ steps.get_tag.outputs.GIT_TAG }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Get tags + id: get_tag + uses: ./.github/actions/version-tag + + + deps-web: + runs-on: 
ubuntu-20.04 + needs: [ can-run-ci ] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: Setup Node.js environment + uses: actions/setup-node@v4 + with: + node-version: '18.x' + cache: yarn + cache-dependency-path: web/yarn.lock + + - name: Cache node_modules + uses: actions/cache@v4 + with: + path: ${{ github.workspace }}/web/node_modules + key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} + + - name: Install web deps + run: make -C web deps + + + lint-web: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, deps-web ] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: Setup Node.js environment + uses: actions/setup-node@v4 + with: + node-version: '18.x' + cache: yarn + cache-dependency-path: web/yarn.lock + + - name: Cache node_modules + uses: actions/cache@v4 + with: + path: ${{ github.workspace }}/web/node_modules + key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} + + - name: Lint + run: make -C web lint + + + unit-test-web: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, deps-web ] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: Setup Node.js environment + uses: actions/setup-node@v4 + with: + node-version: '18.x' + cache: yarn + cache-dependency-path: web/yarn.lock + + - name: Cache node_modules + uses: actions/cache@v4 + with: + path: ${{ github.workspace }}/web/node_modules + key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} + + - name: Unit test + run: make -C web test-unit + + + build-web: + runs-on: ubuntu-20.04 + needs: [ 
can-run-ci, deps-web, generate-tag ] + steps: + # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. + # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + # this action checks out the remote branch and runs CI + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: Setup Node.js environment + uses: actions/setup-node@v4 + with: + node-version: '18.x' + cache: yarn + cache-dependency-path: web/yarn.lock + + - name: Cache node_modules + uses: actions/cache@v4 + with: + path: ${{ github.workspace }}/web/node_modules + key: ${{ runner.os }}-${{ runner.arch }}-yarn-node_modules-${{ hashFiles('web/yarn.lock') }} + + - name: Build web + env: + GIT_TAG: ${{ needs.generate-tag.outputs.tag }} + run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make -C web build-kotsadm + + - name: Upload web artifact + uses: actions/upload-artifact@v4 + with: + name: web + path: ./web/dist + + deps-kots: + runs-on: ubuntu-20.04 + needs: [ can-run-ci ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - uses: actions/setup-go@v5 + with: + go-version: '^1.20.0' + cache: true + + - run: go mod download + + vet-kots: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, deps-kots ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - uses: actions/setup-go@v5 + with: + go-version: '^1.20.0' + cache: true + + - name: vet + run: make vet + + ci-test-kots: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, deps-kots ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + 
repository: ${{github.event.pull_request.head.repo.full_name}} + + - uses: actions/setup-go@v5 + with: + go-version: '^1.20.0' + cache: true + + - name: test + run: make ci-test + + + build-kots: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, build-web, deps-kots, generate-tag ] + + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - uses: actions/setup-go@v5 + with: + go-version: '^1.20.0' + cache: true + + - name: Download web artifact + uses: actions/download-artifact@v4 + with: + name: web + path: ./web/dist + + - name: Build kots + env: + GIT_TAG: ${{ needs.generate-tag.outputs.tag }} + run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make kots + + - uses: actions/upload-artifact@v4 + with: + name: kots + path: ./bin/kots + + + build-kotsadm-melange-packages: + needs: [ can-run-ci, generate-tag ] + strategy: + fail-fast: true + matrix: + runner: [ + {name: ubuntu-20.04, arch: amd64}, + {name: arm64-runner-set, arch: arm64} + ] + runs-on: ${{ matrix.runner.name }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + - uses: ./.github/actions/build-custom-melange-package + with: + context: deploy + component: kotsadm + git-tag: ${{ needs.generate-tag.outputs.tag }} + arch: ${{ matrix.runner.arch }} + + build-kotsadm: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, generate-tag, build-kotsadm-melange-packages ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + - uses: ./.github/actions/build-custom-image-with-apko + with: + context: deploy + component: kotsadm + git-tag: ${{ needs.generate-tag.outputs.tag }} + image-name: ttl.sh/automated-${{ github.run_id }}/kotsadm:24h + + + build-kots-helm: + runs-on: 
ubuntu-20.04 + needs: [ can-run-ci ] + + steps: + - uses: actions/checkout@v4 + with: + ref: main + repository: replicatedhq/kots-helm + + - name: Build KOTS Helm chart + env: + GIT_COMMIT: ${{ github.sha }} + run: | + curl -O -L "https://raw.githubusercontent.com/replicatedhq/kots/${GIT_COMMIT}/.image.env" + mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" + + export CHART_VERSION=0.0.${{ github.run_id }}-automated + export KOTS_VERSION=24h + export KOTS_TAG=24h + export KOTSADM_REGISTRY=ttl.sh/automated-${{ github.run_id }} + + envsubst < Chart.yaml.tmpl > Chart.yaml + envsubst < values.yaml.tmpl > values.yaml + + CHART_NAME=$(helm package . | rev | cut -d/ -f1 | rev) + export CHART_NAME + helm push "$CHART_NAME" oci://ttl.sh/automated-${{ github.run_id }} + + + build-e2e: + runs-on: ubuntu-20.04 + needs: [ can-run-ci ] + steps: + - uses: actions/setup-go@v5 + with: + go-version: '^1.20.0' + + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-e2e-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-e2e- + - run: make -C e2e build deps + - run: docker save e2e-deps -o e2e/bin/e2e-deps.tar + - uses: actions/upload-artifact@v4 + with: + name: e2e + path: e2e/bin/ + + + build-kurl-proxy-melange-packages: + needs: [ can-run-ci, generate-tag ] + strategy: + fail-fast: true + matrix: + runner: [ + {name: ubuntu-20.04, arch: amd64}, + {name: arm64-runner-set, arch: arm64} + ] + runs-on: ${{ matrix.runner.name }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + - uses: ./.github/actions/build-custom-melange-package + with: + context: kurl_proxy/deploy + component: kurl-proxy + git-tag: ${{ 
needs.generate-tag.outputs.tag }} + arch: ${{ matrix.runner.arch }} + + build-kurl-proxy: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, generate-tag, build-kurl-proxy-melange-packages ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + - uses: ./.github/actions/build-custom-image-with-apko + with: + context: kurl_proxy/deploy + component: kurl-proxy + git-tag: ${{ needs.generate-tag.outputs.tag }} + image-name: ttl.sh/automated-${{ github.run_id }}/kurl-proxy:24h + + + build-migrations-melange-packages: + needs: [ can-run-ci, generate-tag ] + strategy: + fail-fast: true + matrix: + runner: [ + {name: ubuntu-20.04, arch: amd64}, + {name: arm64-runner-set, arch: arm64} + ] + runs-on: ${{ matrix.runner.name }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + - uses: ./.github/actions/build-custom-melange-package + with: + context: migrations/deploy + component: kotsadm-migrations + git-tag: ${{ needs.generate-tag.outputs.tag }} + arch: ${{ matrix.runner.arch }} + + build-migrations: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, generate-tag, build-migrations-melange-packages ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + - uses: ./.github/actions/build-custom-image-with-apko + with: + context: migrations/deploy + component: kotsadm-migrations + git-tag: ${{ needs.generate-tag.outputs.tag }} + image-name: ttl.sh/automated-${{ github.run_id }}/kotsadm-migrations:24h + + push-minio: + runs-on: ubuntu-20.04 + needs: [ can-run-ci ] + steps: + # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. 
+ # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + # this action creates a branch based on remote branch and runs the tests + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: load environment variables from .image.env + uses: falti/dotenv-action@v1 + id: dotenv + with: + path: .image.env + + - name: push minio + run: skopeo copy --all docker://kotsadm/minio:${{ steps.dotenv.outputs.MINIO_TAG }} docker://ttl.sh/automated-${{ github.run_id }}/minio:${{ steps.dotenv.outputs.MINIO_TAG }} + + + push-rqlite: + runs-on: ubuntu-20.04 + needs: [ can-run-ci ] + steps: + # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. + # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + # this action creates a branch based on remote branch and runs the tests + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: load environment variables from .image.env + uses: falti/dotenv-action@v1 + id: dotenv + with: + path: .image.env + + - name: push rqlite + run: skopeo copy --all docker://kotsadm/rqlite:${{ steps.dotenv.outputs.RQLITE_TAG }} docker://ttl.sh/automated-${{ github.run_id }}/rqlite:${{ steps.dotenv.outputs.RQLITE_TAG }} + + push-dex: + runs-on: ubuntu-20.04 + needs: [ can-run-ci ] + steps: + # This workflow trigger may lead to malicious PR authors being able to obtain repository write permissions or stealing repository secrets. 
+ # Please read https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + # this action creates a branch based on remote branch and runs the tests + - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: load environment variables from .image.env + uses: falti/dotenv-action@v1 + id: dotenv + with: + path: .image.env + + - name: push dex + run: skopeo copy --all docker://kotsadm/dex:${{ steps.dotenv.outputs.DEX_TAG }} docker://ttl.sh/automated-${{ github.run_id }}/dex:${{ steps.dotenv.outputs.DEX_TAG }} + + # only run validate-kurl-addon if changes to "deploy/kurl/kotsadm/template/**" + kurl-addon-changes-filter: + runs-on: ubuntu-20.04 + needs: [ can-run-ci, enable-tests ] + outputs: + ok-to-test: ${{ steps.filter.outputs.kurl-addon }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v2 + id: filter + with: + filters: | + kurl-addon: + - 'deploy/kurl/kotsadm/template/**' + - 'deploy/kurl/kotsadm/testgrid-os-spec.yaml' + validate-kurl-addon: + runs-on: ubuntu-20.04 + if: ${{ needs.kurl-addon-changes-filter.outputs.ok-to-test == 'true' }} + needs: [ can-run-ci, enable-tests, generate-tag, kurl-addon-changes-filter, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-dex ] + steps: + - name: checkout + uses: actions/checkout@v4 + - name: set outputs + id: vars + run: | + addon_version=${{ needs.generate-tag.outputs.tag }} + echo "addon_version=${addon_version#v}" >> "$GITHUB_OUTPUT" + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - name: prepare kots binary executable + run: | + chmod +x bin/* + tar -C bin/ -czvf bin/kots.tar.gz kots + - name: generate kurl add-on + id: addon-generate + uses: ./.github/actions/kurl-addon-kots-generate + env: + AWS_ACCESS_KEY_ID: ${{ secrets.KURL_ADDONS_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ 
secrets.KURL_ADDONS_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: us-east-1 + with: + addon_version: ${{ steps.vars.outputs.addon_version }} + kotsadm_image_registry: ttl.sh + kotsadm_image_namespace: automated-${{ github.run_id }} + kotsadm_image_tag: 24h + kotsadm_binary_override: bin/kots.tar.gz + - name: test kurl add-on + id: addon-test + uses: ./.github/actions/kurl-addon-kots-test + with: + addon_version: ${{ steps.vars.outputs.addon_version }} + addon_package_url: ${{ steps.addon-generate.outputs.addon_package_url }} + testgrid_api_token: ${{ secrets.TESTGRID_PROD_API_TOKEN }} + - name: comment testgrid url + uses: mshick/add-pr-comment@v2 + with: + message: ${{ steps.addon-test.outputs.testgrid_run_message }} + repo-token: ${{ secrets.GITHUB_TOKEN }} + allow-repeats: false + + + cmx-versions: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci ] + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: get CMX versions to test + id: cmx-versions-to-test + uses: ./.github/actions/cmx-versions + with: + replicated-api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + distros: 'k3s,openshift' + outputs: + versions-to-test: ${{ steps.cmx-versions-to-test.outputs.versions-to-test }} + + + validate-existing-online-install-minimal: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: | + docker load -i e2e/bin/e2e-deps.tar + chmod +x e2e/bin/* + chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + kots-namespace: 'qakotsregression' + test-focus: 'Regression' + k8s-distribution: k3s + k8s-version: v1.26 + testim-branch: ${{ github.head_ref == 'main' && 'master' || 
github.head_ref }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-smoke-test: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} + continue-on-error: ${{ matrix.cluster.stage != 'stable' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'Smoke Test' + kots-namespace: 'smoke-test' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-minimal-rbac: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, 
build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0}, + {distribution: openshift, version: 4.13.0-okd} + ] + env: + APP_SLUG: minimal-rbac + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.MINIMAL_RBAC_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h | tee output.txt + + if ! grep -q "The Kubernetes RBAC policy that the Admin Console is running with does not have access to complete the Preflight Checks. It's recommended that you run these manually before proceeding." output.txt; then + echo "Expected to see an RBAC error for preflight checks, but did not" + exit 1 + fi + + if ! grep -q 'The app was not deployed.' 
output.txt; then + printf "Expected to see message about app not being deployed, but did not\n" + exit 1 + fi + + if grep FAIL output.txt | grep -q 'This application requires at least 100 nodes'; then + printf "Did not expect to see a failure about number of nodes, but did\n" + exit 1 + fi + + if ! kubectl get role -n "$APP_SLUG" | grep -q kotsadm; then + echo "kotsadm role not found in namespace $APP_SLUG" + kubectl get role -n "$APP_SLUG" + exit 1 + fi + + if ! kubectl get rolebinding -n "$APP_SLUG" | grep -q kotsadm; then + echo "kotsadm rolebinding not found in namespace $APP_SLUG" + kubectl get rolebinding -n "$APP_SLUG" + exit 1 + fi + + if kubectl get clusterrole | grep -q kotsadm; then + echo "found kotsadm clusterrole in minimal RBAC install" + kubectl get clusterrole + exit 1 + fi + + if kubectl get clusterrolebinding | grep -q kotsadm; then + echo "found kotsadm clusterrolebinding in minimal RBAC install" + kubectl get clusterrolebinding + exit 1 + fi + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-backup-and-restore: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} + continue-on-error: ${{ matrix.cluster.stage != 'stable' }} + steps: + 
- name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'Backup and Restore' + kots-namespace: 'backup-and-restore' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-no-required-config: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'No Required Config' + kots-namespace: 'no-required-config' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + 
testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-strict-preflight-checks: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: strict-preflight-checks + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.STRICT_PREFLIGHT_CHECKS_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + 
--shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --skip-preflights \ + --kotsadm-tag 24h | tee output.txt + + if ! grep -q 'The app was not deployed.' output.txt; then + printf "Expected to see message about app not being deployed, but did not\n" + exit 1 + fi + + if ! grep FAIL output.txt | grep -q 'The application requires a Kubernetes 2.0.0 or later.'; then + printf "Expected to see a failure about kubernetes version, but did not\n" + exit 1 + fi + + if ! kubectl logs deploy/kotsadm -n "$APP_SLUG" | grep -q "preflights will not be skipped, strict preflights are set to true"; then + echo "Failed to find a log line about strict preflights not being skipped in kotsadm logs" + echo "kotsadm logs:" + kubectl logs deploy/kotsadm -n "$APP_SLUG" + exit 1 + fi + + # disable the strict preflight check and app should deploy successfully + ./bin/kots set config "$APP_SLUG" enable_failing_strict_analyzers="0" --namespace "$APP_SLUG" --deploy + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs deploy/kotsadm --tail=100 -n "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + printf "App is installed successfully and is ready\n\n" + ./bin/kots get apps --namespace "$APP_SLUG" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + 
with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-config: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'Config Validation' + kots-namespace: 'config-validation' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-version-history-pagination: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: k3s-local, version: v1.27.1-k3s1} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x 
e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'Version History Pagination' + kots-namespace: 'version-history-pagination' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-change-license: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'Change License' + kots-namespace: 'change-license' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ 
secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-minimal-rbac-override: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0}, + {distribution: openshift, version: 4.13.0-okd} + ] + env: + APP_SLUG: minimal-rbac + APP_VERSION_LABEL: "0.0.1" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: minimal rbac override on command line + run: | + set +e + echo ${{ secrets.MINIMAL_RBAC_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --app-version-label "$APP_VERSION_LABEL" \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + 
--kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h \ + --use-minimal-rbac | tee output.txt + + if ! grep -q "The Kubernetes RBAC policy that the Admin Console is running with does not have access to complete the Preflight Checks. It's recommended that you run these manually before proceeding." output.txt; then + echo "Expected to see an RBAC error for preflight checks, but did not" + exit 1 + fi + + if ! grep -q 'The app was not deployed.' output.txt; then + printf "Expected to see message about app not being deployed, but did not\n" + exit 1 + fi + + if grep FAIL output.txt | grep -q 'This application requires at least 100 nodes'; then + printf "Did not expect to see a failure about number of nodes, but did\n" + exit 1 + fi + + if ! kubectl get role -n "$APP_SLUG" | grep -q kotsadm; then + echo "kotsadm role not found in namespace $APP_SLUG" + kubectl get role -n "$APP_SLUG" + exit 1 + fi + + if ! kubectl get rolebinding -n "$APP_SLUG" | grep -q kotsadm; then + echo "kotsadm rolebinding not found in namespace $APP_SLUG" + kubectl get rolebinding -n "$APP_SLUG" + exit 1 + fi + + if kubectl get clusterrole | grep -q kotsadm; then + echo "found kotsadm clusterrole in minimal RBAC install" + kubectl get clusterrole + exit 1 + fi + + if kubectl get clusterrolebinding | grep -q kotsadm; then + echo "found kotsadm clusterrolebinding in minimal RBAC install" + kubectl get clusterrolebinding + exit 1 + fi + + - name: create namespace and dockerhub secret + run: | + kubectl delete ns "$APP_SLUG" --ignore-not-found + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: no minimal rbac override on command line + run: | + set +e + echo ${{ secrets.MINIMAL_RBAC_LICENSE }} | base64 -d > license.yaml + ./bin/kots 
\ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --app-version-label "$APP_VERSION_LABEL" \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h | tee output.txt + + if grep -q "The Kubernetes RBAC policy that the Admin Console is running with does not have access to complete the Preflight Checks. It's recommended that you run these manually before proceeding." output.txt; then + echo "Did not expect to see an RBAC error for preflight checks, but did" + exit 1 + fi + + if ! grep -q 'The app was not deployed.' output.txt; then + printf "Expected to see message about app not being deployed, but did not\n" + exit 1 + fi + + if ! grep FAIL output.txt | grep -q 'This application requires at least 100 nodes'; then + printf "Expected to see a failure about number of nodes, but did not\n" + exit 1 + fi + + if kubectl get role -n "$APP_SLUG" | grep -q kotsadm; then + echo "kotsadm role found in cluster scoped install" + kubectl get role -n "$APP_SLUG" + exit 1 + fi + + if kubectl get rolebinding -n "$APP_SLUG" | grep -q kotsadm; then + echo "kotsadm rolebinding found in cluster scoped install" + kubectl get rolebinding -n "$APP_SLUG" + exit 1 + fi + + if ! kubectl get clusterrole | grep -q kotsadm; then + echo "No kotsadm clusterrole found in cluster scoped install" + kubectl get clusterrole + exit 1 + fi + + if ! 
kubectl get clusterrolebinding | grep -q kotsadm; then + echo "No kotsadm clusterrolebinding found in cluster scoped install" + kubectl get clusterrolebinding + exit 1 + fi + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-multi-namespace: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} + continue-on-error: ${{ matrix.cluster.stage != 'stable' }} + env: + APP_SLUG: multi-namespace-yeti + steps: + - name: Checkout + uses: actions/checkout@v4 + + - uses: azure/setup-helm@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + 
- run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.MULTI_NAMESPACE_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 180 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # validate that helm charts installed using the native helm workflow were deployed via the helm CLI correctly + + if ! helm ls -n postgres-test | awk 'NR>1{print $1}' | grep -q postgresql; then + printf "postgresql helm release not found in postgres-test namespace\n\n" + helm ls -n postgres-test + exit 1 + fi + + if ! 
helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep -q private-chart; then + printf "private-chart helm release not found in %s namespace\n\n" "$APP_SLUG" + helm ls -n "$APP_SLUG" + exit 1 + fi + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-kots-pull: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} - # continue-on-error: ${{ matrix.cluster.stage != 'stable' }} - # env: - # APP_NAME: multi-namespace-yeti - # APP_SLUG: multi-namespace - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 
- # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_NAME" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_NAME" - - # - name: run kots pull - # run: | - # set +e - # echo ${{ secrets.MULTI_NAMESPACE_LICENSE }} | base64 -d > license.yaml - # ./bin/kots pull "$APP_NAME/automated" \ - # --license-file license.yaml \ - # --shared-password password \ - # --namespace "$APP_NAME" \ - # --exclude-admin-console - - # kubectl create ns "$APP_NAME" - # kubectl create ns nginx-test - # kubectl create ns redis-test - # kubectl create ns postgres-test - - # # HACK: without operator, additonal namespaces don't get image pull secrets - # echo ${{ secrets.MULTI_NAMESPACE_REGISTRY_AUTH }} | base64 -d > replicated-registry-auth.json - # kubectl -n nginx-test create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json - # kubectl -n redis-test create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json - # kubectl -n redis-test create secret generic multi-namespace-yeti-redis-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json - # kubectl -n postgres-test create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json - # kubectl -n default create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json - - # kustomize build "$PWD/$APP_SLUG/overlays/midstream" | kubectl apply -f - - # 
kustomize build "$PWD/$APP_SLUG/overlays/midstream/charts/redis" | kubectl apply -f - - - # EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "Failed to apply spec" - # echo "------pods:" - # kubectl get pods -A - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # echo "Waiting for pods to start" - - # COUNTER=1 - # while [ "$(kubectl get pods --no-headers | grep -v Running | grep -cv Completed)" -gt 0 ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for pods to start" - # kubectl get pods -A - # exit 1 - # fi - # sleep 1 - # done - - # echo "All pods started" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_NAME" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-app-version-label: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: app-version-label - # APP_VERSION_LABEL: v1.0.0 - # LATEST_APP_VERSION_LABEL: v1.0.1 - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # 
kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: test kots install with version label - # run: | - # set +e - # echo ${{ secrets.APP_VERSION_LABEL_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --app-version-label "$APP_VERSION_LABEL" \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$APP_VERSION_LABEL" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be installed with correct version label: $APP_VERSION_LABEL" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # printf "App is installed successfully with the correct version label: %s\n\n" "$APP_VERSION_LABEL" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # # test setting DockerHub credentials - - # set +e - - # # TODO: deploy and check secrets are actually created and images are pulled - # ./bin/kots docker ensure-secret --dockerhub-username replicatedtests --dockerhub-password ${{ secrets.DOCKERHUB_RATELIMIT_PASSWORD }} -n "$APP_SLUG" - # ./bin/kots download -n "$APP_SLUG" --slug "$APP_SLUG" - # if grep "${APP_SLUG}-kotsadm-dockerhub" -w "./${APP_SLUG}/overlays/midstream/secret.yaml"; then - # echo "Found DockerHub secret in ${APP_SLUG} latest version" - # else - # echo "No DockerHub secret found in appication namespace" - # exit 1 - # fi - - # - name: remove the app - # run: | - # set +e - # ./bin/kots remove "$APP_SLUG" --namespace "$APP_SLUG" --force - - # - name: test kots install without version label - # run: | - # set +e - # echo ${{ secrets.APP_VERSION_LABEL_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # 
EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$LATEST_APP_VERSION_LABEL" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be installed with latest version label: $LATEST_APP_VERSION_LABEL" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # printf "App is installed successfully with the correct version label: %s\n\n" "$LATEST_APP_VERSION_LABEL" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-helm-install-order: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: helm-install-order - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: 
create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.HELM_INSTALL_ORDER_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --skip-preflights \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # printf "App is installed successfully and is ready\n\n" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-no-redeploy-on-restart: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: no-redeploy-on-restart - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: 
${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.NO_REDEPLOY_ON_RESTART_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --skip-preflights \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # # wait for application job to be created - # COUNTER=1 - # while [ "$(kubectl get jobs -l app=example,component=job -n "$APP_SLUG" --ignore-not-found | awk 'NR>1' | wc -l)" == "0" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 60 ]; then - # echo "Timed out waiting for job to be created" - # exit 1 - # fi - # sleep 1 - # done - - # # delete the application job and restart the admin console - # kubectl delete jobs -n "$APP_SLUG" --all - # kubectl delete pods -l app=kotsadm -n "$APP_SLUG" - - # # wait for old kotsadm pod to terminate - # COUNTER=1 - # while [ "$(kubectl get pods -l app=kotsadm -n "$APP_SLUG" | awk 'NR>1' | wc -l)" != "1" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 60 ]; then - # echo "More than 1 kotsadm pod found" - # exit 1 - # fi - # sleep 1 - # done - - # # wait for new kotsadm pod to become ready - # kubectl wait --for=condition=ready pod -l app=kotsadm -n "$APP_SLUG" --timeout=60s - - # # delay in case the app takes a bit to be deployed - # sleep 20 - - # # validate that the application wasn't re-deployed and the job wasn't re-created - # if [ "$(kubectl get jobs -l app=example,component=job -n "$APP_SLUG" --ignore-not-found | awk 'NR>1' | wc -l)" != "0" ]; then - # echo "App should not be re-deployed after restart" - # exit 1 - # fi - - # printf "Success. 
App was not re-deployed after restart\n\n" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-kubernetes-installer-preflight: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: kubernetes-installer-preflight - # steps: - # - uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ 
secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - - # echo ${{ secrets.KUBERNETES_INSTALLER_PREFLIGHT_LICENSE }} | base64 -d > license.yaml - - # # Fake kurl installation using the crd and installer spec from the application manifests: - - # # Pull application manifests - # ./bin/kots pull "$APP_SLUG/automated" --license-file license.yaml --shared-password password - - # # Apply installer crd - # kubectl apply -f "$APP_SLUG/upstream/installer-crd.yaml" - - # # Wait for crd to be created - # kubectl wait --for condition=established --timeout=60s crd/installers.cluster.kurl.sh - - # # Seems that the above does not always guarantee the crd exists? So just in case... - # sleep 10 - - # # Apply installer - # kubectl apply -f "$APP_SLUG/upstream/installer.yaml" - - # # Create kurl-config configmap in kube-system - # kubectl create cm kurl-config -n kube-system --from-literal=installer_id=7cc8094 - - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # # validate that preflight checks ran - # JSON_PATH="jsonpath={.data['automated-install-slug-$APP_SLUG']}" - # if [ "$(kubectl get cm kotsadm-tasks -n "$APP_SLUG" -o "$JSON_PATH" | grep -c pending_preflight)" != "1" ]; then - # echo "Preflight checks did not run" - # exit 1 - # fi - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # try get apps without namespace (using kubeconfig) - # # validate that output is the same as above - # mkdir -p /tmp/.kube - # sudo cp "$KUBECONFIG" /tmp/.kube/config - # sudo chmod -R 777 /tmp/.kube - # export KUBECONFIG=/tmp/.kube/config - # kubectl config set-context --current --namespace="$APP_SLUG" - # if [ "$(./bin/kots get apps | awk 'NR>1{print $2}')" != "ready" ]; then - # echo "kots get apps output is not the same as above" - # exit 1 - # fi - - # printf "App is installed successfully and is ready\n\n" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # 
continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-kots-push-images-anonymous: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: run kots admin-console push-images - # run: | - # set +e - # ./bin/kots admin-console push-images ./hack/tests/small.airgap ttl.sh/automated-${{ github.run_id }} - - - # validate-kots-admin-console-generate-manifests: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: minimal-rbac - # BASE_KOTS_VERSION: v1.72.0 - # NAMESPACE: generate-manifests - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: run kots admin-console generate-manifests without k8s context - # run: | - # set +e - # ./bin/kots admin-console generate-manifests -n "$NAMESPACE" --shared-password password - - # - name: validate that ./admin-console exists and is not empty - # run: | - # set +e - # if [ ! 
-d ./admin-console ]; then - # echo "admin-console directory does not exist" - # exit 1 - # fi - # if [ -z "$(ls -A ./admin-console)" ]; then - # echo "admin-console directory is empty" - # exit 1 - # fi - - # - name: remove admin-console directory - # run: rm -rf ./admin-console - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download base kots version - # run: | - # curl -LO "https://github.com/replicatedhq/kots/releases/download/$BASE_KOTS_VERSION/kots_linux_amd64.tar.gz" \ - # && tar zxvf kots_linux_amd64.tar.gz \ - # && mv kots "kots-$BASE_KOTS_VERSION" - - # - name: create namespace - # run: | - # set +e - # kubectl create namespace "$NAMESPACE" - - # - name: run kots admin-console generate-manifests using base kots version with k8s context - # run: | - # set +e - # "./kots-$BASE_KOTS_VERSION" admin-console generate-manifests -n "$NAMESPACE" --shared-password password - - # - name: apply the generated manifests - # run: | - # set +e - # kubectl apply -f ./admin-console -n "$NAMESPACE" - - # - name: wait for the kotsadm-minio-0 pod to be created - # run: | - # set +e - # COUNTER=1 - # while ! 
kubectl get pods -n "$NAMESPACE" | grep -q kotsadm-minio-0; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 30 ]; then - # echo "timed out waiting for kotsadm-minio-0 pod to be created" - # exit 1 - # fi - # sleep 1 - # done - - # - name: wait for kotsadm-minio-0 pod to be ready - # run: | - # set +e - # kubectl wait --for=condition=ready --timeout=180s pod/kotsadm-minio-0 -n "$NAMESPACE" - - # - name: wait for the kotsadm deployment to be ready - # run: | - # set +e - # kubectl wait --for=condition=available --timeout=300s deployment/kotsadm -n "$NAMESPACE" - - # - name: run kots admin-console generate-manifests using new kots version with k8s context - # run: | - # set +e - # ./bin/kots admin-console generate-manifests -n "$NAMESPACE" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # - name: validate that ./admin-console/minio-statefulset.yaml has initContainers since a migration is needed - # run: | - # set +e - # if ! 
grep -qE 'initContainers' ./admin-console/minio-statefulset.yaml; then - # echo "admin-console/minio-statefulset.yaml does not have initContainers" - # exit 1 - # fi - - # - name: apply the generated manifests - # run: | - # set +e - # kubectl apply -f ./admin-console -n "$NAMESPACE" - - # - name: wait for kotsadm-minio-0 pod to be ready - # run: | - # set +e - # sleep 10 - # kubectl wait --for=condition=ready --timeout=180s pod/kotsadm-minio-0 -n "$NAMESPACE" - - # - name: wait for the kotsadm deployment to be ready - # run: | - # set +e - # kubectl wait --for=condition=available --timeout=300s deployment/kotsadm -n "$NAMESPACE" - - # - name: run kots admin-console generate-manifests using new kots version with k8s context - # run: | - # set +e - # ./bin/kots admin-console generate-manifests -n "$NAMESPACE" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # - name: validate that ./admin-console/minio-statefulset.yaml does not have initContainers since a migration is not needed - # run: | - # set +e - # if grep -qE 'initContainers' ./admin-console/minio-statefulset.yaml; then - # echo "admin-console/minio-statefulset.yaml has initContainers" - # exit 1 - # fi - - # - name: apply the generated manifests - # run: | - # set +e - # kubectl apply -f ./admin-console -n "$NAMESPACE" - - # - name: wait for kotsadm-minio-0 pod to be ready - # run: | - # set +e - # sleep 10 - # kubectl wait --for=condition=ready --timeout=180s pod/kotsadm-minio-0 -n "$NAMESPACE" - - # - name: wait for the kotsadm deployment to be ready - # run: | - # set +e - # kubectl wait --for=condition=available --timeout=300s deployment/kotsadm -n "$NAMESPACE" - - # - name: print pods and logs on failure - # if: failure() - # run: | - # echo "------pods:" - # kubectl -n "$NAMESPACE" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$NAMESPACE" - # echo 
"------kotsadm-minio logs" - # kubectl logs -l app=kotsadm-minio --tail=100 --namespace "$NAMESPACE" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: automated-${{ github.run_id }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-min-kots-version: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-e2e, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite, generate-tag ] - # env: - # APP_SLUG: min-kots-version - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: | - # docker load -i e2e/bin/e2e-deps.tar - # chmod +x e2e/bin/* - # chmod +x bin/* - # cp ./bin/kots /usr/local/bin/kubectl-kots - # sudo apt-get update -y && sudo apt-get install jq -y - # - uses: ./.github/actions/kots-e2e - # id: kots-e2e - # with: - # test-focus: 'Min KOTS Version' - # kots-namespace: 'min-kots-version' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || 
github.head_ref }} - # kotsadm-image-registry: ttl.sh - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - # k8s-cluster-skip-teardown: true - - # - name: validate that kots install fails early - # run: | - # set +e - - # result=$(kubectl kots install "$APP_SLUG/automated" --no-port-forward --namespace "$APP_SLUG" --shared-password password 2>&1 >/dev/null) - # echo "$result" - - # if [[ "$result" == *"requires"* ]] && [[ "$result" == *"10000.0.0"* ]]; then - # exit 0 - # else - # exit 1 - # fi - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.kots-e2e.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.kots-e2e.outputs.cluster-id }} - - - # validate-target-kots-version: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-e2e, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite, generate-tag ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: | - # docker load -i e2e/bin/e2e-deps.tar - # chmod +x e2e/bin/* - # chmod +x bin/* - # cp ./bin/kots /usr/local/bin/kubectl-kots - # sudo apt-get update -y && sudo apt-get install jq -y - # - uses: ./.github/actions/kots-e2e - # id: kots-e2e 
- # with: - # test-focus: 'Target KOTS Version' - # kots-namespace: 'target-kots-version' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - # k8s-cluster-skip-teardown: true - - # - name: validate that kots install fails early - # run: | - # set +e - - # result=$(kubectl kots install target-kots-version/automated --no-port-forward --namespace target-kots-version --shared-password password 2>&1 >/dev/null) - # echo "$result" - - # if [[ "$result" == *"requires"* ]] && [[ "$result" == *"1.0.0"* ]]; then - # exit 0 - # else - # exit 1 - # fi - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.kots-e2e.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.kots-e2e.outputs.cluster-id }} - - - # validate-range-kots-version: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-e2e, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite, generate-tag ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - name: download kots binary - # uses: 
actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: | - # docker load -i e2e/bin/e2e-deps.tar - # chmod +x e2e/bin/* - # chmod +x bin/* - # cp ./bin/kots /usr/local/bin/kubectl-kots - # sudo apt-get update -y && sudo apt-get install jq -y - # - uses: ./.github/actions/kots-e2e - # id: kots-e2e - # with: - # test-focus: 'Range KOTS Version' - # kots-namespace: 'range-kots-version' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - # k8s-cluster-skip-teardown: true - - # - name: validate that kots install fails early - # run: | - # set +e - - # result=$(kubectl kots install range-kots-version/automated --no-port-forward --namespace range-kots-version --shared-password password 2>&1 >/dev/null) - # echo "$result" - - # if [[ "$result" == *"requires"* ]] && [[ "$result" == *"11000.0.0"* ]]; then - # exit 0 - # else - # exit 1 - # fi - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.kots-e2e.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.kots-e2e.outputs.cluster-id }} - - - # validate-kots-upgrade: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - 
# matrix: - # cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} - # continue-on-error: ${{ matrix.cluster.stage != 'stable' }} - # env: - # APP_SLUG: postgres-to-rqlite - # BASE_KOTS_VERSION: v1.57.0 - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} - # export-kubeconfig: true - - # - name: download base kots version - # run: | - # curl -LO "https://github.com/replicatedhq/kots/releases/download/$BASE_KOTS_VERSION/kots_linux_amd64.tar.gz" \ - # && tar zxvf kots_linux_amd64.tar.gz \ - # && mv kots "kots-$BASE_KOTS_VERSION" - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.POSTGRES_TO_RQLITE_LICENSE }} | base64 -d > license.yaml - - # # install using the base KOTS version - - # "./kots-$BASE_KOTS_VERSION" \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --port-forward=false \ - # --namespace "$APP_SLUG" \ - # --shared-password password - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$("./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # "./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # upgrade using the new KOTS version - - # ./bin/kots admin-console upgrade \ - # --namespace "$APP_SLUG" \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # # verify that the postgres to rqlite migration was successful - - # if ! kubectl logs -l app=kotsadm --namespace "$APP_SLUG" | grep -q "Migrated from Postgres to rqlite successfully"; then - # echo "Failed to find a successful migration log line" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --all-containers --namespace "$APP_SLUG" - # exit 1 - # fi - - # # verify that the minio migration happened - - # if [ -z "$(kubectl get statefulset kotsadm-minio -n "$APP_SLUG" -o jsonpath='{.spec.template.spec.initContainers}')" ]; then - # echo "Failed to find initContainers in the kotsadm-minio statefulset" - # echo "kotsadm-minio statefulset:" - # kubectl get statefulset kotsadm-minio -n "$APP_SLUG" -o yaml - # exit 1 - # fi - - # # make sure app is still installed and ready - - # if [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; then - # echo "App is not ready after the upgrade" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - - # printf "App is still installed and is ready after the 
migration\n\n" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-kots-helm-release-secret-migration: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: helm-release-secret-migration - # RELEASE_NAME: helm-release-chart - # RELEASE_NAMESPACE: helm-release - # BASE_KOTS_VERSION: v1.94.0 - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download base kots version - # run: | - # curl -LO "https://github.com/replicatedhq/kots/releases/download/$BASE_KOTS_VERSION/kots_linux_amd64.tar.gz" \ - # && tar zxvf kots_linux_amd64.tar.gz \ - # && mv kots "kots-$BASE_KOTS_VERSION" - - # - name: 
download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.HELM_RELEASE_SECRET_MIGRATION_LICENSE }} | base64 -d > license.yaml - - # # install using the base KOTS version - # "./kots-$BASE_KOTS_VERSION" \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward=true \ - # --namespace "$APP_SLUG" \ - # --shared-password password - - # EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$("./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # "./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # verify that the helm release secret is created in the kotsadm namespace - # releaseSecretName=$(kubectl get secret -n "$APP_SLUG" -l owner=helm,name="$RELEASE_NAME" -o jsonpath='{.items[*].metadata.name}') - # if [ -z "$releaseSecretName" ]; then - # echo "Failed to find the helm release secret in the $APP_SLUG namespace" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - - # # if there are more than one helm release 
secrets, fail - # if [ "$(echo "$releaseSecretName" | wc -l)" -gt 1 ]; then - # echo "Found more than one helm release secret in the $APP_SLUG namespace" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - - # # upgrade using the new KOTS version - # ./bin/kots admin-console upgrade \ - # --namespace "$APP_SLUG" \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # # make a config change and redeploy the app - # ./bin/kots set config "$APP_SLUG" create_new_sequence=true --deploy --namespace "$APP_SLUG" - - # # make sure app is still installed and ready - # if [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; then - # echo "App is not ready after the upgrade" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - - # COUNTER=1 - # while [ "$(helm ls -n "$RELEASE_NAMESPACE" | grep "$RELEASE_NAME" | awk 'NR>0{print $3}')" != "2" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for $RELEASE_NAME to be upgraded to revision 2" - # helm ls -n "$RELEASE_NAMESPACE" - # exit 1 - # fi - # sleep 1 - # done - - # # verify that the helm release secret is created in the helm relase namespace - # COUNT=1 - # releaseSecret="" - # while [ -z "$releaseSecret" ]; do - # ((COUNT += 1)) - # if [ $COUNT -gt 10 ]; then - # echo "Timed out waiting for the helm release secret to be migrated" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # releaseSecret=$(kubectl get secret "$releaseSecretName" -n "$RELEASE_NAMESPACE") - # done - - # # verify that the release secret in app namepspace is deleted - # oldReleaseSecret=$(kubectl get secret "$releaseSecretName" -n "$APP_SLUG") - # if [ -n "$oldReleaseSecret" ]; then - # echo "Found the helm release secret in the 
$APP_SLUG namespace" - # echo "$oldReleaseSecret" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - - # # verify that there are two helm release secrets in the helm release namespace - # releaseSecretNames=$(kubectl get secret -n "$RELEASE_NAMESPACE" -l owner=helm,name="$RELEASE_NAME" -o jsonpath='{.items[*].metadata.name}') - # releaseSecretNamesCount=$(echo "$releaseSecretNames" | wc -w) - # if [ "$releaseSecretNamesCount" -ne 2 ]; then - # echo "Found $releaseSecretNamesCount helm release secrets in the $RELEASE_NAMESPACE namespace(Want: 2)" - # kubectl get secret -n "$RELEASE_NAMESPACE" -l owner=helm,name="$RELEASE_NAME" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - - # printf "Helm release secret migration test passed\n\n" - # ./bin/kots get apps --namespace "$APP_SLUG" - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-multi-app-backup-and-restore: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: 
actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'multi-app-backup-and-restore' - # kots-namespace: 'multi-app-backup-and-restore' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-multi-app-install: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'multi-app-install' - # kots-namespace: 'multi-app-install' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: 
'${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-airgap-smoke-test: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'airgap-smoke-test' - # kots-namespace: 'airgap-smoke-test' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # kots-airgap: true - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-remove-app: - # runs-on: ubuntu-20.04 - # 
needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0, instance-type: r1.medium} - # ] - # env: - # APP_SLUG: remove-app - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # instance-type: ${{ matrix.cluster.instance-type }} - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-kots-pull: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: ${{ 
fromJson(needs.cmx-versions.outputs.versions-to-test) }} + continue-on-error: ${{ matrix.cluster.stage != 'stable' }} + env: + APP_NAME: multi-namespace-yeti + APP_SLUG: multi-namespace + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_NAME" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_NAME" + + - name: run kots pull + run: | + set +e + echo ${{ secrets.MULTI_NAMESPACE_LICENSE }} | base64 -d > license.yaml + ./bin/kots pull "$APP_NAME/automated" \ + --license-file license.yaml \ + --shared-password password \ + --namespace "$APP_NAME" \ + --exclude-admin-console + + kubectl create ns "$APP_NAME" + kubectl create ns nginx-test + kubectl create ns redis-test + kubectl create ns postgres-test + + # HACK: without operator, additional namespaces don't get image pull secrets + echo ${{ secrets.MULTI_NAMESPACE_REGISTRY_AUTH }} | base64 -d > replicated-registry-auth.json + kubectl -n nginx-test create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json + 
kubectl -n redis-test create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json + kubectl -n redis-test create secret generic multi-namespace-yeti-redis-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json + kubectl -n postgres-test create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json + kubectl -n default create secret generic multi-namespace-yeti-registry --type=kubernetes.io/dockerconfigjson --from-file=.dockerconfigjson=./replicated-registry-auth.json + + kustomize build "$PWD/$APP_SLUG/overlays/midstream" | kubectl apply -f - + kustomize build "$PWD/$APP_SLUG/overlays/midstream/charts/redis" | kubectl apply -f - + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "Failed to apply spec" + echo "------pods:" + kubectl get pods -A + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + echo "Waiting for pods to start" + + COUNTER=1 + while [ "$(kubectl get pods --no-headers | grep -v Running | grep -cv Completed)" -gt 0 ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for pods to start" + kubectl get pods -A + exit 1 + fi + sleep 1 + done + + echo "All pods started" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_NAME" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN 
}} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-app-version-label: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: app-version-label + APP_VERSION_LABEL: v1.0.0 + LATEST_APP_VERSION_LABEL: v1.0.1 + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: test kots install with version label + run: | + set +e + echo ${{ secrets.APP_VERSION_LABEL_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --app-version-label "$APP_VERSION_LABEL" \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$APP_VERSION_LABEL" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be installed with correct version label: $APP_VERSION_LABEL" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + printf "App is installed successfully with the correct version label: %s\n\n" "$APP_VERSION_LABEL" + ./bin/kots get apps --namespace "$APP_SLUG" + + # test setting DockerHub credentials + + set +e + + # TODO: deploy and check secrets are actually created and images are pulled + ./bin/kots docker ensure-secret --dockerhub-username replicatedtests --dockerhub-password ${{ secrets.DOCKERHUB_RATELIMIT_PASSWORD }} -n "$APP_SLUG" + ./bin/kots download -n "$APP_SLUG" --slug "$APP_SLUG" + if grep "${APP_SLUG}-kotsadm-dockerhub" -w "./${APP_SLUG}/overlays/midstream/secret.yaml"; then + echo "Found DockerHub secret in ${APP_SLUG} latest version" + else + echo "No DockerHub secret found in application namespace" + exit 1 + fi + + - name: remove the app + run: | + set +e + ./bin/kots remove "$APP_SLUG" --namespace "$APP_SLUG" --force + + - name: test kots install without version label + run: | + set +e + echo ${{ secrets.APP_VERSION_LABEL_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$LATEST_APP_VERSION_LABEL" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be installed with latest version label: $LATEST_APP_VERSION_LABEL" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + printf "App is installed successfully with the correct version label: %s\n\n" "$LATEST_APP_VERSION_LABEL" + ./bin/kots get apps --namespace "$APP_SLUG" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-helm-install-order: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: helm-install-order + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN 
}} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.HELM_INSTALL_ORDER_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --skip-preflights \ + --kotsadm-tag 24h + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + printf "App is installed successfully and is ready\n\n" + ./bin/kots get apps --namespace "$APP_SLUG" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-no-redeploy-on-restart: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: no-redeploy-on-restart + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: 
automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.NO_REDEPLOY_ON_RESTART_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --skip-preflights \ + --kotsadm-tag 24h + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + # wait for application job to be created + COUNTER=1 + while [ "$(kubectl get jobs -l app=example,component=job -n "$APP_SLUG" --ignore-not-found | awk 'NR>1' | wc -l)" == "0" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 60 ]; then + echo "Timed out waiting for job to be created" + exit 1 + fi + sleep 1 + done + + # delete the application job and restart the admin console + kubectl delete jobs -n "$APP_SLUG" --all + kubectl delete pods -l app=kotsadm -n "$APP_SLUG" + + # wait for old kotsadm pod to terminate + COUNTER=1 + while [ "$(kubectl get pods -l app=kotsadm -n "$APP_SLUG" | awk 'NR>1' | wc -l)" != "1" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 60 ]; then + echo "More than 1 kotsadm pod found" + exit 1 + fi + sleep 1 + done + + # wait for new kotsadm pod to become ready + kubectl wait --for=condition=ready pod -l app=kotsadm -n "$APP_SLUG" --timeout=60s + + # delay in case the app takes a bit to be deployed + sleep 20 + + # validate that the application wasn't re-deployed and the job wasn't re-created + if [ "$(kubectl get jobs -l app=example,component=job -n "$APP_SLUG" --ignore-not-found | awk 'NR>1' | wc -l)" != "0" ]; then + echo "App should not be re-deployed after restart" + exit 1 + fi + + printf "Success. 
App was not re-deployed after restart\n\n" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-kubernetes-installer-preflight: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: kubernetes-installer-preflight + steps: + - uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + 
set +e + + echo ${{ secrets.KUBERNETES_INSTALLER_PREFLIGHT_LICENSE }} | base64 -d > license.yaml + + # Fake kurl installation using the crd and installer spec from the application manifests: + + # Pull application manifests + ./bin/kots pull "$APP_SLUG/automated" --license-file license.yaml --shared-password password + + # Apply installer crd + kubectl apply -f "$APP_SLUG/upstream/installer-crd.yaml" + + # Wait for crd to be created + kubectl wait --for condition=established --timeout=60s crd/installers.cluster.kurl.sh + + # Seems that the above does not always guarantee the crd exists? So just in case... + sleep 10 + + # Apply installer + kubectl apply -f "$APP_SLUG/upstream/installer.yaml" + + # Create kurl-config configmap in kube-system + kubectl create cm kurl-config -n kube-system --from-literal=installer_id=7cc8094 + + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + # validate that preflight checks ran + JSON_PATH="jsonpath={.data['automated-install-slug-$APP_SLUG']}" + if [ "$(kubectl get cm kotsadm-tasks -n "$APP_SLUG" -o "$JSON_PATH" | grep -c pending_preflight)" != "1" ]; then + echo "Preflight checks did not run" + exit 1 + fi + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # try get apps without namespace (using kubeconfig) + # validate that output is the same as above + mkdir -p /tmp/.kube + sudo cp "$KUBECONFIG" /tmp/.kube/config + sudo chmod -R 777 /tmp/.kube + export KUBECONFIG=/tmp/.kube/config + kubectl config set-context --current --namespace="$APP_SLUG" + if [ "$(./bin/kots get apps | awk 'NR>1{print $2}')" != "ready" ]; then + echo "kots get apps output is not the same as above" + exit 1 + fi + + printf "App is installed successfully and is ready\n\n" + ./bin/kots get apps --namespace "$APP_SLUG" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ 
steps.create-cluster.outputs.cluster-id }} + + + validate-kots-push-images-anonymous: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: run kots admin-console push-images + run: | + set +e + ./bin/kots admin-console push-images ./hack/tests/small.airgap ttl.sh/automated-${{ github.run_id }} + + + validate-kots-admin-console-generate-manifests: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: minimal-rbac + BASE_KOTS_VERSION: v1.72.0 + NAMESPACE: generate-manifests + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: run kots admin-console generate-manifests without k8s context + run: | + set +e + ./bin/kots admin-console generate-manifests -n "$NAMESPACE" --shared-password password + + - name: validate that ./admin-console exists and is not empty + run: | + set +e + if [ ! 
-d ./admin-console ]; then + echo "admin-console directory does not exist" + exit 1 + fi + if [ -z "$(ls -A ./admin-console)" ]; then + echo "admin-console directory is empty" + exit 1 + fi + + - name: remove admin-console directory + run: rm -rf ./admin-console + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download base kots version + run: | + curl -LO "https://github.com/replicatedhq/kots/releases/download/$BASE_KOTS_VERSION/kots_linux_amd64.tar.gz" \ + && tar zxvf kots_linux_amd64.tar.gz \ + && mv kots "kots-$BASE_KOTS_VERSION" + + - name: create namespace + run: | + set +e + kubectl create namespace "$NAMESPACE" + + - name: run kots admin-console generate-manifests using base kots version with k8s context + run: | + set +e + "./kots-$BASE_KOTS_VERSION" admin-console generate-manifests -n "$NAMESPACE" --shared-password password + + - name: apply the generated manifests + run: | + set +e + kubectl apply -f ./admin-console -n "$NAMESPACE" + + - name: wait for the kotsadm-minio-0 pod to be created + run: | + set +e + COUNTER=1 + while ! 
kubectl get pods -n "$NAMESPACE" | grep -q kotsadm-minio-0; do + ((COUNTER += 1)) + if [ $COUNTER -gt 30 ]; then + echo "timed out waiting for kotsadm-minio-0 pod to be created" + exit 1 + fi + sleep 1 + done + + - name: wait for kotsadm-minio-0 pod to be ready + run: | + set +e + kubectl wait --for=condition=ready --timeout=180s pod/kotsadm-minio-0 -n "$NAMESPACE" + + - name: wait for the kotsadm deployment to be ready + run: | + set +e + kubectl wait --for=condition=available --timeout=300s deployment/kotsadm -n "$NAMESPACE" + + - name: run kots admin-console generate-manifests using new kots version with k8s context + run: | + set +e + ./bin/kots admin-console generate-manifests -n "$NAMESPACE" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + - name: validate that ./admin-console/minio-statefulset.yaml has initContainers since a migration is needed + run: | + set +e + if ! grep -qE 'initContainers' ./admin-console/minio-statefulset.yaml; then + echo "admin-console/minio-statefulset.yaml does not have initContainers" + exit 1 + fi + + - name: apply the generated manifests + run: | + set +e + kubectl apply -f ./admin-console -n "$NAMESPACE" + + - name: wait for kotsadm-minio-0 pod to be ready + run: | + set +e + sleep 10 + kubectl wait --for=condition=ready --timeout=180s pod/kotsadm-minio-0 -n "$NAMESPACE" + + - name: wait for the kotsadm deployment to be ready + run: | + set +e + kubectl wait --for=condition=available --timeout=300s deployment/kotsadm -n "$NAMESPACE" + + - name: run kots admin-console generate-manifests using new kots version with k8s context + run: | + set +e + ./bin/kots admin-console generate-manifests -n "$NAMESPACE" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + - name: validate that ./admin-console/minio-statefulset.yaml does not have initContainers since 
a migration is not needed + run: | + set +e + if grep -qE 'initContainers' ./admin-console/minio-statefulset.yaml; then + echo "admin-console/minio-statefulset.yaml has initContainers" + exit 1 + fi + + - name: apply the generated manifests + run: | + set +e + kubectl apply -f ./admin-console -n "$NAMESPACE" + + - name: wait for kotsadm-minio-0 pod to be ready + run: | + set +e + sleep 10 + kubectl wait --for=condition=ready --timeout=180s pod/kotsadm-minio-0 -n "$NAMESPACE" + + - name: wait for the kotsadm deployment to be ready + run: | + set +e + kubectl wait --for=condition=available --timeout=300s deployment/kotsadm -n "$NAMESPACE" + + - name: print pods and logs on failure + if: failure() + run: | + echo "------pods:" + kubectl -n "$NAMESPACE" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$NAMESPACE" + echo "------kotsadm-minio logs" + kubectl logs -l app=kotsadm-minio --tail=100 --namespace "$NAMESPACE" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: automated-${{ github.run_id }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-min-kots-version: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-e2e, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite, generate-tag ] + env: + APP_SLUG: min-kots-version + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: 
actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: | + docker load -i e2e/bin/e2e-deps.tar + chmod +x e2e/bin/* + chmod +x bin/* + cp ./bin/kots /usr/local/bin/kubectl-kots + sudo apt-get update -y && sudo apt-get install jq -y + - uses: ./.github/actions/kots-e2e + id: kots-e2e + with: + test-focus: 'Min KOTS Version' + kots-namespace: 'min-kots-version' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + kotsadm-image-registry: ttl.sh + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + k8s-cluster-skip-teardown: true + + - name: validate that kots install fails early + run: | + set +e + + result=$(kubectl kots install "$APP_SLUG/automated" --no-port-forward --namespace "$APP_SLUG" --shared-password password 2>&1 >/dev/null) + echo "$result" + + if [[ "$result" == *"requires"* ]] && [[ "$result" == *"10000.0.0"* ]]; then + exit 0 + else + exit 1 + fi + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.kots-e2e.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.kots-e2e.outputs.cluster-id }} + + + validate-target-kots-version: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-e2e, build-kots, build-kotsadm, build-kurl-proxy, 
build-migrations, push-minio, push-rqlite, generate-tag ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: | + docker load -i e2e/bin/e2e-deps.tar + chmod +x e2e/bin/* + chmod +x bin/* + cp ./bin/kots /usr/local/bin/kubectl-kots + sudo apt-get update -y && sudo apt-get install jq -y + - uses: ./.github/actions/kots-e2e + id: kots-e2e + with: + test-focus: 'Target KOTS Version' + kots-namespace: 'target-kots-version' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + k8s-cluster-skip-teardown: true + + - name: validate that kots install fails early + run: | + set +e + + result=$(kubectl kots install target-kots-version/automated --no-port-forward --namespace target-kots-version --shared-password password 2>&1 >/dev/null) + echo "$result" + + if [[ "$result" == *"requires"* ]] && [[ "$result" == *"1.0.0"* ]]; then + exit 0 + else + exit 1 + fi + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.kots-e2e.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ 
steps.kots-e2e.outputs.cluster-id }} + + + validate-range-kots-version: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-e2e, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite, generate-tag ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: | + docker load -i e2e/bin/e2e-deps.tar + chmod +x e2e/bin/* + chmod +x bin/* + cp ./bin/kots /usr/local/bin/kubectl-kots + sudo apt-get update -y && sudo apt-get install jq -y + - uses: ./.github/actions/kots-e2e + id: kots-e2e + with: + test-focus: 'Range KOTS Version' + kots-namespace: 'range-kots-version' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + k8s-cluster-skip-teardown: true + + - name: validate that kots install fails early + run: | + set +e + + result=$(kubectl kots install range-kots-version/automated --no-port-forward --namespace range-kots-version --shared-password password 2>&1 >/dev/null) + echo "$result" + + if [[ "$result" == *"requires"* ]] && [[ "$result" == *"11000.0.0"* ]]; then + exit 0 + else + exit 1 + fi + + - name: Remove Cluster + id: remove-cluster + uses: 
replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.kots-e2e.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.kots-e2e.outputs.cluster-id }} + + + validate-kots-upgrade: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, cmx-versions, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: ${{ fromJson(needs.cmx-versions.outputs.versions-to-test) }} + continue-on-error: ${{ matrix.cluster.stage != 'stable' }} + env: + APP_SLUG: postgres-to-rqlite + BASE_KOTS_VERSION: v1.57.0 + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + instance-type: ${{ matrix.cluster.distribution == 'gke' && 'n2-standard-4' || '' }} + export-kubeconfig: true + + - name: download base kots version + run: | + curl -LO "https://github.com/replicatedhq/kots/releases/download/$BASE_KOTS_VERSION/kots_linux_amd64.tar.gz" \ + && tar zxvf kots_linux_amd64.tar.gz \ + && mv kots "kots-$BASE_KOTS_VERSION" + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run 
the test + run: | + set +e + echo ${{ secrets.POSTGRES_TO_RQLITE_LICENSE }} | base64 -d > license.yaml + + # install using the base KOTS version + + "./kots-$BASE_KOTS_VERSION" \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --port-forward=false \ + --namespace "$APP_SLUG" \ + --shared-password password + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$("./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + "./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # upgrade using the new KOTS version + + ./bin/kots admin-console upgrade \ + --namespace "$APP_SLUG" \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + # verify that the postgres to rqlite migration was successful + + if ! 
kubectl logs -l app=kotsadm --namespace "$APP_SLUG" | grep -q "Migrated from Postgres to rqlite successfully"; then + echo "Failed to find a successful migration log line" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --all-containers --namespace "$APP_SLUG" + exit 1 + fi + + # verify that the minio migration happened + + if [ -z "$(kubectl get statefulset kotsadm-minio -n "$APP_SLUG" -o jsonpath='{.spec.template.spec.initContainers}')" ]; then + echo "Failed to find initContainers in the kotsadm-minio statefulset" + echo "kotsadm-minio statefulset:" + kubectl get statefulset kotsadm-minio -n "$APP_SLUG" -o yaml + exit 1 + fi + + # make sure app is still installed and ready + + if [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; then + echo "App is not ready after the upgrade" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + + printf "App is still installed and is ready after the migration\n\n" + ./bin/kots get apps --namespace "$APP_SLUG" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-kots-helm-release-secret-migration: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + 
APP_SLUG: helm-release-secret-migration + RELEASE_NAME: helm-release-chart + RELEASE_NAMESPACE: helm-release + BASE_KOTS_VERSION: v1.94.0 + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download base kots version + run: | + curl -LO "https://github.com/replicatedhq/kots/releases/download/$BASE_KOTS_VERSION/kots_linux_amd64.tar.gz" \ + && tar zxvf kots_linux_amd64.tar.gz \ + && mv kots "kots-$BASE_KOTS_VERSION" + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.HELM_RELEASE_SECRET_MIGRATION_LICENSE }} | base64 -d > license.yaml + + # install using the base KOTS version + "./kots-$BASE_KOTS_VERSION" \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward=true \ + --namespace "$APP_SLUG" \ + --shared-password password + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$("./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + "./kots-$BASE_KOTS_VERSION" get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # verify that the helm release secret is created in the kotsadm namespace + releaseSecretName=$(kubectl get secret -n "$APP_SLUG" -l owner=helm,name="$RELEASE_NAME" -o jsonpath='{.items[*].metadata.name}') + if [ -z "$releaseSecretName" ]; then + echo "Failed to find the helm release secret in the $APP_SLUG namespace" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + + # if there are more than one helm release secrets, fail + if [ "$(echo "$releaseSecretName" | wc -l)" -gt 1 ]; then + echo "Found more than one helm release secret in the $APP_SLUG namespace" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + + # upgrade using the new KOTS version + ./bin/kots admin-console upgrade \ + --namespace "$APP_SLUG" \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + # make a config change and redeploy the app + ./bin/kots set config "$APP_SLUG" create_new_sequence=true --deploy --namespace "$APP_SLUG" + + # make sure app is still installed and ready + if [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; then + echo "App is not ready after the upgrade" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + + COUNTER=1 + 
while [ "$(helm ls -n "$RELEASE_NAMESPACE" | grep "$RELEASE_NAME" | awk 'NR>0{print $3}')" != "2" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for $RELEASE_NAME to be upgraded to revision 2" + helm ls -n "$RELEASE_NAMESPACE" + exit 1 + fi + sleep 1 + done + + # verify that the helm release secret is created in the helm relase namespace + COUNT=1 + releaseSecret="" + while [ -z "$releaseSecret" ]; do + ((COUNT += 1)) + if [ $COUNT -gt 10 ]; then + echo "Timed out waiting for the helm release secret to be migrated" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + releaseSecret=$(kubectl get secret "$releaseSecretName" -n "$RELEASE_NAMESPACE") + done + + # verify that the release secret in app namepspace is deleted + oldReleaseSecret=$(kubectl get secret "$releaseSecretName" -n "$APP_SLUG") + if [ -n "$oldReleaseSecret" ]; then + echo "Found the helm release secret in the $APP_SLUG namespace" + echo "$oldReleaseSecret" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + + # verify that there are two helm release secrets in the helm release namespace + releaseSecretNames=$(kubectl get secret -n "$RELEASE_NAMESPACE" -l owner=helm,name="$RELEASE_NAME" -o jsonpath='{.items[*].metadata.name}') + releaseSecretNamesCount=$(echo "$releaseSecretNames" | wc -w) + if [ "$releaseSecretNamesCount" -ne 2 ]; then + echo "Found $releaseSecretNamesCount helm release secrets in the $RELEASE_NAMESPACE namespace(Want: 2)" + kubectl get secret -n "$RELEASE_NAMESPACE" -l owner=helm,name="$RELEASE_NAME" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + + printf "Helm release secret migration test passed\n\n" + ./bin/kots get apps --namespace "$APP_SLUG" + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + 
kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-multi-app-backup-and-restore: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'multi-app-backup-and-restore' + kots-namespace: 'multi-app-backup-and-restore' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-multi-app-install: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, 
build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'multi-app-install' + kots-namespace: 'multi-app-install' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-airgap-smoke-test: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'airgap-smoke-test' + 
kots-namespace: 'airgap-smoke-test' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + kots-airgap: true + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-remove-app: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0, instance-type: r1.medium} + ] + env: + APP_SLUG: remove-app + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + instance-type: ${{ matrix.cluster.instance-type }} + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + 
- name: run the test + run: | + set +e - # echo ${{ secrets.REMOVE_APP_LICENSE }} | base64 -d > license.yaml - # HELM_APP_NAMESPACES=(postgres-test redis-test "$APP_SLUG") - # APP_NAMESPACES=(postgres-test redis-test nginx-test rabbitmq-system "$APP_SLUG") - - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # test that --undeploy deletes application resources from the cluster - - # ./bin/kots remove "$APP_SLUG" --namespace "$APP_SLUG" --undeploy - - # if [ "$(./bin/kots get apps --namespace "$APP_SLUG" --output=json | tr -d '\n')" != "[]" ]; then - # printf "App reference was not removed\n\n" - # exit 1 - # fi - - # if [ "$(kubectl get all,secrets,configmap,pvc -A -l "kots.io/app-slug=$APP_SLUG" --ignore-not-found | wc -l | tr -d '\n')" != "0" ]; then - # printf "Application resources are still found in the cluster\n\n" - # kubectl get all,secrets,configmap,pvc -A -l "kots.io/app-slug=$APP_SLUG" - # exit 1 - # fi - - # if [ "$(kubectl get crd rabbitmqclusters.rabbitmq.com --ignore-not-found | wc -l | tr -d '\n')" != "0" ]; then - # printf "rabbitmqclusters.rabbitmq.com CRD was not removed\n\n" - # kubectl get crd rabbitmqclusters.rabbitmq.com - # exit 1 - # fi - - # for ns in "${HELM_APP_NAMESPACES[@]}"; do - # if [ "$(kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 --ignore-not-found | wc -l | tr -d '\n')" != "0" 
]; then - # printf "Application Helm release secrets are still found in the namepspace %s\n\n" "$ns" - # kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 - # exit 1 - # fi - # done - - # # re-install and test that running the remove command without --undeploy does _not_ delete any resources from the cluster - - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # ./bin/kots remove "$APP_SLUG" --namespace "$APP_SLUG" --force - - # if [ "$(./bin/kots get apps --namespace "$APP_SLUG" --output=json | tr -d '\n')" != "[]" ]; then - # printf "App reference was not removed\n\n" - # exit 1 - # fi - - # for ns in "${APP_NAMESPACES[@]}"; do - # if [ "$(kubectl get all,secrets,configmap,pvc -n "$ns" -l "kots.io/app-slug=$APP_SLUG" --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then - # printf "%s namespace does not contain any resources\n\n" "$ns" - # kubectl get all,secrets,configmap,pvc -n "$ns" - # exit 1 - # fi - # done - - # if [ "$(kubectl get crd rabbitmqclusters.rabbitmq.com --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then - # printf "rabbitmqclusters.rabbitmq.com CRD was not found\n\n" - # kubectl get crd rabbitmqclusters.rabbitmq.com - # exit 1 - # fi - - # if [ "$(kubectl get rabbitmqclusters.rabbitmq.com -n rabbitmq-system --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then - # printf "rabbitmq-system 
namespace does not contain rabbitmqclusters\n\n" - # kubectl get rabbitmqclusters.rabbitmq.com -n rabbitmq-system - # exit 1 - # fi - - # for ns in "${HELM_APP_NAMESPACES[@]}"; do - # if [ "$(kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then - # printf "Application Helm release secrets are not found in namepspace %s\n\n" "$ns" - # kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 - # exit 1 - # fi - # done - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-registry-check: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - 
# - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: run the test - # run: | - # set +e - - # ./bin/kots admin-console push-images \ - # not-an-app.airgap localhost:1234/not-a-namespace \ - # --registry-username not-a-username \ - # --registry-password not-a-password > output.txt 2>&1 - - # if ! grep -q 'Failed to test access' output.txt; then - # printf "Expected registry validation to fail before pushing images, but did not.\n\n" - # cat output.txt - # exit 1 - # fi - - # rm output.txt - - # ./bin/kots install not-an-app \ - # --airgap-bundle not-an-app.airgap \ - # --kotsadm-registry localhost:1234/not-a-namespace \ - # --registry-username not-a-username \ - # --registry-password not-a-password > output.txt 2>&1 - - # if ! grep -q 'Failed to test access' output.txt; then - # printf "Expected registry validation to fail before installation, but did not.\n\n" - # cat output.txt - # exit 1 - # fi - - # rm output.txt - - # ./bin/kots upstream upgrade not-an-app \ - # --airgap-bundle not-an-app.airgap \ - # --kotsadm-registry localhost:1234/not-a-namespace \ - # --registry-username not-a-username \ - # --registry-password not-a-password \ - # --namespace not-a-namespace > output.txt 2>&1 - - # if ! 
grep -q 'Failed to test access' output.txt; then - # printf "Expected registry validation to fail before upgrading, but did not.\n\n" - # cat output.txt - # exit 1 - # fi - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-native-helm-v2: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: native-helm-v2 - # INITIAL_VERSION: '0.1.1' - # UPGRADE_VERSION: '0.2.1' - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - uses: azure/setup-helm@v3 - # with: - # token: ${{ secrets.GITHUB_TOKEN }} - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: 
create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.NATIVE_HELM_V2_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --app-version-label "$INITIAL_VERSION" \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$INITIAL_VERSION" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # validate that helm charts installed using both native helm workflow were deployed via the helm CLI correctly - - # if ! 
helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep -q my-chart-release; then - # printf "my-chart-release helm release not found in %s namespace\n\n" "$APP_SLUG" - # helm ls -n "$APP_SLUG" - # exit 1 - # fi - - # if ! helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep -q my-other-chart-release; then - # printf "my-other-chart-release helm release not found in %s namespace\n\n" "$APP_SLUG" - # helm ls -n "$APP_SLUG" - # exit 1 - # fi - - # # validate that `helm get values` works for the v1beta2 chart - - # if ! helm get values my-chart-release -n "$APP_SLUG" | grep -q my-value; then - # printf "my-value not found in helm values for my-chart-release in %s namespace\n\n" "$APP_SLUG" - # helm get values my-chart-release -n "$APP_SLUG" - # exit 1 - # fi - - # # upgrade the app version - - # ./bin/kots upstream upgrade "$APP_SLUG" -n "$APP_SLUG" --deploy - - # # wait for my-chart-release to be uninstalled and my-other-chart-release to be upgraded to revision 2 + echo ${{ secrets.REMOVE_APP_LICENSE }} | base64 -d > license.yaml + HELM_APP_NAMESPACES=(postgres-test redis-test "$APP_SLUG") + APP_NAMESPACES=(postgres-test redis-test nginx-test rabbitmq-system "$APP_SLUG") + + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # test that --undeploy deletes application resources from the cluster + + ./bin/kots remove "$APP_SLUG" --namespace "$APP_SLUG" --undeploy + + if [ "$(./bin/kots get 
apps --namespace "$APP_SLUG" --output=json | tr -d '\n')" != "[]" ]; then + printf "App reference was not removed\n\n" + exit 1 + fi + + if [ "$(kubectl get all,secrets,configmap,pvc -A -l "kots.io/app-slug=$APP_SLUG" --ignore-not-found | wc -l | tr -d '\n')" != "0" ]; then + printf "Application resources are still found in the cluster\n\n" + kubectl get all,secrets,configmap,pvc -A -l "kots.io/app-slug=$APP_SLUG" + exit 1 + fi + + if [ "$(kubectl get crd rabbitmqclusters.rabbitmq.com --ignore-not-found | wc -l | tr -d '\n')" != "0" ]; then + printf "rabbitmqclusters.rabbitmq.com CRD was not removed\n\n" + kubectl get crd rabbitmqclusters.rabbitmq.com + exit 1 + fi + + for ns in "${HELM_APP_NAMESPACES[@]}"; do + if [ "$(kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 --ignore-not-found | wc -l | tr -d '\n')" != "0" ]; then + printf "Application Helm release secrets are still found in the namepspace %s\n\n" "$ns" + kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 + exit 1 + fi + done + + # re-install and test that running the remove command without --undeploy does _not_ delete any resources from the cluster + + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + ./bin/kots remove "$APP_SLUG" --namespace "$APP_SLUG" --force + + if [ "$(./bin/kots get apps --namespace "$APP_SLUG" --output=json | tr -d '\n')" != "[]" ]; then + printf "App 
reference was not removed\n\n" + exit 1 + fi + + for ns in "${APP_NAMESPACES[@]}"; do + if [ "$(kubectl get all,secrets,configmap,pvc -n "$ns" -l "kots.io/app-slug=$APP_SLUG" --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then + printf "%s namespace does not contain any resources\n\n" "$ns" + kubectl get all,secrets,configmap,pvc -n "$ns" + exit 1 + fi + done + + if [ "$(kubectl get crd rabbitmqclusters.rabbitmq.com --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then + printf "rabbitmqclusters.rabbitmq.com CRD was not found\n\n" + kubectl get crd rabbitmqclusters.rabbitmq.com + exit 1 + fi + + if [ "$(kubectl get rabbitmqclusters.rabbitmq.com -n rabbitmq-system --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then + printf "rabbitmq-system namespace does not contain rabbitmqclusters\n\n" + kubectl get rabbitmqclusters.rabbitmq.com -n rabbitmq-system + exit 1 + fi + + for ns in "${HELM_APP_NAMESPACES[@]}"; do + if [ "$(kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 --ignore-not-found | wc -l | tr -d '\n')" == "0" ]; then + printf "Application Helm release secrets are not found in namepspace %s\n\n" "$ns" + kubectl get secret -n "$ns" --field-selector type=helm.sh/release.v1 + exit 1 + fi + done + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-registry-check: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, 
build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: run the test + run: | + set +e + + ./bin/kots admin-console push-images \ + not-an-app.airgap localhost:1234/not-a-namespace \ + --registry-username not-a-username \ + --registry-password not-a-password > output.txt 2>&1 + + if ! grep -q 'Failed to test access' output.txt; then + printf "Expected registry validation to fail before pushing images, but did not.\n\n" + cat output.txt + exit 1 + fi + + rm output.txt + + ./bin/kots install not-an-app \ + --airgap-bundle not-an-app.airgap \ + --kotsadm-registry localhost:1234/not-a-namespace \ + --registry-username not-a-username \ + --registry-password not-a-password > output.txt 2>&1 + + if ! grep -q 'Failed to test access' output.txt; then + printf "Expected registry validation to fail before installation, but did not.\n\n" + cat output.txt + exit 1 + fi + + rm output.txt + + ./bin/kots upstream upgrade not-an-app \ + --airgap-bundle not-an-app.airgap \ + --kotsadm-registry localhost:1234/not-a-namespace \ + --registry-username not-a-username \ + --registry-password not-a-password \ + --namespace not-a-namespace > output.txt 2>&1 + + if ! 
grep -q 'Failed to test access' output.txt; then + printf "Expected registry validation to fail before upgrading, but did not.\n\n" + cat output.txt + exit 1 + fi + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-native-helm-v2: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: native-helm-v2 + INITIAL_VERSION: '0.1.1' + UPGRADE_VERSION: '0.2.1' + steps: + - name: Checkout + uses: actions/checkout@v4 + + - uses: azure/setup-helm@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret 
docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ secrets.NATIVE_HELM_V2_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --app-version-label "$INITIAL_VERSION" \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$INITIAL_VERSION" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # validate that helm charts installed using both native helm workflow were deployed via the helm CLI correctly + + if ! helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep -q my-chart-release; then + printf "my-chart-release helm release not found in %s namespace\n\n" "$APP_SLUG" + helm ls -n "$APP_SLUG" + exit 1 + fi + + if ! 
helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep -q my-other-chart-release; then + printf "my-other-chart-release helm release not found in %s namespace\n\n" "$APP_SLUG" + helm ls -n "$APP_SLUG" + exit 1 + fi + + # validate that `helm get values` works for the v1beta2 chart + + if ! helm get values my-chart-release -n "$APP_SLUG" | grep -q my-value; then + printf "my-value not found in helm values for my-chart-release in %s namespace\n\n" "$APP_SLUG" + helm get values my-chart-release -n "$APP_SLUG" + exit 1 + fi + + # upgrade the app version + + ./bin/kots upstream upgrade "$APP_SLUG" -n "$APP_SLUG" --deploy + + # wait for my-chart-release to be uninstalled and my-other-chart-release to be upgraded to revision 2 - # COUNTER=1 - # while [ "$(helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep my-chart-release)" != "" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for my-chart-release to be uninstalled" - # helm ls -n "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # COUNTER=1 - # while [ "$(helm ls -n "$APP_SLUG" | grep my-other-chart-release | awk 'NR>0{print $3}')" != "2" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for my-other-chart-release to be upgraded to revision 2" - # helm ls -n "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # validate that `helm get values` works for the upgraded v1beta2 chart - # if ! 
helm get values my-other-chart-release -n "$APP_SLUG" | grep -q my-value; then - # printf "my-value not found in helm values for my-chart-release in %s namespace\n\n" "$APP_SLUG" - # helm get values my-chart-release -n "$APP_SLUG" - # exit 1 - # fi - - # # wait for the app to be ready again - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$UPGRADE_VERSION" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # validate that the conditional chart is installed - # COUNTER=1 - # while [ "$(helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep my-conditional-chart-release)" == "" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for my-conditional-chart-release to be installed" - # helm ls -n "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # toggle the config option to exclude the conditional chart - # ./bin/kots set config "$APP_SLUG" install_conditional_chart=0 --deploy --namespace "$APP_SLUG" - - # # wait for my-conditional-chart-release to be uninstalled - # COUNTER=1 - # while [ "$(helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep my-conditional-chart-release)" != "" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for my-conditional-chart-release to be uninstalled" - # helm ls -n "$APP_SLUG" 
- # exit 1 - # fi - # sleep 1 - # done - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-deployment-orchestration: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # env: - # APP_SLUG: deployment-orchestration - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - uses: azure/setup-helm@v3 - # with: - # token: ${{ secrets.GITHUB_TOKEN }} - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl create ns "$APP_SLUG" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server 
index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" - - # - name: run the test - # run: | - # set +e - # echo ${{ secrets.DEPLOYMENT_ORCHESTRATION_LICENSE }} | base64 -d > license.yaml - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$APP_SLUG" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$APP_SLUG" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit $EXIT_CODE - # fi - - # function wait_for_log { - # local counter=1 - # local timeout=30 - # local log_pattern="$1" - - # while ! kubectl logs deploy/kotsadm -c kotsadm -n "$APP_SLUG" | grep -i "$log_pattern"; do - # ((counter += 1)) - # if [ $counter -gt $timeout ]; then - # echo "Timed out waiting for log $log_pattern" - # exit 1 - # fi - # sleep 1 - # done - # } - - # wait_for_log "applying phase -9999" - - # wait_for_log "applying phase -3" - # wait_for_log "waiting for resource apiextensions.k8s.io/v1/CustomResourceDefinition/myresources.example.com in namespace $APP_SLUG to be ready" - - # wait_for_log "applying phase -2" - - # # validate that phase -1 has not deployed yet since we're waiting on the CR status fields - # if kubectl logs deploy/kotsadm -c kotsadm -n "$APP_SLUG" | grep -i "applying phase -1"; then - # printf "phase -1 was deployed before phase -2 completed" - # kubectl logs deploy/kotsadm -c kotsadm -n "$APP_SLUG" - # exit 1 - # fi - - # wait_for_log "waiting for resource example.com/v1beta1/MyResource/my-resource in namespace $APP_SLUG to have property .status.tasks.extract=true" - - # # set .status.tasks.extract=true - # kubectl patch myresources.example.com 
my-resource -n "$APP_SLUG" -p '{"status": {"tasks": {"extract": true}}}' --type=merge - - # wait_for_log "waiting for resource example.com/v1beta1/MyResource/my-resource in namespace $APP_SLUG to have property .status.tasks.transform=true" - - # # set .status.tasks.transform=true - # kubectl patch myresources.example.com my-resource -n "$APP_SLUG" -p '{"status": {"tasks": {"transform": true}}}' --type=merge - - # wait_for_log "waiting for resource example.com/v1beta1/MyResource/my-resource in namespace $APP_SLUG to have property .status.tasks.load=true" - - # # set .status.tasks.load=true - # kubectl patch myresources.example.com my-resource -n "$APP_SLUG" -p '{"status": {"tasks": {"load": true}}}' --type=merge - - # # now validate that the remaining phases are deployed (-1, 0, and 1) - # wait_for_log "applying phase -1" - # wait_for_log "waiting for resource apps/v1/Deployment/nginx-1 in namespace $APP_SLUG to be ready" - - # wait_for_log "applying phase 0" - # wait_for_log "waiting for resource apps/v1/Deployment/nginx-2 in namespace $APP_SLUG to be ready" - - # wait_for_log "applying phase 1" - # wait_for_log "waiting for resource /v1/Service/nginx-2 in namespace $APP_SLUG to be ready" - - # # wait for the app to be ready - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$APP_SLUG" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" - # exit 1 - # fi - # sleep 1 - # done - - # # remove the app - # ./bin/kots remove "$APP_SLUG" -n "$APP_SLUG" --undeploy - - # wait_for_log "deleting resources in phase -1" - # wait_for_log "deleting resources in phase 0" - # wait_for_log "deleting resources in phase 1" - # wait_for_log "deleting resources in phase 2" - # wait_for_log "deleting resources in phase 3" - - # # validate that the 
app reference was removed - # if [ "$(./bin/kots get apps --namespace "$APP_SLUG" --output=json | tr -d '\n')" != "[]" ]; then - # printf "App reference was not removed\n\n" - # exit 1 - # fi - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-replicated-sdk: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0}, - # {distribution: openshift, version: 4.13.0-okd} - # ] - # env: - # KOTS_NAMESPACE: replicated-sdk - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Create Cluster - # id: create-cluster - # uses: replicatedhq/replicated-actions/create-cluster@v1 - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # kubernetes-distribution: ${{ matrix.cluster.distribution }} - # kubernetes-version: ${{ matrix.cluster.version }} - # cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} - # timeout-minutes: '120' - # ttl: 2h - # export-kubeconfig: true - - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - - # - run: chmod +x bin/kots - - # - name: create namespace and dockerhub secret - # run: | - # kubectl 
create ns "$KOTS_NAMESPACE" - # kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$KOTS_NAMESPACE" - - # - name: run upgrade-to-replicated-sdk test - # id: upgrade-to-replicated-sdk - # env: - # APP_SLUG: upgrade-to-replicated-sdk - # run: | - # set +e - - # echo ${{ secrets.UPGRADE_TO_REPLICATED_SDK_LICENSE }} | base64 -d > license.yaml - # LICENSE_ID="$(grep -oP 'licenseID:\s*\K\w+' license.yaml)" - # echo "license-id=$LICENSE_ID" >> "$GITHUB_OUTPUT" - - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$KOTS_NAMESPACE" \ - # --shared-password password \ - # --app-version-label v1.0.0 \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$KOTS_NAMESPACE" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit $EXIT_CODE - # fi - - # # wait for the app to be ready - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$KOTS_NAMESPACE" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit 1 - # fi - # sleep 1 - # done - - # # upgrade the app to the new version - # ./bin/kots upstream upgrade "$APP_SLUG" -n "$KOTS_NAMESPACE" --deploy - # sleep 5 - - # # wait for the app to be ready again - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then 
- # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$KOTS_NAMESPACE" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit 1 - # fi - # sleep 1 - # done - - # # get the version of the replicated-sdk that is running and set it as an output - # REPLICATED_SDK_VERSION=$(kubectl get deploy replicated -n "$KOTS_NAMESPACE" -o jsonpath='{.spec.template.spec.containers[0].image}' | grep -oE '[^:]+$') - # echo "replicated-sdk-version=$REPLICATED_SDK_VERSION" >> "$GITHUB_OUTPUT" - - # - run: rm -rf ./replicated-sdk - # - name: Checkout replicated-sdk - # uses: actions/checkout@v4 - # with: - # repository: replicatedhq/replicated-sdk - # path: replicated-sdk - # ref: ${{ steps.upgrade-to-replicated-sdk.outputs.replicated-sdk-version }} - - # - name: Validate endpoints - # uses: ./replicated-sdk/.github/actions/validate-endpoints - # with: - # license-id: ${{ steps.upgrade-to-replicated-sdk.outputs.license-id }} - # license-fields: '[{"name":"expires_at","value": ""}]' - # integration-enabled: true - # namespace: "$KOTS_NAMESPACE" - - # - name: remove upgrade-to-replicated-sdk app - # env: - # APP_SLUG: upgrade-to-replicated-sdk - # run: | - # # remove the app - # ./bin/kots remove "$APP_SLUG" -n "$KOTS_NAMESPACE" --undeploy - - # # validate that the app reference was removed - # if [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" --output=json | tr -d '\n')" != "[]" ]; then - # printf "App reference was not removed\n\n" - # exit 1 - # fi - - # - name: run replicated-sdk-subchart-native-helm-v1 test - # id: replicated-sdk-subchart-native-helm-v1 - # env: - # APP_SLUG: replicated-sdk-subchart-native-helm-v1 - # run: | - # set +e - - # echo ${{ secrets.REPLICATED_SDK_SUBCHART_NATIVE_HELM_V1 }} | base64 -d > license.yaml - # LICENSE_ID="$(grep -oP 'licenseID:\s*\K\w+' license.yaml)" - # echo "license-id=$LICENSE_ID" >> "$GITHUB_OUTPUT" - - # ./bin/kots \ - # install "$APP_SLUG/automated" 
\ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$KOTS_NAMESPACE" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? - # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$KOTS_NAMESPACE" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit $EXIT_CODE - # fi - - # # wait for the app to be ready - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$KOTS_NAMESPACE" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit 1 - # fi - # sleep 1 - # done - - # # get the version of the replicated-sdk that is running and set it as an output - # REPLICATED_SDK_VERSION=$(kubectl get deploy replicated -n "$KOTS_NAMESPACE" -o jsonpath='{.spec.template.spec.containers[0].image}' | grep -oE '[^:]+$') - # echo "replicated-sdk-version=$REPLICATED_SDK_VERSION" >> "$GITHUB_OUTPUT" - - # - run: rm -rf ./replicated-sdk - # - name: Checkout replicated-sdk - # uses: actions/checkout@v4 - # with: - # repository: replicatedhq/replicated-sdk - # path: replicated-sdk - # ref: ${{ steps.replicated-sdk-subchart-native-helm-v1.outputs.replicated-sdk-version }} - - # - name: Validate endpoints - # uses: ./replicated-sdk/.github/actions/validate-endpoints - # with: - # license-id: ${{ steps.replicated-sdk-subchart-native-helm-v1.outputs.license-id }} - # license-fields: '[{"name":"expires_at","value": ""}]' - # integration-enabled: true - # namespace: "$KOTS_NAMESPACE" - - # - name: remove replicated-sdk-subchart-native-helm-v1 app - # env: - # APP_SLUG: replicated-sdk-subchart-native-helm-v1 - # run: | - # # remove 
the app - # ./bin/kots remove "$APP_SLUG" -n "$KOTS_NAMESPACE" --undeploy - - # # validate that the app reference was removed - # if [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" --output=json | tr -d '\n')" != "[]" ]; then - # printf "App reference was not removed\n\n" - # exit 1 - # fi - - # - name: run replicated-sdk-subchart-replicated-helm test - # id: replicated-sdk-subchart-replicated-helm - # env: - # APP_SLUG: replicated-sdk-subchart-replicated-helm - # run: | - # set +e - - # echo ${{ secrets.REPLICATED_SDK_SUBCHART_REPLICATED_HELM }} | base64 -d > license.yaml - # LICENSE_ID="$(grep -oP 'licenseID:\s*\K\w+' license.yaml)" - # echo "license-id=$LICENSE_ID" >> "$GITHUB_OUTPUT" - - # ./bin/kots \ - # install "$APP_SLUG/automated" \ - # --license-file license.yaml \ - # --no-port-forward \ - # --namespace "$KOTS_NAMESPACE" \ - # --shared-password password \ - # --kotsadm-registry ttl.sh \ - # --kotsadm-namespace automated-${{ github.run_id }} \ - # --kotsadm-tag 24h - - # EXIT_CODE=$? 
- # if [ $EXIT_CODE -ne 0 ]; then - # echo "------pods:" - # kubectl -n "$KOTS_NAMESPACE" get pods - # echo "------kotsadm logs" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit $EXIT_CODE - # fi - - # # wait for the app to be ready - # COUNTER=1 - # while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do - # ((COUNTER += 1)) - # if [ $COUNTER -gt 120 ]; then - # echo "Timed out waiting for app to be ready" - # ./bin/kots get apps --namespace "$KOTS_NAMESPACE" - # echo "kotsadm logs:" - # kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" - # exit 1 - # fi - # sleep 1 - # done - - # # get the version of the replicated-sdk that is running and set it as an output - # REPLICATED_SDK_VERSION=$(kubectl get deploy replicated -n "$KOTS_NAMESPACE" -o jsonpath='{.spec.template.spec.containers[0].image}' | grep -oE '[^:]+$') - # echo "replicated-sdk-version=$REPLICATED_SDK_VERSION" >> "$GITHUB_OUTPUT" - - # - run: rm -rf ./replicated-sdk - # - name: Checkout replicated-sdk - # uses: actions/checkout@v4 - # with: - # repository: replicatedhq/replicated-sdk - # path: replicated-sdk - # ref: ${{ steps.replicated-sdk-subchart-replicated-helm.outputs.replicated-sdk-version }} - - # - name: Validate endpoints - # uses: ./replicated-sdk/.github/actions/validate-endpoints - # with: - # license-id: ${{ steps.replicated-sdk-subchart-replicated-helm.outputs.license-id }} - # license-fields: '[{"name":"expires_at","value": ""}]' - # integration-enabled: true - # namespace: "$KOTS_NAMESPACE" - # deployed-via-kubectl: true - - # - name: remove replicated-sdk-subchart-replicated-helm app - # env: - # APP_SLUG: replicated-sdk-subchart-replicated-helm - # run: | - # # remove the app - # ./bin/kots remove "$APP_SLUG" -n "$KOTS_NAMESPACE" --undeploy - - # # validate that the app reference was removed - # if [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" --output=json | tr -d '\n')" != "[]" 
]; then - # printf "App reference was not removed\n\n" - # exit 1 - # fi - - # - name: Generate support bundle on failure - # if: failure() - # uses: ./.github/actions/generate-support-bundle - # with: - # kots-namespace: "$APP_SLUG" - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - - # - name: Remove Cluster - # id: remove-cluster - # uses: replicatedhq/replicated-actions/remove-cluster@v1 - # if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} - # continue-on-error: true - # with: - # api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} - # cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} - - - # validate-support-bundle: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0}, - # {distribution: openshift, version: 4.13.0-okd} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'Support Bundle' - # kots-namespace: 'support-bundle' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ 
secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-gitops: - # runs-on: ubuntu-20.04 - # needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] - # strategy: - # fail-fast: false - # matrix: - # cluster: [ - # {distribution: kind, version: v1.28.0} - # ] - # steps: - # - name: Checkout - # uses: actions/checkout@v4 - # - name: download e2e deps - # uses: actions/download-artifact@v4 - # with: - # name: e2e - # path: e2e/bin/ - # - run: docker load -i e2e/bin/e2e-deps.tar - # - run: chmod +x e2e/bin/* - # - name: download kots binary - # uses: actions/download-artifact@v4 - # with: - # name: kots - # path: bin/ - # - run: chmod +x bin/* - # - uses: ./.github/actions/kots-e2e - # with: - # test-focus: 'GitOps' - # kots-namespace: 'gitops' - # k8s-distribution: ${{ matrix.cluster.distribution }} - # k8s-version: ${{ matrix.cluster.version }} - # testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' - # testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} - # aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' - # aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' - # replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' - # kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' - # kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' - - - # validate-pr-tests: - # runs-on: ubuntu-20.04 - # needs: - # # static and unit tests - # - lint-web - # - unit-test-web - # - vet-kots - # - ci-test-kots - # # testim tests - # - validate-existing-online-install-minimal - # - validate-smoke-test - # - validate-backup-and-restore - # - validate-no-required-config - # - 
validate-version-history-pagination - # - validate-change-license - # - validate-min-kots-version - # - validate-target-kots-version - # - validate-range-kots-version - # - validate-multi-app-backup-and-restore - # - validate-multi-app-install - # - validate-airgap-smoke-test - # - validate-config - # - validate-support-bundle - # - validate-gitops - # # non-testim tests - # - validate-minimal-rbac - # - validate-minimal-rbac-override - # - validate-multi-namespace - # - validate-kots-pull - # - validate-app-version-label - # - validate-helm-install-order - # - validate-no-redeploy-on-restart - # - validate-kubernetes-installer-preflight - # - validate-kots-upgrade - # - validate-remove-app - # - validate-registry-check - # - validate-kots-helm-release-secret-migration - # - validate-native-helm-v2 - # - validate-deployment-orchestration - # - validate-replicated-sdk - # - validate-strict-preflight-checks - # # cli-only tests - # - validate-kots-push-images-anonymous - # steps: - # - run: echo "All PR tests passed" - - - # # this job will validate that the kurl add-on validation did not fail and that all pr-tests succeed - # # it is used for the github branch protection rule - # validate-success: - # runs-on: ubuntu-20.04 - # needs: - # - validate-kurl-addon - # - validate-pr-tests - # if: always() - # steps: - # # https://docs.github.com/en/actions/learn-github-actions/contexts#needs-context - # # if the validate-kurl-addon job failed or was cancelled, this job will fail. 
it's ok if this job was skipped - # - name: fail if validate-kurl-addon job failed - # if: needs.validate-kurl-addon.result == 'failure' || needs.validate-kurl-addon.result == 'cancelled' - # run: exit 1 - # # if the validate-pr-tests job was not successful, this job will fail - # - name: fail if validate-pr-tests job was not successful - # if: needs.validate-pr-tests.result != 'success' - # run: exit 1 - # # if the validate-pr-tests job was successful, this job will succeed - # - name: succeed if validate-pr-tests job succeeded - # if: needs.validate-pr-tests.result == 'success' - # run: echo "Validation succeeded" + COUNTER=1 + while [ "$(helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep my-chart-release)" != "" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for my-chart-release to be uninstalled" + helm ls -n "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + COUNTER=1 + while [ "$(helm ls -n "$APP_SLUG" | grep my-other-chart-release | awk 'NR>0{print $3}')" != "2" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for my-other-chart-release to be upgraded to revision 2" + helm ls -n "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # validate that `helm get values` works for the upgraded v1beta2 chart + if ! 
helm get values my-other-chart-release -n "$APP_SLUG" | grep -q my-value; then + printf "my-value not found in helm values for my-chart-release in %s namespace\n\n" "$APP_SLUG" + helm get values my-chart-release -n "$APP_SLUG" + exit 1 + fi + + # wait for the app to be ready again + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $3}')" != "$UPGRADE_VERSION" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # validate that the conditional chart is installed + COUNTER=1 + while [ "$(helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep my-conditional-chart-release)" == "" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for my-conditional-chart-release to be installed" + helm ls -n "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # toggle the config option to exclude the conditional chart + ./bin/kots set config "$APP_SLUG" install_conditional_chart=0 --deploy --namespace "$APP_SLUG" + + # wait for my-conditional-chart-release to be uninstalled + COUNTER=1 + while [ "$(helm ls -n "$APP_SLUG" | awk 'NR>1{print $1}' | grep my-conditional-chart-release)" != "" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for my-conditional-chart-release to be uninstalled" + helm ls -n "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + - name: Generate support bundle on failure + if: failure() + 
uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-deployment-orchestration: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + env: + APP_SLUG: deployment-orchestration + steps: + - name: Checkout + uses: actions/checkout@v4 + + - uses: azure/setup-helm@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$APP_SLUG" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$APP_SLUG" + + - name: run the test + run: | + set +e + echo ${{ 
secrets.DEPLOYMENT_ORCHESTRATION_LICENSE }} | base64 -d > license.yaml + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$APP_SLUG" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$APP_SLUG" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit $EXIT_CODE + fi + + function wait_for_log { + local counter=1 + local timeout=30 + local log_pattern="$1" + + while ! kubectl logs deploy/kotsadm -c kotsadm -n "$APP_SLUG" | grep -i "$log_pattern"; do + ((counter += 1)) + if [ $counter -gt $timeout ]; then + echo "Timed out waiting for log $log_pattern" + exit 1 + fi + sleep 1 + done + } + + wait_for_log "applying phase -9999" + + wait_for_log "applying phase -3" + wait_for_log "waiting for resource apiextensions.k8s.io/v1/CustomResourceDefinition/myresources.example.com in namespace $APP_SLUG to be ready" + + wait_for_log "applying phase -2" + + # validate that phase -1 has not deployed yet since we're waiting on the CR status fields + if kubectl logs deploy/kotsadm -c kotsadm -n "$APP_SLUG" | grep -i "applying phase -1"; then + printf "phase -1 was deployed before phase -2 completed" + kubectl logs deploy/kotsadm -c kotsadm -n "$APP_SLUG" + exit 1 + fi + + wait_for_log "waiting for resource example.com/v1beta1/MyResource/my-resource in namespace $APP_SLUG to have property .status.tasks.extract=true" + + # set .status.tasks.extract=true + kubectl patch myresources.example.com my-resource -n "$APP_SLUG" -p '{"status": {"tasks": {"extract": true}}}' --type=merge + + wait_for_log "waiting for resource example.com/v1beta1/MyResource/my-resource in namespace $APP_SLUG to have property .status.tasks.transform=true" + + # set .status.tasks.transform=true + kubectl patch 
myresources.example.com my-resource -n "$APP_SLUG" -p '{"status": {"tasks": {"transform": true}}}' --type=merge + + wait_for_log "waiting for resource example.com/v1beta1/MyResource/my-resource in namespace $APP_SLUG to have property .status.tasks.load=true" + + # set .status.tasks.load=true + kubectl patch myresources.example.com my-resource -n "$APP_SLUG" -p '{"status": {"tasks": {"load": true}}}' --type=merge + + # now validate that the remaining phases are deployed (-1, 0, and 1) + wait_for_log "applying phase -1" + wait_for_log "waiting for resource apps/v1/Deployment/nginx-1 in namespace $APP_SLUG to be ready" + + wait_for_log "applying phase 0" + wait_for_log "waiting for resource apps/v1/Deployment/nginx-2 in namespace $APP_SLUG to be ready" + + wait_for_log "applying phase 1" + wait_for_log "waiting for resource /v1/Service/nginx-2 in namespace $APP_SLUG to be ready" + + # wait for the app to be ready + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$APP_SLUG" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$APP_SLUG" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$APP_SLUG" + exit 1 + fi + sleep 1 + done + + # remove the app + ./bin/kots remove "$APP_SLUG" -n "$APP_SLUG" --undeploy + + wait_for_log "deleting resources in phase -1" + wait_for_log "deleting resources in phase 0" + wait_for_log "deleting resources in phase 1" + wait_for_log "deleting resources in phase 2" + wait_for_log "deleting resources in phase 3" + + # validate that the app reference was removed + if [ "$(./bin/kots get apps --namespace "$APP_SLUG" --output=json | tr -d '\n')" != "[]" ]; then + printf "App reference was not removed\n\n" + exit 1 + fi + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ 
secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-replicated-sdk: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0}, + {distribution: openshift, version: 4.13.0-okd} + ] + env: + KOTS_NAMESPACE: replicated-sdk + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Cluster + id: create-cluster + uses: replicatedhq/replicated-actions/create-cluster@v1 + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + kubernetes-distribution: ${{ matrix.cluster.distribution }} + kubernetes-version: ${{ matrix.cluster.version }} + cluster-name: automated-kots-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }} + timeout-minutes: '120' + ttl: 2h + export-kubeconfig: true + + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + + - run: chmod +x bin/kots + + - name: create namespace and dockerhub secret + run: | + kubectl create ns "$KOTS_NAMESPACE" + kubectl create secret docker-registry kotsadm-dockerhub --docker-server index.docker.io --docker-username "${{ secrets.E2E_DOCKERHUB_USERNAME }}" --docker-password "${{ secrets.E2E_DOCKERHUB_PASSWORD }}" --namespace "$KOTS_NAMESPACE" + + - name: run upgrade-to-replicated-sdk test + id: upgrade-to-replicated-sdk + env: + APP_SLUG: upgrade-to-replicated-sdk + run: | + set +e + + echo ${{ secrets.UPGRADE_TO_REPLICATED_SDK_LICENSE }} | 
base64 -d > license.yaml + LICENSE_ID="$(grep -oP 'licenseID:\s*\K\w+' license.yaml)" + echo "license-id=$LICENSE_ID" >> "$GITHUB_OUTPUT" + + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$KOTS_NAMESPACE" \ + --shared-password password \ + --app-version-label v1.0.0 \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$KOTS_NAMESPACE" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit $EXIT_CODE + fi + + # wait for the app to be ready + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$KOTS_NAMESPACE" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit 1 + fi + sleep 1 + done + + # upgrade the app to the new version + ./bin/kots upstream upgrade "$APP_SLUG" -n "$KOTS_NAMESPACE" --deploy + sleep 5 + + # wait for the app to be ready again + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$KOTS_NAMESPACE" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit 1 + fi + sleep 1 + done + + # get the version of the replicated-sdk that is running and set it as an output + REPLICATED_SDK_VERSION=$(kubectl get deploy replicated -n "$KOTS_NAMESPACE" -o jsonpath='{.spec.template.spec.containers[0].image}' | grep -oE '[^:]+$') + echo "replicated-sdk-version=$REPLICATED_SDK_VERSION" >> "$GITHUB_OUTPUT" + + - run: rm -rf 
./replicated-sdk + - name: Checkout replicated-sdk + uses: actions/checkout@v4 + with: + repository: replicatedhq/replicated-sdk + path: replicated-sdk + ref: ${{ steps.upgrade-to-replicated-sdk.outputs.replicated-sdk-version }} + + - name: Validate endpoints + uses: ./replicated-sdk/.github/actions/validate-endpoints + with: + license-id: ${{ steps.upgrade-to-replicated-sdk.outputs.license-id }} + license-fields: '[{"name":"expires_at","value": ""}]' + integration-enabled: true + namespace: "$KOTS_NAMESPACE" + + - name: remove upgrade-to-replicated-sdk app + env: + APP_SLUG: upgrade-to-replicated-sdk + run: | + # remove the app + ./bin/kots remove "$APP_SLUG" -n "$KOTS_NAMESPACE" --undeploy + + # validate that the app reference was removed + if [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" --output=json | tr -d '\n')" != "[]" ]; then + printf "App reference was not removed\n\n" + exit 1 + fi + + - name: run replicated-sdk-subchart-native-helm-v1 test + id: replicated-sdk-subchart-native-helm-v1 + env: + APP_SLUG: replicated-sdk-subchart-native-helm-v1 + run: | + set +e + + echo ${{ secrets.REPLICATED_SDK_SUBCHART_NATIVE_HELM_V1 }} | base64 -d > license.yaml + LICENSE_ID="$(grep -oP 'licenseID:\s*\K\w+' license.yaml)" + echo "license-id=$LICENSE_ID" >> "$GITHUB_OUTPUT" + + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$KOTS_NAMESPACE" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? 
+ if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$KOTS_NAMESPACE" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit $EXIT_CODE + fi + + # wait for the app to be ready + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$KOTS_NAMESPACE" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit 1 + fi + sleep 1 + done + + # get the version of the replicated-sdk that is running and set it as an output + REPLICATED_SDK_VERSION=$(kubectl get deploy replicated -n "$KOTS_NAMESPACE" -o jsonpath='{.spec.template.spec.containers[0].image}' | grep -oE '[^:]+$') + echo "replicated-sdk-version=$REPLICATED_SDK_VERSION" >> "$GITHUB_OUTPUT" + + - run: rm -rf ./replicated-sdk + - name: Checkout replicated-sdk + uses: actions/checkout@v4 + with: + repository: replicatedhq/replicated-sdk + path: replicated-sdk + ref: ${{ steps.replicated-sdk-subchart-native-helm-v1.outputs.replicated-sdk-version }} + + - name: Validate endpoints + uses: ./replicated-sdk/.github/actions/validate-endpoints + with: + license-id: ${{ steps.replicated-sdk-subchart-native-helm-v1.outputs.license-id }} + license-fields: '[{"name":"expires_at","value": ""}]' + integration-enabled: true + namespace: "$KOTS_NAMESPACE" + + - name: remove replicated-sdk-subchart-native-helm-v1 app + env: + APP_SLUG: replicated-sdk-subchart-native-helm-v1 + run: | + # remove the app + ./bin/kots remove "$APP_SLUG" -n "$KOTS_NAMESPACE" --undeploy + + # validate that the app reference was removed + if [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" --output=json | tr -d '\n')" != "[]" ]; then + printf "App reference was not removed\n\n" + exit 1 + fi + + - name: run replicated-sdk-subchart-replicated-helm 
test + id: replicated-sdk-subchart-replicated-helm + env: + APP_SLUG: replicated-sdk-subchart-replicated-helm + run: | + set +e + + echo ${{ secrets.REPLICATED_SDK_SUBCHART_REPLICATED_HELM }} | base64 -d > license.yaml + LICENSE_ID="$(grep -oP 'licenseID:\s*\K\w+' license.yaml)" + echo "license-id=$LICENSE_ID" >> "$GITHUB_OUTPUT" + + ./bin/kots \ + install "$APP_SLUG/automated" \ + --license-file license.yaml \ + --no-port-forward \ + --namespace "$KOTS_NAMESPACE" \ + --shared-password password \ + --kotsadm-registry ttl.sh \ + --kotsadm-namespace automated-${{ github.run_id }} \ + --kotsadm-tag 24h + + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "------pods:" + kubectl -n "$KOTS_NAMESPACE" get pods + echo "------kotsadm logs" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit $EXIT_CODE + fi + + # wait for the app to be ready + COUNTER=1 + while [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" | awk 'NR>1{print $2}')" != "ready" ]; do + ((COUNTER += 1)) + if [ $COUNTER -gt 120 ]; then + echo "Timed out waiting for app to be ready" + ./bin/kots get apps --namespace "$KOTS_NAMESPACE" + echo "kotsadm logs:" + kubectl logs -l app=kotsadm --tail=100 --namespace "$KOTS_NAMESPACE" + exit 1 + fi + sleep 1 + done + + # get the version of the replicated-sdk that is running and set it as an output + REPLICATED_SDK_VERSION=$(kubectl get deploy replicated -n "$KOTS_NAMESPACE" -o jsonpath='{.spec.template.spec.containers[0].image}' | grep -oE '[^:]+$') + echo "replicated-sdk-version=$REPLICATED_SDK_VERSION" >> "$GITHUB_OUTPUT" + + - run: rm -rf ./replicated-sdk + - name: Checkout replicated-sdk + uses: actions/checkout@v4 + with: + repository: replicatedhq/replicated-sdk + path: replicated-sdk + ref: ${{ steps.replicated-sdk-subchart-replicated-helm.outputs.replicated-sdk-version }} + + - name: Validate endpoints + uses: ./replicated-sdk/.github/actions/validate-endpoints + with: + license-id: ${{ 
steps.replicated-sdk-subchart-replicated-helm.outputs.license-id }} + license-fields: '[{"name":"expires_at","value": ""}]' + integration-enabled: true + namespace: "$KOTS_NAMESPACE" + deployed-via-kubectl: true + + - name: remove replicated-sdk-subchart-replicated-helm app + env: + APP_SLUG: replicated-sdk-subchart-replicated-helm + run: | + # remove the app + ./bin/kots remove "$APP_SLUG" -n "$KOTS_NAMESPACE" --undeploy + + # validate that the app reference was removed + if [ "$(./bin/kots get apps --namespace "$KOTS_NAMESPACE" --output=json | tr -d '\n')" != "[]" ]; then + printf "App reference was not removed\n\n" + exit 1 + fi + + - name: Generate support bundle on failure + if: failure() + uses: ./.github/actions/generate-support-bundle + with: + kots-namespace: "$APP_SLUG" + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + + - name: Remove Cluster + id: remove-cluster + uses: replicatedhq/replicated-actions/remove-cluster@v1 + if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }} + continue-on-error: true + with: + api-token: ${{ secrets.C11Y_MATRIX_TOKEN }} + cluster-id: ${{ steps.create-cluster.outputs.cluster-id }} + + + validate-support-bundle: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0}, + {distribution: openshift, version: 4.13.0-okd} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: 
./.github/actions/kots-e2e + with: + test-focus: 'Support Bundle' + kots-namespace: 'support-bundle' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-gitops: + runs-on: ubuntu-20.04 + needs: [ enable-tests, can-run-ci, build-kots, build-kotsadm, build-e2e, build-kurl-proxy, build-migrations, push-minio, push-rqlite ] + strategy: + fail-fast: false + matrix: + cluster: [ + {distribution: kind, version: v1.28.0} + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: download e2e deps + uses: actions/download-artifact@v4 + with: + name: e2e + path: e2e/bin/ + - run: docker load -i e2e/bin/e2e-deps.tar + - run: chmod +x e2e/bin/* + - name: download kots binary + uses: actions/download-artifact@v4 + with: + name: kots + path: bin/ + - run: chmod +x bin/* + - uses: ./.github/actions/kots-e2e + with: + test-focus: 'GitOps' + kots-namespace: 'gitops' + k8s-distribution: ${{ matrix.cluster.distribution }} + k8s-version: ${{ matrix.cluster.version }} + testim-access-token: '${{ secrets.TESTIM_ACCESS_TOKEN }}' + testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} + aws-access-key-id: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_ACCESS_KEY_ID }}' + aws-secret-access-key: '${{ secrets.E2E_SUPPORT_BUNDLE_AWS_SECRET_ACCESS_KEY }}' + replicated-api-token: '${{ secrets.C11Y_MATRIX_TOKEN }}' + kots-dockerhub-username: '${{ secrets.E2E_DOCKERHUB_USERNAME }}' + kots-dockerhub-password: '${{ 
secrets.E2E_DOCKERHUB_PASSWORD }}' + + + validate-pr-tests: + runs-on: ubuntu-20.04 + needs: + # static and unit tests + - lint-web + - unit-test-web + - vet-kots + - ci-test-kots + # testim tests + - validate-existing-online-install-minimal + - validate-smoke-test + - validate-backup-and-restore + - validate-no-required-config + - validate-version-history-pagination + - validate-change-license + - validate-min-kots-version + - validate-target-kots-version + - validate-range-kots-version + - validate-multi-app-backup-and-restore + - validate-multi-app-install + - validate-airgap-smoke-test + - validate-config + - validate-support-bundle + - validate-gitops + # non-testim tests + - validate-minimal-rbac + - validate-minimal-rbac-override + - validate-multi-namespace + - validate-kots-pull + - validate-app-version-label + - validate-helm-install-order + - validate-no-redeploy-on-restart + - validate-kubernetes-installer-preflight + - validate-kots-upgrade + - validate-remove-app + - validate-registry-check + - validate-kots-helm-release-secret-migration + - validate-native-helm-v2 + - validate-deployment-orchestration + - validate-replicated-sdk + - validate-strict-preflight-checks + # cli-only tests + - validate-kots-push-images-anonymous + steps: + - run: echo "All PR tests passed" + + + # this job will validate that the kurl add-on validation did not fail and that all pr-tests succeed + # it is used for the github branch protection rule + validate-success: + runs-on: ubuntu-20.04 + needs: + - validate-kurl-addon + - validate-pr-tests + if: always() + steps: + # https://docs.github.com/en/actions/learn-github-actions/contexts#needs-context + # if the validate-kurl-addon job failed or was cancelled, this job will fail. 
it's ok if this job was skipped + - name: fail if validate-kurl-addon job failed + if: needs.validate-kurl-addon.result == 'failure' || needs.validate-kurl-addon.result == 'cancelled' + run: exit 1 + # if the validate-pr-tests job was not successful, this job will fail + - name: fail if validate-pr-tests job was not successful + if: needs.validate-pr-tests.result != 'success' + run: exit 1 + # if the validate-pr-tests job was successful, this job will succeed + - name: succeed if validate-pr-tests job succeeded + if: needs.validate-pr-tests.result == 'success' + run: echo "Validation succeeded" diff --git a/.github/workflows/regression.yaml b/.github/workflows/regression.yaml index c3368248af..fd03baba48 100644 --- a/.github/workflows/regression.yaml +++ b/.github/workflows/regression.yaml @@ -95,19 +95,23 @@ jobs: token: ${{ secrets.E2E_GH_PAT }} path: automation ref: main + - name: Download kots binary uses: actions/download-artifact@v4 with: name: kots path: ./automation/jumpbox/bin - - name: Download kotsadm release + + - name: Download kotsadm bundle uses: actions/download-artifact@v4 with: - name: kotsadm-release - path: ./automation/jumpbox/docker-archive - - name: Make kotsadm airgap archive with minio image - working-directory: automation/jumpbox - run: tar czf ./kotsadm.tar.gz -C ./ ./docker-archive + name: kotsadm-bundle + path: ./automation/jumpbox/kotsadm-bundle + + - name: Make kotsadm airgap archive + working-directory: automation/jumpbox/kotsadm-bundle + run: tar -czf ../kotsadm.tar.gz * + - name: Initialize terraform if: always() working-directory: automation/jumpbox @@ -115,12 +119,14 @@ jobs: terraform init terraform workspace new "$WORKSPACE" || true terraform workspace select "$WORKSPACE" + - name: Create Jumpbox Environment if: always() working-directory: automation/jumpbox run: | export TF_VAR_expires_on="${{ needs.get-workspace-expiration.outputs.expiration }}" terraform apply --auto-approve + - name: Notify Slack if: failure() && 
github.ref_name == 'main' uses: 8398a7/action-slack@v3 @@ -228,6 +234,7 @@ jobs: token: ${{ secrets.E2E_GH_PAT }} path: automation ref: main + - name: Initialize terraform if: always() working-directory: automation/cluster @@ -235,10 +242,12 @@ jobs: terraform init -backend-config ${{ matrix.test.backend_config }} terraform workspace new "$WORKSPACE" || true terraform workspace select "$WORKSPACE" + - name: Destroy test environment before re-run if: always() && github.run_attempt > 1 working-directory: automation/cluster run: ./${{ matrix.test.terraform_script }} destroy + - name: Create test environment if: always() working-directory: automation/cluster @@ -256,6 +265,7 @@ jobs: export TF_VAR_testim_branch="master" export TF_VAR_expires_on="${{ needs.get-workspace-expiration.outputs.expiration }}" ./${{ matrix.test.terraform_script }} apply + - name: Wait for instance to be ready working-directory: automation/cluster run: | @@ -265,12 +275,14 @@ jobs: do echo "waiting for instance to become ready" && sleep 1 done + - name: Run the test working-directory: automation/cluster run: | terraform output -raw jumpbox_private_key > ssh.pem chmod 600 ssh.pem ssh -i ssh.pem "ubuntu@$(terraform output -raw jumpbox_public_ip)" -oStrictHostKeyChecking=no -oServerAliveInterval=60 -oServerAliveCountMax=10 "ssh -tt ubuntu@$(terraform output -raw control_plane_private_ip) -oServerAliveInterval=60 -oServerAliveCountMax=10 -oConnectionAttempts=30 \"sudo /tmp/start.sh\"" + - name: Notify Slack if: always() && github.ref_name == 'main' uses: 8398a7/action-slack@v3 @@ -287,6 +299,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.KOTS_BUILD_STATUS_SLACK_WEBHOOK_URL }} MATRIX_CONTEXT: ${{ toJson(matrix) }} + cleanup: needs: [get-workspace-name, tests] runs-on: ubuntu-20.04 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 1e584db6aa..5c26c7f0ac 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -179,30 +179,45 @@ jobs: 
registry-username: ${{ secrets.DOCKERHUB_USER }} registry-password: ${{ secrets.DOCKERHUB_PASSWORD }} - build-release: + build-kotsadm-bundle: runs-on: ubuntu-20.04 needs: [generate-tag, build-migrations, build-kotsadm] steps: - name: Checkout uses: actions/checkout@v4 - - name: Read image tags from env file - uses: falti/dotenv-action@v1 - id: dotenv - with: - path: .image.env + - name: Run bundle registry + run: | + mkdir -p kotsadm-bundle/images + mkdir -p kotsadm-bundle-nominio/images + docker run -d -p 5000:5000 --restart=always --name registry -v "$(pwd)/kotsadm-bundle/images":/var/lib/registry registry:2 + docker run -d -p 5001:5000 --restart=always --name registry-nominio -v "$(pwd)/kotsadm-bundle-nominio/images":/var/lib/registry registry:2 + + - name: Build kotsadm bundle + env: + BUNDLE_DIR: kotsadm-bundle + BUNDLE_REGISTRY: localhost:5000 + GIT_TAG: ${{ needs.generate-tag.outputs.tag }} + run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make kotsadm-bundle - - name: Build tagged release + - name: Build kotsadm bundle without minio env: + BUNDLE_DIR: kotsadm-bundle-nominio + BUNDLE_REGISTRY: localhost:5001 GIT_TAG: ${{ needs.generate-tag.outputs.tag }} - DOCKER_CONFIG: ./.docker - run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make build-release + run: mapfile -t envs < <(grep -v '#.*' < .image.env) && export "${envs[@]}" && make kotsadm-bundle-nominio - - name: Upload kotsadm release + - name: Upload kotsadm bundle uses: actions/upload-artifact@v4 with: - name: kotsadm-release - path: ./bin/docker-archive + name: kotsadm-bundle + path: kotsadm-bundle + + - name: Upload kotsadm bundle without minio + uses: actions/upload-artifact@v4 + with: + name: kotsadm-bundle-nominio + path: kotsadm-bundle-nominio goreleaser: runs-on: ubuntu-20.04 @@ -264,7 +279,7 @@ jobs: generate-kurl-addon: runs-on: ubuntu-20.04 - needs: [ generate-tag, build-kurl-proxy, build-kots, build-release ] + needs: [ 
generate-tag, build-kurl-proxy, build-kots, build-kotsadm-bundle ] outputs: addon_package_url: ${{ steps.addon-generate.outputs.addon_package_url }} env: @@ -373,27 +388,35 @@ jobs: build-airgap: runs-on: ubuntu-20.04 if: github.ref_type != 'branch' - needs: [goreleaser, generate-tag, build-release] + needs: [goreleaser, generate-tag, build-kotsadm-bundle] steps: - - name: Download kotsadm release + - name: Download kotsadm bundle uses: actions/download-artifact@v4 with: - name: kotsadm-release - path: ./docker-archive - - name: Make kotsadm airgap archive with minio image - run: | - tar czf ./kotsadm.tar.gz -C ./ ./docker-archive - - name: Upload airgap bundle with minio image + name: kotsadm-bundle + path: kotsadm-bundle + + - name: Download kotsadm bundle without minio + uses: actions/download-artifact@v4 + with: + name: kotsadm-bundle-nominio + path: kotsadm-bundle-nominio + + - name: Make kotsadm airgap archive + working-directory: kotsadm-bundle + run: tar -czvf ../kotsadm.tar.gz * + + - name: Make kotsadm airgap archive without minio + working-directory: kotsadm-bundle-nominio + run: tar -czvf ../kotsadm-nominio.tar.gz * + + - name: Upload airgap archive uses: softprops/action-gh-release@v1 with: tag_name: ${{ needs.generate-tag.outputs.tag }} files: ./kotsadm.tar.gz - - name: Make kotsadm airgap archive without minio image - run: | - rm -rf ./docker-archive/minio - rm -f ./kotsadm.tar.gz - tar czf ./kotsadm-nominio.tar.gz -C ./ ./docker-archive - - name: Upload airgap bundle without minio image + + - name: Upload airgap archive without minio uses: softprops/action-gh-release@v1 with: tag_name: ${{ needs.generate-tag.outputs.tag }} @@ -426,7 +449,7 @@ jobs: regression-test: if: github.ref_type == 'branch' - needs: [ regression-test-setup, generate-tag, build-kots, generate-kurl-addon, build-release ] + needs: [ regression-test-setup, generate-tag, build-kots, generate-kurl-addon, build-kotsadm-bundle ] uses: ./.github/workflows/regression.yaml with: 
version_tag_old: ${{ needs.regression-test-setup.outputs.last_release_tag }} diff --git a/Makefile b/Makefile index 527cfba91b..580cfe4692 100644 --- a/Makefile +++ b/Makefile @@ -128,10 +128,8 @@ all-ttl.sh: build-ttl.sh .PHONY: kotsadm-bundle kotsadm-bundle: - # skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm:${GIT_TAG} - # skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm-migrations:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm-migrations:${GIT_TAG} - skopeo copy --all --dest-tls-verify=false docker://ttl.sh/automated-7670109381/kotsadm:24h docker://${BUNDLE_REGISTRY}/kotsadm:v1.107.0 - skopeo copy --all --dest-tls-verify=false docker://ttl.sh/automated-7670109381/kotsadm-migrations:24h docker://${BUNDLE_REGISTRY}/kotsadm-migrations:v1.107.0 + skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm:${GIT_TAG} + skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm-migrations:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm-migrations:${GIT_TAG} skopeo copy --all --dest-tls-verify=false docker://kotsadm/dex:${DEX_TAG} docker://${BUNDLE_REGISTRY}/dex:${DEX_TAG} skopeo copy --all --dest-tls-verify=false docker://kotsadm/minio:${MINIO_TAG} docker://${BUNDLE_REGISTRY}/minio:${MINIO_TAG} skopeo copy --all --dest-tls-verify=false docker://kotsadm/rqlite:${RQLITE_TAG} docker://${BUNDLE_REGISTRY}/rqlite:${RQLITE_TAG} @@ -141,10 +139,8 @@ kotsadm-bundle: .PHONY: kotsadm-bundle-nominio kotsadm-bundle-nominio: - # skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm:${GIT_TAG} - # skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm-migrations:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm-migrations:${GIT_TAG} - skopeo copy --all --dest-tls-verify=false docker://ttl.sh/automated-7670109381/kotsadm:24h docker://${BUNDLE_REGISTRY}/kotsadm:v1.107.0 - 
skopeo copy --all --dest-tls-verify=false docker://ttl.sh/automated-7670109381/kotsadm-migrations:24h docker://${BUNDLE_REGISTRY}/kotsadm-migrations:v1.107.0 + skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm:${GIT_TAG} + skopeo copy --all --dest-tls-verify=false docker://kotsadm/kotsadm-migrations:${GIT_TAG} docker://${BUNDLE_REGISTRY}/kotsadm-migrations:${GIT_TAG} skopeo copy --all --dest-tls-verify=false docker://kotsadm/dex:${DEX_TAG} docker://${BUNDLE_REGISTRY}/dex:${DEX_TAG} skopeo copy --all --dest-tls-verify=false docker://kotsadm/rqlite:${RQLITE_TAG} docker://${BUNDLE_REGISTRY}/rqlite:${RQLITE_TAG} skopeo copy --all --dest-tls-verify=false docker://replicated/local-volume-provider:${LVP_TAG} docker://${BUNDLE_REGISTRY}/local-volume-provider:${LVP_TAG} diff --git a/cmd/kots/cli/admin-console-push-images.go b/cmd/kots/cli/admin-console-push-images.go index 29d8f9a1f8..90e8f31d9d 100644 --- a/cmd/kots/cli/admin-console-push-images.go +++ b/cmd/kots/cli/admin-console-push-images.go @@ -54,7 +54,7 @@ func AdminPushImagesCmd() *cobra.Command { } if _, err := os.Stat(imageSource); err == nil { - err = image.PushImages(imageSource, *options) + err = image.TagAndPushImagesFromBundle(imageSource, *options) if err != nil { return errors.Wrap(err, "failed to push images") } diff --git a/pkg/image/airgap.go b/pkg/image/airgap.go index edf9b78f57..354544badc 100644 --- a/pkg/image/airgap.go +++ b/pkg/image/airgap.go @@ -4,20 +4,16 @@ import ( "archive/tar" "bufio" "compress/gzip" - "context" "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" "strings" "time" - "github.com/containers/image/v5/copy" "github.com/containers/image/v5/transports/alltransports" - containerstypes "github.com/containers/image/v5/types" "github.com/mholt/archiver/v3" "github.com/pkg/errors" "github.com/replicatedhq/kots/pkg/archives" @@ -29,37 +25,8 @@ import ( "github.com/replicatedhq/kots/pkg/imageutil" 
"github.com/replicatedhq/kots/pkg/kotsutil" "github.com/replicatedhq/kots/pkg/logger" - "k8s.io/client-go/kubernetes/scheme" ) -// PushImages detects if airgap bundle is a KOTS or app bundle, then pushes images from airgap bundle to private registry accordingly -func PushImages(airgapArchive string, options imagetypes.PushImagesOptions) error { - airgapRootDir, err := ioutil.TempDir("", "kotsadm-airgap") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(airgapRootDir) - - err = ExtractAppAirgapArchive(airgapArchive, airgapRootDir, false, options.ProgressWriter) - if err != nil { - return errors.Wrap(err, "failed to extract images") - } - - if isAppArchive(airgapRootDir) { - err := TagAndPushAppImagesFromPath(airgapRootDir, options) - if err != nil { - return errors.Wrap(err, "failed to push app images") - } - } else { - err := pushKotsadmImagesFromPath(airgapRootDir, options) - if err != nil { - return errors.Wrap(err, "failed to push kotsadm images") - } - } - - return nil -} - func ExtractAppAirgapArchive(archive string, destDir string, excludeImages bool, progressWriter io.Writer) error { reader, err := os.Open(archive) if err != nil { @@ -124,122 +91,11 @@ func ExtractAppAirgapArchive(archive string, destDir string, excludeImages bool, return nil } -func pushKotsadmImagesFromPath(rootDir string, options imagetypes.PushImagesOptions) error { - fileInfos, err := ioutil.ReadDir(rootDir) - if err != nil { - return errors.Wrap(err, "failed to read dir") - } - - for _, info := range fileInfos { - if !info.IsDir() { - continue - } - - err = processImageNames(rootDir, info.Name(), options) - if err != nil { - return errors.Wrapf(err, "failed list images names for format %s", info.Name()) - } - } - - return nil -} - -func processImageNames(rootDir string, format string, options imagetypes.PushImagesOptions) error { - fileInfos, err := ioutil.ReadDir(filepath.Join(rootDir, format)) - if err != nil { - return 
errors.Wrap(err, "failed to read dir") - } - - for _, info := range fileInfos { - if !info.IsDir() { - continue - } - - err = processImageTags(rootDir, format, info.Name(), options) - if err != nil { - return errors.Wrapf(err, "failed list tags for image %s", info.Name()) - } - } - - return nil -} - -func processImageTags(rootDir string, format string, imageName string, options imagetypes.PushImagesOptions) error { - fileInfos, err := ioutil.ReadDir(filepath.Join(rootDir, format, imageName)) - if err != nil { - return errors.Wrap(err, "failed to read dir") - } - - for _, info := range fileInfos { - if info.IsDir() { - continue - } - - err = pushOneImage(rootDir, format, imageName, info.Name(), options) - if err != nil { - return errors.Wrapf(err, "failed push image %s:%s", imageName, info.Name()) - } - } - - return nil -} - -func pushOneImage(rootDir string, format string, imageName string, tag string, options imagetypes.PushImagesOptions) error { - destCtx := &containerstypes.SystemContext{ - DockerInsecureSkipTLSVerify: containerstypes.OptionalBoolTrue, - DockerDisableV1Ping: true, - } - if options.Registry.Username != "" && options.Registry.Password != "" { - destCtx.DockerAuthConfig = &containerstypes.DockerAuthConfig{ - Username: options.Registry.Username, - Password: options.Registry.Password, - } - } - if os.Getenv("KOTSADM_INSECURE_SRCREGISTRY") == "true" { - // allow pulling images from http/invalid https docker repos - // intended for development only, _THIS MAKES THINGS INSECURE_ - destCtx.DockerInsecureSkipTLSVerify = containerstypes.OptionalBoolTrue - } - - dstTag := tag - if options.KotsadmTag != "" { - dstTag = options.KotsadmTag - } - - destStr := fmt.Sprintf("%s/%s:%s", options.Registry.Endpoint, imageName, dstTag) - destRef, err := alltransports.ParseImageName(fmt.Sprintf("docker://%s", destStr)) - if err != nil { - return errors.Wrapf(err, "failed to parse dest image name %s", destStr) - } - - imageFile := filepath.Join(rootDir, format, 
imageName, tag) - localRef, err := alltransports.ParseImageName(fmt.Sprintf("%s:%s", format, imageFile)) - if err != nil { - return errors.Wrapf(err, "failed to parse local image name: %s:%s", format, imageFile) - } - - WriteProgressLine(options.ProgressWriter, fmt.Sprintf("Pushing %s", destStr)) - - _, err = CopyImageWithGC(context.Background(), destRef, localRef, ©.Options{ - RemoveSignatures: true, - SignBy: "", - ReportWriter: options.ProgressWriter, - SourceCtx: nil, - DestinationCtx: destCtx, - ForceManifestMIMEType: "", - }) - if err != nil { - return errors.Wrapf(err, "failed to push image") - } - - return nil -} - func WriteProgressLine(progressWriter io.Writer, line string) { fmt.Fprint(progressWriter, fmt.Sprintf("%s\n", line)) } -// CopyAirgapImages pushes images found in the app airgap bundle/airgap root to the configured registry. +// CopyAirgapImages pushes images found in the airgap bundle/airgap root to the configured registry. func CopyAirgapImages(opts imagetypes.ProcessImageOptions, log *logger.CLILogger) error { pushOpts := imagetypes.PushImagesOptions{ Registry: dockerregistrytypes.RegistryOptions{ @@ -254,12 +110,12 @@ func CopyAirgapImages(opts imagetypes.ProcessImageOptions, log *logger.CLILogger } if opts.AirgapBundle != "" { - err := TagAndPushAppImagesFromBundle(opts.AirgapBundle, pushOpts) + err := TagAndPushImagesFromBundle(opts.AirgapBundle, pushOpts) if err != nil { return errors.Wrap(err, "failed to push images from bundle") } } else { - err := TagAndPushAppImagesFromPath(opts.AirgapRoot, pushOpts) + err := TagAndPushImagesFromPath(opts.AirgapRoot, pushOpts) if err != nil { return errors.Wrap(err, "failed to push images from dir") } @@ -268,7 +124,7 @@ func CopyAirgapImages(opts imagetypes.ProcessImageOptions, log *logger.CLILogger return nil } -func TagAndPushAppImagesFromPath(airgapRootDir string, options imagetypes.PushImagesOptions) error { +func TagAndPushImagesFromPath(airgapRootDir string, options 
imagetypes.PushImagesOptions) error { airgap, err := kotsutil.FindAirgapMetaInDir(airgapRootDir) if err != nil { return errors.Wrap(err, "failed to find airgap meta") @@ -276,15 +132,15 @@ func TagAndPushAppImagesFromPath(airgapRootDir string, options imagetypes.PushIm switch airgap.Spec.Format { case dockertypes.FormatDockerRegistry: - return PushAppImagesFromTempRegistry(airgapRootDir, airgap.Spec.SavedImages, options) + return PushImagesFromTempRegistry(airgapRootDir, airgap.Spec.SavedImages, options) case dockertypes.FormatDockerArchive, "": - return PushAppImagesFromDockerArchivePath(airgapRootDir, options) + return PushImagesFromDockerArchivePath(airgapRootDir, options) default: return errors.Errorf("Airgap bundle format '%s' is not supported", airgap.Spec.Format) } } -func TagAndPushAppImagesFromBundle(airgapBundle string, options imagetypes.PushImagesOptions) error { +func TagAndPushImagesFromBundle(airgapBundle string, options imagetypes.PushImagesOptions) error { airgap, err := kotsutil.FindAirgapMetaInBundle(airgapBundle) if err != nil { return errors.Wrap(err, "failed to find airgap meta") @@ -292,7 +148,7 @@ func TagAndPushAppImagesFromBundle(airgapBundle string, options imagetypes.PushI switch airgap.Spec.Format { case dockertypes.FormatDockerRegistry: - extractedBundle, err := ioutil.TempDir("", "extracted-airgap-kots") + extractedBundle, err := os.MkdirTemp("", "extracted-airgap-kots") if err != nil { return errors.Wrap(err, "failed to create temp dir for unarchived airgap bundle") } @@ -306,15 +162,15 @@ func TagAndPushAppImagesFromBundle(airgapBundle string, options imagetypes.PushI if err := tarGz.Unarchive(airgapBundle, extractedBundle); err != nil { return errors.Wrap(err, "falied to unarchive airgap bundle") } - return PushAppImagesFromTempRegistry(extractedBundle, airgap.Spec.SavedImages, options) + return PushImagesFromTempRegistry(extractedBundle, airgap.Spec.SavedImages, options) case dockertypes.FormatDockerArchive, "": - return 
PushAppImagesFromDockerArchiveBundle(airgapBundle, options) + return PushImagesFromDockerArchiveBundle(airgapBundle, options) default: return errors.Errorf("Airgap bundle format '%s' is not supported", airgap.Spec.Format) } } -func PushAppImagesFromTempRegistry(airgapRootDir string, imageList []string, options imagetypes.PushImagesOptions) error { +func PushImagesFromTempRegistry(airgapRootDir string, imageList []string, options imagetypes.PushImagesOptions) error { imagesDir := filepath.Join(airgapRootDir, "images") if _, err := os.Stat(imagesDir); os.IsNotExist(err) { // this can either be because images were already pushed from the CLI, or it's a diff airgap bundle with no images @@ -361,6 +217,15 @@ func PushAppImagesFromTempRegistry(airgapRootDir string, imageList []string, opt return errors.Wrapf(err, "failed to parse source image %s", imageID) } + // if kotsadm tag is set, change the tag of the kotsadm/kotsadm and kotsadm/kotsadm-migrations images + if options.KotsadmTag != "" && strings.HasPrefix(imageID, "kotsadm/kotsadm") { + i, err := imageutil.ChangeImageTag(imageID, options.KotsadmTag) + if err != nil { + return errors.Wrap(err, "failed to change kotsadm dest image tag") + } + imageID = i + } + destImage, err := imageutil.DestImage(options.Registry, imageID) if err != nil { return errors.Wrapf(err, "failed to get destination image for %s", imageID) @@ -371,12 +236,11 @@ func PushAppImagesFromTempRegistry(airgapRootDir string, imageList []string, opt return errors.Wrapf(err, "failed to parse dest image %s", destStr) } - rewrittenImage, err := imageutil.RewriteDockerRegistryImage(options.Registry, imageID) - if err != nil { - return errors.Wrapf(err, "failed to rewrite image %s", imageID) - } + // copy all architecures available in the bundle. + // this also handles kotsadm airgap bundles that have multi-arch images but are referenced by tag. 
+ copyAll := true - pushAppImageOpts := imagetypes.PushAppImageOptions{ + pushImageOpts := imagetypes.PushImageOptions{ ImageID: imageID, ImageInfo: imageInfo, Log: options.Log, @@ -389,7 +253,7 @@ func PushAppImagesFromTempRegistry(airgapRootDir string, imageList []string, opt Username: options.Registry.Username, Password: options.Registry.Password, }, - CopyAll: rewrittenImage.Digest != "", // we only support multi-arch images using digests + CopyAll: copyAll, SrcDisableV1Ping: true, SrcSkipTLSVerify: true, DestDisableV1Ping: true, @@ -397,15 +261,15 @@ func PushAppImagesFromTempRegistry(airgapRootDir string, imageList []string, opt ReportWriter: reportWriter, }, } - if err := pushAppImage(pushAppImageOpts); err != nil { - return errors.Wrapf(err, "failed to push app image %s", imageID) + if err := pushImage(pushImageOpts); err != nil { + return errors.Wrapf(err, "failed to push image %s", imageID) } } return nil } -func PushAppImagesFromDockerArchivePath(airgapRootDir string, options imagetypes.PushImagesOptions) error { +func PushImagesFromDockerArchivePath(airgapRootDir string, options imagetypes.PushImagesOptions) error { imagesDir := filepath.Join(airgapRootDir, "images") if _, err := os.Stat(imagesDir); os.IsNotExist(err) { // images were already pushed from the CLI @@ -474,7 +338,7 @@ func PushAppImagesFromDockerArchivePath(airgapRootDir string, options imagetypes return errors.Wrapf(err, "failed to parse dest image name %s", destStr) } - pushAppImageOpts := imagetypes.PushAppImageOptions{ + pushImageOpts := imagetypes.PushImageOptions{ ImageID: imagePath, ImageInfo: imageInfo, Log: options.Log, @@ -493,15 +357,15 @@ func PushAppImagesFromDockerArchivePath(airgapRootDir string, options imagetypes ReportWriter: reportWriter, }, } - if err := pushAppImage(pushAppImageOpts); err != nil { - return errors.Wrapf(err, "failed to push app image %s", imagePath) + if err := pushImage(pushImageOpts); err != nil { + return errors.Wrapf(err, "failed to push image %s", 
imagePath) } } return nil } -func PushAppImagesFromDockerArchiveBundle(airgapBundle string, options imagetypes.PushImagesOptions) error { +func PushImagesFromDockerArchiveBundle(airgapBundle string, options imagetypes.PushImagesOptions) error { if exists, err := archives.DirExistsInAirgap("images", airgapBundle); err != nil { return errors.Wrap(err, "failed to check if images dir exists in airgap bundle") } else if !exists { @@ -562,7 +426,7 @@ func PushAppImagesFromDockerArchiveBundle(airgapBundle string, options imagetype WriteProgressLine(reportWriter, fmt.Sprintf("Extracting image %s", imagePath)) } - tmpFile, err := ioutil.TempFile("", "kotsadm-app-image-") + tmpFile, err := os.CreateTemp("", "kotsadm-image-") if err != nil { return errors.Wrap(err, "failed to create temp file") } @@ -600,7 +464,7 @@ func PushAppImagesFromDockerArchiveBundle(airgapBundle string, options imagetype return errors.Wrapf(err, "failed to parse dest image name %s", destStr) } - pushAppImageOpts := imagetypes.PushAppImageOptions{ + pushImageOpts := imagetypes.PushImageOptions{ ImageID: imagePath, ImageInfo: imageInfo, Log: options.Log, @@ -619,15 +483,15 @@ func PushAppImagesFromDockerArchiveBundle(airgapBundle string, options imagetype ReportWriter: reportWriter, }, } - if err := pushAppImage(pushAppImageOpts); err != nil { - return errors.Wrapf(err, "failed to push app image %s", imagePath) + if err := pushImage(pushImageOpts); err != nil { + return errors.Wrapf(err, "failed to push image %s", imagePath) } } return nil } -func pushAppImage(opts imagetypes.PushAppImageOptions) error { +func pushImage(opts imagetypes.PushImageOptions) error { opts.ImageInfo.UploadStart = time.Now() if opts.LogForUI { fmt.Printf("Pushing image %s\n", opts.ImageID) // still log in console for future reference @@ -941,33 +805,3 @@ func countLayersUploaded(image *imagetypes.ImageInfo) int64 { } return count } - -func isAppArchive(rootDir string) bool { - fileInfos, err := ioutil.ReadDir(rootDir) - if err 
!= nil { - return false - } - - for _, info := range fileInfos { - if info.IsDir() || filepath.Ext(info.Name()) != ".yaml" { - continue - } - - contents, err := os.ReadFile(filepath.Join(rootDir, info.Name())) - if err != nil { - continue - } - - decode := scheme.Codecs.UniversalDeserializer().Decode - _, gvk, err := decode(contents, nil, nil) - if err != nil { - continue - } - - if gvk.Group == "kots.io" && gvk.Version == "v1beta1" && gvk.Kind == "Airgap" { - return true - } - } - - return false -} diff --git a/pkg/image/types/types.go b/pkg/image/types/types.go index bcb47ed231..09b54b70a8 100644 --- a/pkg/image/types/types.go +++ b/pkg/image/types/types.go @@ -54,7 +54,7 @@ type PushImagesOptions struct { LogForUI bool } -type PushAppImageOptions struct { +type PushImageOptions struct { ImageID string ImageInfo *ImageInfo Log *logger.CLILogger diff --git a/pkg/imageutil/image.go b/pkg/imageutil/image.go index 9743c44296..b26281cdb8 100644 --- a/pkg/imageutil/image.go +++ b/pkg/imageutil/image.go @@ -281,3 +281,27 @@ func KustomizeImage(destRegistry registrytypes.RegistryOptions, image string) ([ } return rewrittenImages, nil } + +func ChangeImageTag(image string, newTag string) (string, error) { + parsed, err := reference.ParseDockerRef(image) + if err != nil { + return "", errors.Wrap(err, "failed to parse image") + } + + if _, ok := parsed.(reference.Canonical); ok { + // TODO: change tag for digested image that also has a tag + return image, nil + } + + if _, ok := parsed.(reference.Tagged); !ok { + // image is not tagged, just append the tag + return fmt.Sprintf("%s:%s", image, newTag), nil + } + + imageParts := strings.Split(image, "/") + lastPart := imageParts[len(imageParts)-1] + lastPart = fmt.Sprintf("%s:%s", strings.Split(lastPart, ":")[0], newTag) + imageParts[len(imageParts)-1] = lastPart + + return strings.Join(imageParts, "/"), nil +} diff --git a/pkg/imageutil/image_test.go b/pkg/imageutil/image_test.go index 9e6d2a391f..d577536048 100644 --- 
a/pkg/imageutil/image_test.go +++ b/pkg/imageutil/image_test.go @@ -1049,3 +1049,51 @@ func TestGetTag(t *testing.T) { }) } } + +func TestChangeImageTag(t *testing.T) { + tests := []struct { + name string + image string + newTag string + want string + }{ + { + name: "valid image with tag", + image: "myregistry.com/myimage:oldtag", + newTag: "newtag", + want: "myregistry.com/myimage:newtag", + }, + { + name: "valid image without tag", + image: "myregistry.com/myimage", + newTag: "newtag", + want: "myregistry.com/myimage:newtag", + }, + { + name: "valid image with digest", + image: "myregistry.com/myimage@sha256:a3e387f1517c3629c2a2513591c60d22320548762f06270d085f668dbdb9c5d4", + newTag: "newtag", + want: "myregistry.com/myimage@sha256:a3e387f1517c3629c2a2513591c60d22320548762f06270d085f668dbdb9c5d4", + }, + { + name: "valid image with tag and digest - not yet supported", + image: "myregistry.com/myimage:oldtag@sha256:a3e387f1517c3629c2a2513591c60d22320548762f06270d085f668dbdb9c5d4", + newTag: "newtag", + want: "myregistry.com/myimage:oldtag@sha256:a3e387f1517c3629c2a2513591c60d22320548762f06270d085f668dbdb9c5d4", + }, + { + name: "registry with a port", + image: "myregistry.com:5000/myimage:oldtag", + newTag: "newtag", + want: "myregistry.com:5000/myimage:newtag", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, err := ChangeImageTag(test.image, test.newTag) + require.NoError(t, err) + assert.Equal(t, test.want, result) + }) + } +} diff --git a/pkg/kotsadm/main.go b/pkg/kotsadm/main.go index ae7f7fc776..b63f3a1a82 100644 --- a/pkg/kotsadm/main.go +++ b/pkg/kotsadm/main.go @@ -165,7 +165,7 @@ func Deploy(deployOptions types.DeployOptions, log *logger.CLILogger) error { } if !deployOptions.DisableImagePush { - err := image.TagAndPushAppImagesFromPath(deployOptions.AirgapRootDir, pushOptions) + err := image.TagAndPushImagesFromPath(deployOptions.AirgapRootDir, pushOptions) if err != nil { return errors.Wrap(err, "failed to tag 
and push app images from path") } diff --git a/pkg/upstream/upgrade.go b/pkg/upstream/upgrade.go index 3ffd35c713..7f945a3989 100644 --- a/pkg/upstream/upgrade.go +++ b/pkg/upstream/upgrade.go @@ -83,7 +83,7 @@ func Upgrade(appSlug string, options UpgradeOptions) (*UpgradeResponse, error) { } if !options.DisableImagePush { - err := image.TagAndPushAppImagesFromPath(airgapRootDir, pushOptions) + err := image.TagAndPushImagesFromPath(airgapRootDir, pushOptions) if err != nil { return nil, errors.Wrap(err, "failed to tag and push app images from path") }