From fd0e9f29fd419a7a8f9d676d6684a6a934dcc6e1 Mon Sep 17 00:00:00 2001 From: Salah Al Saleh Date: Fri, 10 May 2024 11:17:11 -0700 Subject: [PATCH 1/5] Use Playwright instead of testIM (#596) * Use Playwright instead of testIM --- .github/actions/e2e/action.yml | 14 +- .github/workflows/pull-request.yaml | 2 - .github/workflows/release-dev.yaml | 2 - e2e/README.md | 19 +- e2e/install_test.go | 313 ++++++------------ .../tests/deploy-airgap-upgrade/test.spec.ts | 16 + e2e/playwright/tests/deploy-app/test.spec.ts | 2 +- .../get-join-controller-command/test.spec.ts | 13 + .../get-join-worker-command/test.spec.ts | 13 + e2e/playwright/tests/shared/deploy-app.ts | 6 +- e2e/reset_test.go | 15 +- e2e/restore_test.go | 77 +++++ e2e/scripts/install-testim.sh | 19 -- e2e/scripts/testim.sh | 37 --- e2e/support-bundle_test.go | 1 + e2e/utils.go | 2 +- 16 files changed, 233 insertions(+), 318 deletions(-) create mode 100644 e2e/playwright/tests/deploy-airgap-upgrade/test.spec.ts create mode 100644 e2e/playwright/tests/get-join-controller-command/test.spec.ts create mode 100644 e2e/playwright/tests/get-join-worker-command/test.spec.ts create mode 100644 e2e/restore_test.go delete mode 100755 e2e/scripts/install-testim.sh delete mode 100755 e2e/scripts/testim.sh diff --git a/.github/actions/e2e/action.yml b/.github/actions/e2e/action.yml index 6d5caa48d..5f7a582b1 100644 --- a/.github/actions/e2e/action.yml +++ b/.github/actions/e2e/action.yml @@ -19,12 +19,6 @@ inputs: license: description: 'license (b64) to use for e2e tests' required: true - testim-access-token: - description: 'testim access token' - required: true - testim-branch: - description: 'testim branch' - required: true dr-aws-access-key-id: description: 'Disaster Recovery AWS Access Key ID' required: true @@ -90,8 +84,6 @@ runs: export SNAPSHOT_LICENSE_ID=${{ inputs.snapshot-license-id }} echo "${{ inputs.license }}" | base64 --decode > e2e/license.yaml echo "${{ inputs.snapshot-license }}" | base64 --decode > e2e/snapshot-license.yaml - export TESTIM_ACCESS_TOKEN=${{ inputs.testim-access-token }} - export TESTIM_BRANCH=${{ inputs.testim-branch }} export DR_AWS_S3_ENDPOINT=https://s3.amazonaws.com export DR_AWS_S3_REGION=us-east-1 export DR_AWS_S3_BUCKET=kots-testim-snapshots @@ -103,20 +95,20 @@ runs: uses: actions/upload-artifact@v4 if: ${{ failure() }} with: - name: ${{ github.job }}-support-bundle-host.tar.gz + name: ${{ inputs.test-name }}-support-bundle-host.tar.gz path: ./e2e/support-bundle-host.tar.gz - name: Upload Cluster Support Bundle uses: actions/upload-artifact@v4 if: ${{ failure() }} with: - name: ${{ github.job }}-support-bundle-cluster.tar.gz + name: ${{ inputs.test-name }}-support-bundle-cluster.tar.gz path: ./e2e/support-bundle-cluster.tar.gz - name: Upload Playwright Report id: upload-playwright-report uses: actions/upload-artifact@v4 if: ${{ failure() }} with: - name: ${{ github.job }}-playwright-report.tar.gz + name: ${{ inputs.test-name }}-playwright-report.tar.gz path: ./e2e/playwright-report.tar.gz - name: Print instructions to view Playwright report if: ${{ failure() && steps.upload-playwright-report.outputs.artifact-url != '' }} diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml index e1ccd0396..180ff4e35 100644 --- a/.github/workflows/pull-request.yaml +++ b/.github/workflows/pull-request.yaml @@ -204,8 +204,6 @@ jobs: snapshot-license: ${{ secrets.STAGING_EMBEDDED_CLUSTER_SNAPSHOT_LICENSE }} license-id: ${{ secrets.STAGING_EMBEDDED_CLUSTER_LICENSE_ID }} license: ${{ 
secrets.STAGING_EMBEDDED_CLUSTER_LICENSE }} - testim-access-token: ${{ secrets.TESTIM_ACCESS_TOKEN }} - testim-branch: ${{ github.head_ref == 'main' && 'master' || github.head_ref }} dr-aws-access-key-id: ${{ secrets.TESTIM_AWS_ACCESS_KEY_ID }} dr-aws-secret-access-key: ${{ secrets.TESTIM_AWS_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-dev.yaml b/.github/workflows/release-dev.yaml index aa9687eae..d77b6cea8 100644 --- a/.github/workflows/release-dev.yaml +++ b/.github/workflows/release-dev.yaml @@ -156,7 +156,5 @@ jobs: snapshot-license: ${{ secrets.STAGING_EMBEDDED_CLUSTER_SNAPSHOT_LICENSE }} license-id: ${{ secrets.STAGING_EMBEDDED_CLUSTER_LICENSE_ID }} license: ${{ secrets.STAGING_EMBEDDED_CLUSTER_LICENSE }} - testim-access-token: ${{ secrets.TESTIM_ACCESS_TOKEN }} - testim-branch: 'master' dr-aws-access-key-id: ${{ secrets.TESTIM_AWS_ACCESS_KEY_ID }} dr-aws-secret-access-key: ${{ secrets.TESTIM_AWS_SECRET_ACCESS_KEY }} diff --git a/e2e/README.md b/e2e/README.md index 18b3ad94f..f03523527 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -67,20 +67,9 @@ Make sure to update the application yaml files under kots-release-onmerge and kots-release-onpr directories if you create a new release of the remote application. -### Testim +### Playwright -We use [Testim](https://www.testim.io/) to run end to end tests on the UI. The tests live within the -"Embedded Cluster" Testim project. +We use [Playwright](https://playwright.dev/) to run end to end tests on the UI. +The tests live in the `playwright` directory. -When a git branch is pushed to GitHub, a cooresponding branch is created in -Testim. The on-PR tests will run against the matching branch in Testim, so if you need -to make changes to the tests as part of your PR, you should make those updates in your -Testim branch. - -When a PR is merged, the on-merge tests will run against the master branch in Testim. -If you have made changes to the tests in your Testim branch, you should merge those changes -to the master branch in Testim when merging your PR. Similarly, if you rebase your git -branch from main, you may need to "rebase" your Testim branch (merge changes from master) -if there have been changes to the tests. - -For more details on version control in Testim, refer to the [Testim documentation](https://help.testim.io/docs/version-control-branches). +For more details on how to write tests with Playwright, refer to the [Playwright documentation](https://playwright.dev/docs/writing-tests). 
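For orientation, a minimal spec in that directory looks roughly like the following sketch. The `login` helper and the `#app` locator are borrowed from the suites added later in this patch, so the example is illustrative rather than part of the change itself.

```typescript
import { test, expect } from '@playwright/test';
import { login } from '../shared';

// Log into the admin console and confirm the application reports Ready.
test('example: app reports ready', async ({ page }) => {
  await login(page);
  await expect(page.locator('#app')).toContainText('Ready', { timeout: 60000 });
});
```

In CI, the specs are installed with `e2e/scripts/install-playwright.sh` and run on the test nodes via `playwright.sh <test-name>`, which is what `runPlaywrightTest` in `e2e/install_test.go` wraps.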
diff --git a/e2e/install_test.go b/e2e/install_test.go index 4ab81eed1..897f6ec96 100644 --- a/e2e/install_test.go +++ b/e2e/install_test.go @@ -21,12 +21,8 @@ func TestSingleNodeInstallation(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"single-node-install.sh", "ui"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { @@ -64,12 +60,7 @@ func TestSingleNodeInstallationAlmaLinux8(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) t.Logf("%s: installing tar", time.Now().Format(time.RFC3339)) line := []string{"yum-install-tar.sh"} @@ -113,12 +104,7 @@ func TestSingleNodeInstallationDebian12(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) t.Logf("%s: installing test dependencies on node 0", time.Now().Format(time.RFC3339)) commands := [][]string{ @@ -166,12 +152,7 @@ func TestSingleNodeInstallationCentos8Stream(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) t.Logf("%s: installing tar", time.Now().Format(time.RFC3339)) line := []string{"yum-install-tar.sh"} @@ -248,12 +229,7 @@ func TestMultiNodeInstallation(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) // bootstrap the first node and makes sure it is healthy. also executes the kots // ssl certificate configuration (kurl-proxy). @@ -262,18 +238,18 @@ func TestMultiNodeInstallation(t *testing.T) { t.Fatalf("fail to install embedded-cluster on node %s: %v", tc.Nodes[0], err) } - if err := setupTestim(t, tc); err != nil { - t.Fatalf("fail to setup testim: %v", err) + if err := setupPlaywright(t, tc); err != nil { + t.Fatalf("fail to setup playwright: %v", err) } - if _, _, err := runTestimTest(t, tc, "deploy-kots-application"); err != nil { - t.Fatalf("fail to run testim test deploy-kots-application: %v", err) + if _, _, err := runPlaywrightTest(t, tc, "deploy-app"); err != nil { + t.Fatalf("fail to run playwright test deploy-app: %v", err) } // generate all node join commands (2 for controllers and 1 for worker). 
t.Logf("%s: generating two new controller token commands", time.Now().Format(time.RFC3339)) controllerCommands := []string{} for i := 0; i < 2; i++ { - stdout, stderr, err := runTestimTest(t, tc, "get-join-controller-command") + stdout, stderr, err := runPlaywrightTest(t, tc, "get-join-controller-command") if err != nil { t.Fatalf("fail to generate controller join token:\nstdout: %s\nstderr: %s", stdout, stderr) } @@ -285,7 +261,7 @@ func TestMultiNodeInstallation(t *testing.T) { t.Log("controller join token command:", command) } t.Logf("%s: generating a new worker token command", time.Now().Format(time.RFC3339)) - stdout, stderr, err := runTestimTest(t, tc, "get-join-worker-command") + stdout, stderr, err := runPlaywrightTest(t, tc, "get-join-worker-command") if err != nil { t.Fatalf("fail to generate worker join token:\nstdout: %s\nstderr: %s", stdout, stderr) } @@ -336,12 +312,8 @@ func TestInstallWithoutEmbed(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster-original", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"default-install.sh"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { @@ -358,12 +330,8 @@ func TestInstallFromReplicatedApp(t *testing.T) { Nodes: 1, Image: "ubuntu/jammy", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) + t.Logf("%s: downloading embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"vandoor-prepare.sh", os.Getenv("SHORT_SHA"), os.Getenv("LICENSE_ID"), "false"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { @@ -406,12 +374,8 @@ func TestResetAndReinstall(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"single-node-install.sh", "cli"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { @@ -489,12 +453,7 @@ func TestResetAndReinstallAirgap(t *testing.T) { WithProxy: true, AirgapInstallBundlePath: airgapBundlePath, }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) t.Logf("%s: preparing embedded cluster airgap files", time.Now().Format(time.RFC3339)) line := []string{"airgap-prepare.sh"} @@ -531,12 +490,8 @@ func TestOldVersionUpgrade(t *testing.T) { Nodes: 1, Image: "ubuntu/jammy", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) + t.Logf("%s: downloading embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"vandoor-prepare.sh", fmt.Sprintf("%s-pre-minio-removal", os.Getenv("SHORT_SHA")), os.Getenv("LICENSE_ID"), "false"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { @@ -596,12 +551,7 @@ func TestSingleNodeAirgapUpgradeUbuntuJammy(t *testing.T) { AirgapInstallBundlePath: airgapInstallBundlePath, AirgapUpgradeBundlePath: airgapUpgradeBundlePath, }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) // delete airgap bundles 
once they've been copied to the nodes if err := os.Remove(airgapInstallBundlePath); err != nil { @@ -628,11 +578,11 @@ func TestSingleNodeAirgapUpgradeUbuntuJammy(t *testing.T) { t.Fatalf("fail to remove airgap bundle on node %s: %v", tc.Nodes[0], err) } - if err := setupTestim(t, tc); err != nil { - t.Fatalf("fail to setup testim: %v", err) + if err := setupPlaywright(t, tc); err != nil { + t.Fatalf("fail to setup playwright: %v", err) } - if _, _, err := runTestimTest(t, tc, "deploy-kots-application"); err != nil { - t.Fatalf("fail to run testim test deploy-kots-application: %v", err) + if _, _, err := runPlaywrightTest(t, tc, "deploy-app"); err != nil { + t.Fatalf("fail to run playwright test deploy-app: %v", err) } t.Logf("%s: checking installation state after app deployment", time.Now().Format(time.RFC3339)) @@ -652,8 +602,8 @@ func TestSingleNodeAirgapUpgradeUbuntuJammy(t *testing.T) { t.Fatalf("fail to remove airgap bundle on node %s: %v", tc.Nodes[0], err) } - if _, _, err := runTestimTest(t, tc, "deploy-airgap-upgrade"); err != nil { - t.Fatalf("fail to run testim test deploy-airgap-upgrade: %v", err) + if _, _, err := runPlaywrightTest(t, tc, "deploy-airgap-upgrade"); err != nil { + t.Fatalf("fail to run playwright test deploy-airgap-upgrade: %v", err) } t.Logf("%s: checking installation state after upgrade", time.Now().Format(time.RFC3339)) @@ -691,12 +641,7 @@ func TestMultiNodeAirgapUpgradeUbuntuJammy(t *testing.T) { AirgapInstallBundlePath: airgapInstallBundlePath, AirgapUpgradeBundlePath: airgapUpgradeBundlePath, }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) // delete airgap bundles once they've been copied to the nodes if err := os.Remove(airgapInstallBundlePath); err != nil { @@ -723,22 +668,26 @@ func TestMultiNodeAirgapUpgradeUbuntuJammy(t *testing.T) { if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { t.Fatalf("fail to install embedded-cluster on node %s: %v", tc.Nodes[0], err) } - // remove the airgap bundle after installation + // remove the airgap bundle and binary after installation line = []string{"rm", "/tmp/release.airgap"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { t.Fatalf("fail to remove airgap bundle on node %s: %v", tc.Nodes[0], err) } + line = []string{"rm", "/usr/local/bin/embedded-cluster"} + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to remove embedded-cluster binary on node %s: %v", tc.Nodes[0], err) + } - if err := setupTestim(t, tc); err != nil { - t.Fatalf("fail to setup testim: %v", err) + if err := setupPlaywright(t, tc); err != nil { + t.Fatalf("fail to setup playwright: %v", err) } - if _, _, err := runTestimTest(t, tc, "deploy-kots-application"); err != nil { - t.Fatalf("fail to run testim test deploy-kots-application: %v", err) + if _, _, err := runPlaywrightTest(t, tc, "deploy-app"); err != nil { + t.Fatalf("fail to run playwright test deploy-app: %v", err) } // generate worker node join command. 
t.Logf("%s: generating a new worker token command", time.Now().Format(time.RFC3339)) - stdout, stderr, err := runTestimTest(t, tc, "get-join-worker-command") + stdout, stderr, err := runPlaywrightTest(t, tc, "get-join-worker-command") if err != nil { t.Fatalf("fail to generate worker join token:\nstdout: %s\nstderr: %s", stdout, stderr) } @@ -758,11 +707,15 @@ func TestMultiNodeAirgapUpgradeUbuntuJammy(t *testing.T) { if _, _, err := RunCommandOnNode(t, tc, 1, strings.Split(workerCommand, " ")); err != nil { t.Fatalf("fail to join worker node to the cluster: %v", err) } - // remove the airgap bundle after joining + // remove the airgap bundle and binary after joining line = []string{"rm", "/tmp/release.airgap"} if _, _, err := RunCommandOnNode(t, tc, 1, line); err != nil { t.Fatalf("fail to remove airgap bundle on worker node: %v", err) } + line = []string{"rm", "/usr/local/bin/embedded-cluster"} + if _, _, err := RunCommandOnNode(t, tc, 1, line); err != nil { + t.Fatalf("fail to remove embedded-cluster binary on worker node: %v", err) + } // wait for the nodes to report as ready. t.Logf("%s: all nodes joined, waiting for them to be ready", time.Now().Format(time.RFC3339)) @@ -783,14 +736,18 @@ func TestMultiNodeAirgapUpgradeUbuntuJammy(t *testing.T) { if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { t.Fatalf("fail to run airgap update: %v", err) } - // remove the airgap bundle after upgrade + // remove the airgap bundle and binary after upgrade line = []string{"rm", "/tmp/upgrade/release.airgap"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { t.Fatalf("fail to remove airgap bundle on node %s: %v", tc.Nodes[0], err) } + line = []string{"rm", "/usr/local/bin/embedded-cluster-upgrade"} + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to remove embedded-cluster-upgrade binary on node %s: %v", tc.Nodes[0], err) + } - if _, _, err := runTestimTest(t, tc, "deploy-airgap-upgrade"); err != nil { - t.Fatalf("fail to run testim test deploy-airgap-upgrade: %v", err) + if _, _, err := runPlaywrightTest(t, tc, "deploy-airgap-upgrade"); err != nil { + t.Fatalf("fail to run playwright test deploy-airgap-upgrade: %v", err) } t.Logf("%s: checking installation state after upgrade", time.Now().Format(time.RFC3339)) @@ -802,80 +759,6 @@ func TestMultiNodeAirgapUpgradeUbuntuJammy(t *testing.T) { t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) } -func TestSingleNodeDisasterRecovery(t *testing.T) { - t.Parallel() - - requiredEnvVars := []string{ - "DR_AWS_S3_ENDPOINT", - "DR_AWS_S3_REGION", - "DR_AWS_S3_BUCKET", - "DR_AWS_S3_PREFIX", - "DR_AWS_ACCESS_KEY_ID", - "DR_AWS_SECRET_ACCESS_KEY", - } - for _, envVar := range requiredEnvVars { - if os.Getenv(envVar) == "" { - t.Fatalf("missing required environment variable: %s", envVar) - } - } - - testArgs := []string{} - for _, envVar := range requiredEnvVars { - testArgs = append(testArgs, os.Getenv(envVar)) - } - - tc := cluster.NewTestCluster(&cluster.Input{ - T: t, - Nodes: 1, - Image: "ubuntu/jammy", - LicensePath: "snapshot-license.yaml", - EmbeddedClusterPath: "../output/bin/embedded-cluster", - }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - copyPlaywrightReport(t, tc) - } - tc.Destroy() - }() - - t.Logf("%s: installing test dependencies on node 0", time.Now().Format(time.RFC3339)) - commands := [][]string{ - {"apt-get", "update", "-y"}, - {"apt-get", "install", "expect", "-y"}, - } - if err := RunCommandsOnNode(t, tc, 0, commands); err != nil { - 
t.Fatalf("fail to install test dependencies on node %s: %v", tc.Nodes[0], err) - } - - t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) - line := []string{"single-node-install.sh", "ui"} - if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - t.Fatalf("fail to install embedded-cluster on node %s: %v", tc.Nodes[0], err) - } - - if err := setupPlaywright(t, tc); err != nil { - t.Fatalf("fail to setup playwright: %v", err) - } - if _, _, err := runPlaywrightTest(t, tc, "create-backup", testArgs...); err != nil { - t.Fatalf("fail to run playwright test create-backup: %v", err) - } - - t.Logf("%s: resetting the installation", time.Now().Format(time.RFC3339)) - line = []string{"reset-installation.sh"} - if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - t.Fatalf("fail to reset the installation: %v", err) - } - - t.Logf("%s: restoring the installation", time.Now().Format(time.RFC3339)) - line = append([]string{"restore-installation.exp"}, testArgs...) - if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - t.Fatalf("fail to restore the installation: %v", err) - } - - t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) -} - func TestInstallSnapshotFromReplicatedApp(t *testing.T) { t.Parallel() tc := cluster.NewTestCluster(&cluster.Input{ @@ -883,12 +766,8 @@ func TestInstallSnapshotFromReplicatedApp(t *testing.T) { Nodes: 1, Image: "ubuntu/jammy", }) - defer func() { - if t.Failed() { - generateAndCopySupportBundle(t, tc) - } - tc.Destroy() - }() + defer cleanupCluster(t, tc) + t.Logf("%s: downloading embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"vandoor-prepare.sh", os.Getenv("SHORT_SHA"), os.Getenv("SNAPSHOT_LICENSE_ID"), "false"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { @@ -963,70 +842,44 @@ func downloadAirgapBundle(t *testing.T, versionLabel string, destPath string) st return airgapBundlePath } -func setupTestim(t *testing.T, tc *cluster.Output) error { +func setupPlaywright(t *testing.T, tc *cluster.Output) error { t.Logf("%s: bypassing kurl-proxy on node 0", time.Now().Format(time.RFC3339)) line := []string{"bypass-kurl-proxy.sh"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { return fmt.Errorf("fail to bypass kurl-proxy on node %s: %v", tc.Nodes[0], err) } - line = []string{"install-testim.sh"} + line = []string{"install-playwright.sh"} if tc.Proxy != "" { - t.Logf("%s: installing testim on proxy node", time.Now().Format(time.RFC3339)) + t.Logf("%s: installing playwright on proxy node", time.Now().Format(time.RFC3339)) if _, _, err := RunCommandOnProxyNode(t, tc, line); err != nil { - return fmt.Errorf("fail to install testim on node %s: %v", tc.Proxy, err) + return fmt.Errorf("fail to install playwright on node %s: %v", tc.Proxy, err) } } else { - t.Logf("%s: installing testim on node 0", time.Now().Format(time.RFC3339)) + t.Logf("%s: installing playwright on node 0", time.Now().Format(time.RFC3339)) if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - return fmt.Errorf("fail to install testim on node %s: %v", tc.Nodes[0], err) + return fmt.Errorf("fail to install playwright on node %s: %v", tc.Nodes[0], err) } } - return nil } -func runTestimTest(t *testing.T, tc *cluster.Output, testName string) (stdout, stderr string, err error) { - line := []string{"testim.sh", os.Getenv("TESTIM_ACCESS_TOKEN"), os.Getenv("TESTIM_BRANCH"), testName} +func runPlaywrightTest(t *testing.T, tc *cluster.Output, testName string, args ...string) 
(stdout, stderr string, err error) { + line := []string{"playwright.sh", testName} + line = append(line, args...) if tc.Proxy != "" { - t.Logf("%s: running testim test %s on proxy node", time.Now().Format(time.RFC3339), testName) + t.Logf("%s: running playwright test %s on proxy node", time.Now().Format(time.RFC3339), testName) stdout, stderr, err = RunCommandOnProxyNode(t, tc, line) if err != nil { - return stdout, stderr, fmt.Errorf("fail to run testim test %s on node %s: %v", testName, tc.Proxy, err) + return stdout, stderr, fmt.Errorf("fail to run playwright test %s on node %s: %v", testName, tc.Proxy, err) } } else { - t.Logf("%s: running testim test %s on node 0", time.Now().Format(time.RFC3339), testName) + t.Logf("%s: running playwright test %s on node 0", time.Now().Format(time.RFC3339), testName) stdout, stderr, err = RunCommandOnNode(t, tc, 0, line) if err != nil { - return stdout, stderr, fmt.Errorf("fail to run testim test %s on node %s: %v", testName, tc.Nodes[0], err) + return stdout, stderr, fmt.Errorf("fail to run playwright test %s on node %s: %v", testName, tc.Nodes[0], err) } } - - return stdout, stderr, nil -} - -func setupPlaywright(t *testing.T, tc *cluster.Output) error { - t.Logf("%s: bypassing kurl-proxy on node 0", time.Now().Format(time.RFC3339)) - line := []string{"bypass-kurl-proxy.sh"} - if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - return fmt.Errorf("fail to bypass kurl-proxy on node %s: %v", tc.Nodes[0], err) - } - line = []string{"install-playwright.sh"} - t.Logf("%s: installing playwright on node 0", time.Now().Format(time.RFC3339)) - if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - return fmt.Errorf("fail to install playwright on node %s: %v", tc.Nodes[0], err) - } - return nil -} - -func runPlaywrightTest(t *testing.T, tc *cluster.Output, testName string, args ...string) (stdout, stderr string, err error) { - t.Logf("%s: running playwright test %s on node 0", time.Now().Format(time.RFC3339), testName) - line := []string{"playwright.sh", testName} - line = append(line, args...) 
- stdout, stderr, err = RunCommandOnNode(t, tc, 0, line) - if err != nil { - return stdout, stderr, fmt.Errorf("fail to run playwright test %s on node %s: %v", testName, tc.Nodes[0], err) - } return stdout, stderr, nil } @@ -1050,14 +903,34 @@ func generateAndCopySupportBundle(t *testing.T, tc *cluster.Output) { } func copyPlaywrightReport(t *testing.T, tc *cluster.Output) { - t.Logf("%s: compressing playwright report", time.Now().Format(time.RFC3339)) line := []string{"tar", "-czf", "playwright-report.tar.gz", "-C", "/tmp/playwright/playwright-report", "."} - if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { - t.Errorf("fail to compress playwright report: %v", err) - return + if tc.Proxy != "" { + t.Logf("%s: compressing playwright report on proxy node", time.Now().Format(time.RFC3339)) + if _, _, err := RunCommandOnProxyNode(t, tc, line); err != nil { + t.Errorf("fail to compress playwright report on node %s: %v", tc.Proxy, err) + return + } + t.Logf("%s: copying playwright report to local machine", time.Now().Format(time.RFC3339)) + if err := cluster.CopyFileFromNode(tc.Proxy, "/root/playwright-report.tar.gz", "playwright-report.tar.gz"); err != nil { + t.Errorf("fail to copy playwright report to local machine: %v", err) + } + } else { + t.Logf("%s: compressing playwright report on node 0", time.Now().Format(time.RFC3339)) + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Errorf("fail to compress playwright report on node %s: %v", tc.Nodes[0], err) + return + } + t.Logf("%s: copying playwright report to local machine", time.Now().Format(time.RFC3339)) + if err := cluster.CopyFileFromNode(tc.Nodes[0], "/root/playwright-report.tar.gz", "playwright-report.tar.gz"); err != nil { + t.Errorf("fail to copy playwright report to local machine: %v", err) + } } - t.Logf("%s: copying playwright report to local machine", time.Now().Format(time.RFC3339)) - if err := cluster.CopyFileFromNode(tc.Nodes[0], "/root/playwright-report.tar.gz", "playwright-report.tar.gz"); err != nil { - t.Errorf("fail to copy playwright report to local machine: %v", err) +} + +func cleanupCluster(t *testing.T, tc *cluster.Output) { + if t.Failed() { + generateAndCopySupportBundle(t, tc) + copyPlaywrightReport(t, tc) } + tc.Destroy() } diff --git a/e2e/playwright/tests/deploy-airgap-upgrade/test.spec.ts b/e2e/playwright/tests/deploy-airgap-upgrade/test.spec.ts new file mode 100644 index 000000000..75e254332 --- /dev/null +++ b/e2e/playwright/tests/deploy-airgap-upgrade/test.spec.ts @@ -0,0 +1,16 @@ +import { test, expect } from '@playwright/test'; +import { login } from '../shared'; + +test('deploy airgap upgrade', async ({ page }) => { + test.setTimeout(15 * 60 * 1000); // 15 minutes + await login(page); + await expect(page.locator('#app')).toContainText('Airgap Update'); + await page.getByRole('button', { name: 'Deploy', exact: true }).click(); + await expect(page.locator('.Modal-body')).toBeVisible(); + await page.getByRole('button', { name: 'Yes, Deploy' }).click(); + await expect(page.locator('#app')).toContainText('Updating cluster', { timeout: 60000 }); + await expect(page.locator('.Modal-body')).toContainText('Cluster update in progress', { timeout: 120000 }); + await expect(page.locator('#app')).toContainText('Currently deployed version', { timeout: 600000 }); + await expect(page.locator('#app')).toContainText('Up to date', { timeout: 30000 }); + await expect(page.locator('#app')).toContainText('Ready'); +}); diff --git a/e2e/playwright/tests/deploy-app/test.spec.ts 
b/e2e/playwright/tests/deploy-app/test.spec.ts index c11814aa3..4a8b78275 100644 --- a/e2e/playwright/tests/deploy-app/test.spec.ts +++ b/e2e/playwright/tests/deploy-app/test.spec.ts @@ -2,7 +2,7 @@ import { test, expect } from '@playwright/test'; import { login, deployApp } from '../shared'; test('deploy app', async ({ page }) => { - test.setTimeout(60 * 1000); // 1 minute + test.setTimeout(2 * 60 * 1000); // 2 minutes await login(page); await deployApp(page, expect); }); diff --git a/e2e/playwright/tests/get-join-controller-command/test.spec.ts b/e2e/playwright/tests/get-join-controller-command/test.spec.ts new file mode 100644 index 000000000..577057379 --- /dev/null +++ b/e2e/playwright/tests/get-join-controller-command/test.spec.ts @@ -0,0 +1,13 @@ +import { test, expect } from '@playwright/test'; +import { login } from '../shared'; + +test('get join controller command', async ({ page }) => { + await login(page); + await page.locator('.NavItem').getByText('Cluster Management', { exact: true }).click(); + await page.getByRole('button', { name: 'Add node', exact: true }).click(); + await expect(page.locator('.Modal-body')).toBeVisible(); + await expect(page.getByRole('heading')).toContainText('Add a Node'); + await page.locator('.BoxedCheckbox').getByText('controller-test', { exact: true }).click(); + const joinCommand = await page.locator('.react-prism.language-bash').first().textContent(); + console.log(`{"command":"${joinCommand}"}`); +}); diff --git a/e2e/playwright/tests/get-join-worker-command/test.spec.ts b/e2e/playwright/tests/get-join-worker-command/test.spec.ts new file mode 100644 index 000000000..14a56caaa --- /dev/null +++ b/e2e/playwright/tests/get-join-worker-command/test.spec.ts @@ -0,0 +1,13 @@ +import { test, expect } from '@playwright/test'; +import { login } from '../shared'; + +test('get join worker command', async ({ page }) => { + await login(page); + await page.locator('.NavItem').getByText('Cluster Management', { exact: true }).click(); + await page.getByRole('button', { name: 'Add node', exact: true }).click(); + await expect(page.locator('.Modal-body')).toBeVisible(); + await expect(page.getByRole('heading')).toContainText('Add a Node'); + await page.locator('.BoxedCheckbox').getByText('abc', { exact: true }).click(); + const joinCommand = await page.locator('.react-prism.language-bash').first().textContent(); + console.log(`{"command":"${joinCommand}"}`); +}); diff --git a/e2e/playwright/tests/shared/deploy-app.ts b/e2e/playwright/tests/shared/deploy-app.ts index ad131fb8c..b28c8f9d8 100644 --- a/e2e/playwright/tests/shared/deploy-app.ts +++ b/e2e/playwright/tests/shared/deploy-app.ts @@ -7,7 +7,7 @@ export const deployApp = async (page, expect) => { await page.locator('input[type="password"]').click(); await page.locator('input[type="password"]').fill('password'); await page.getByRole('button', { name: 'Continue' }).click(); - await expect(page.locator('#app')).toContainText('Ready', { timeout: 60000 }); - await expect(page.locator('#app')).toContainText('Up to date', { timeout: 30000 }); - await expect(page.locator('#app')).toContainText('Currently deployed version'); + await expect(page.locator('#app')).toContainText('Currently deployed version', { timeout: 90000 }); + await expect(page.locator('#app')).toContainText('Ready', { timeout: 30000 }); + await expect(page.locator('#app')).toContainText('Up to date'); }; diff --git a/e2e/reset_test.go b/e2e/reset_test.go index d2b7481ee..54d15eb43 100644 --- a/e2e/reset_test.go +++ b/e2e/reset_test.go @@ -19,7 
+19,8 @@ func TestMultiNodeReset(t *testing.T) { LicensePath: "license.yaml", EmbeddedClusterPath: "../output/bin/embedded-cluster", }) - defer tc.Destroy() + defer cleanupCluster(t, tc) + // bootstrap the first node and makes sure it is healthy. also executes the kots // ssl certificate configuration (kurl-proxy). t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) @@ -27,18 +28,18 @@ func TestMultiNodeReset(t *testing.T) { t.Fatalf("fail to install embedded-cluster on node %s: %v", tc.Nodes[0], err) } - if err := setupTestim(t, tc); err != nil { - t.Fatalf("fail to setup testim: %v", err) + if err := setupPlaywright(t, tc); err != nil { + t.Fatalf("fail to setup playwright: %v", err) } - if _, _, err := runTestimTest(t, tc, "deploy-kots-application"); err != nil { - t.Fatalf("fail to deploy kots application: %v", err) + if _, _, err := runPlaywrightTest(t, tc, "deploy-app"); err != nil { + t.Fatalf("fail to run playwright test deploy-app: %v", err) } // generate all node join commands (2 for controllers and 1 for worker). t.Logf("%s: generating two new controller token commands", time.Now().Format(time.RFC3339)) controllerCommands := []string{} for i := 0; i < 2; i++ { - stdout, stderr, err := runTestimTest(t, tc, "get-join-controller-command") + stdout, stderr, err := runPlaywrightTest(t, tc, "get-join-controller-command") if err != nil { t.Fatalf("fail to generate controller join token:\nstdout: %s\nstderr: %s", stdout, stderr) } @@ -50,7 +51,7 @@ func TestMultiNodeReset(t *testing.T) { t.Log("controller join token command:", command) } t.Logf("%s: generating a new worker token command", time.Now().Format(time.RFC3339)) - stdout, stderr, err := runTestimTest(t, tc, "get-join-worker-command") + stdout, stderr, err := runPlaywrightTest(t, tc, "get-join-worker-command") if err != nil { t.Fatalf("fail to generate worker join token:\nstdout: %s\nstderr: %s", stdout, stderr) } diff --git a/e2e/restore_test.go b/e2e/restore_test.go new file mode 100644 index 000000000..80b6c8f59 --- /dev/null +++ b/e2e/restore_test.go @@ -0,0 +1,77 @@ +package e2e + +import ( + "os" + "testing" + "time" + + "github.com/replicatedhq/embedded-cluster/e2e/cluster" +) + +func TestSingleNodeDisasterRecovery(t *testing.T) { + t.Parallel() + + requiredEnvVars := []string{ + "DR_AWS_S3_ENDPOINT", + "DR_AWS_S3_REGION", + "DR_AWS_S3_BUCKET", + "DR_AWS_S3_PREFIX", + "DR_AWS_ACCESS_KEY_ID", + "DR_AWS_SECRET_ACCESS_KEY", + } + for _, envVar := range requiredEnvVars { + if os.Getenv(envVar) == "" { + t.Fatalf("missing required environment variable: %s", envVar) + } + } + + testArgs := []string{} + for _, envVar := range requiredEnvVars { + testArgs = append(testArgs, os.Getenv(envVar)) + } + + tc := cluster.NewTestCluster(&cluster.Input{ + T: t, + Nodes: 1, + Image: "ubuntu/jammy", + LicensePath: "snapshot-license.yaml", + EmbeddedClusterPath: "../output/bin/embedded-cluster", + }) + defer cleanupCluster(t, tc) + + t.Logf("%s: installing test dependencies on node 0", time.Now().Format(time.RFC3339)) + commands := [][]string{ + {"apt-get", "update", "-y"}, + {"apt-get", "install", "expect", "-y"}, + } + if err := RunCommandsOnNode(t, tc, 0, commands); err != nil { + t.Fatalf("fail to install test dependencies on node %s: %v", tc.Nodes[0], err) + } + + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) + line := []string{"single-node-install.sh", "ui"} + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to install 
embedded-cluster on node %s: %v", tc.Nodes[0], err) + } + + if err := setupPlaywright(t, tc); err != nil { + t.Fatalf("fail to setup playwright: %v", err) + } + if _, _, err := runPlaywrightTest(t, tc, "create-backup", testArgs...); err != nil { + t.Fatalf("fail to run playwright test create-backup: %v", err) + } + + t.Logf("%s: resetting the installation", time.Now().Format(time.RFC3339)) + line = []string{"reset-installation.sh"} + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to reset the installation: %v", err) + } + + t.Logf("%s: restoring the installation", time.Now().Format(time.RFC3339)) + line = append([]string{"restore-installation.exp"}, testArgs...) + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to restore the installation: %v", err) + } + + t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) +} diff --git a/e2e/scripts/install-testim.sh b/e2e/scripts/install-testim.sh deleted file mode 100755 index 32a13e176..000000000 --- a/e2e/scripts/install-testim.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -set -euox pipefail - -main() { - apt-get update -y - apt-get install -y \ - ca-certificates \ - curl \ - gnupg \ - socat - - curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg - NODE_MAJOR=20 - echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list - apt-get update && apt-get install nodejs -y - npm install -g @testim/testim-cli -} - -main "$@" diff --git a/e2e/scripts/testim.sh b/e2e/scripts/testim.sh deleted file mode 100755 index 4be2cb7b1..000000000 --- a/e2e/scripts/testim.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -set -euox pipefail - -main() { - if [ -z "$1" ]; then - echo "Testim token is required" - exit 1 - fi - local testim_token="$1" - - if [ -z "$2" ]; then - echo "Testim branch is required" - exit 1 - fi - local testim_branch="$2" - - if [ -z "$3" ]; then - echo "Test name is required" - exit 1 - fi - local test_name="$3" - - echo "Running Testim test: $test_name on branch $testim_branch" - - # testim CLI can only tunnel to localhost, so this allows us to forward to the desired local address - socat TCP-LISTEN:3000,fork TCP:10.0.0.2:30001 & - socat_pid=$! - - # run the Testim test - testim --token=$testim_token --project=wSvaGXFJnnoonKzLxBfX --grid=Testim-grid --branch=$testim_branch --timeout=3600000 --name=$test_name --tunnel --tunnel-port=3000 - - kill $socat_pid - - echo "Testim test $test_name completed" -} - -main "$@" diff --git a/e2e/support-bundle_test.go b/e2e/support-bundle_test.go index b76e94618..9ecdbf914 100644 --- a/e2e/support-bundle_test.go +++ b/e2e/support-bundle_test.go @@ -17,6 +17,7 @@ func TestCollectSupportBundle(t *testing.T) { EmbeddedClusterPath: "../output/bin/embedded-cluster", }) defer tc.Destroy() + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) line := []string{"single-node-install.sh", "cli"} if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { diff --git a/e2e/utils.go b/e2e/utils.go index e74085068..d8a381328 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -104,7 +104,7 @@ type nodeJoinResponse struct { Command string `json:"command"` } -// findJoinCommandInOutput parses the output of the testim.sh script and returns the join command. 
+// findJoinCommandInOutput parses the output of the playwright.sh script and returns the join command.
 func findJoinCommandInOutput(stdout string) (string, error) {
 	output := commandOutputRegex.FindString(stdout)
 	if output == "" {

From 07167114f79285eab4270070d7dab3c2a8fcfcf1 Mon Sep 17 00:00:00 2001
From: Ricardo Maraschini
Date: Tue, 14 May 2024 14:59:56 +0200
Subject: [PATCH 2/5] feat: apply built in overrides before end user overrides
 (#610)

we should apply the release built in overrides before applying the
overrides we read from the command line. we were only applying the
latter before this patch so this also solves a bug.

this commit also adds unit tests for the built in overrides and a
validation after the installation e2e test.
---
 cmd/embedded-cluster/install.go               |   7 ++
 e2e/kots-release-install/cluster-config.yaml  |  10 +++
 e2e/kots-release-upgrade/cluster-config.yaml  |  10 +++
 e2e/scripts/single-node-install.sh            |  18 ++++
 pkg/addons/applier.go                         |   9 --
 pkg/addons/applier_test.go                    |  88 -------------------
 ...helm-config-overrides-multiple-addons.yaml |  76 ----------------
 ...enerate-helm-config-overrides-openebs.yaml |  45 ----------
 ...nerate-helm-config-overrides-registry.yaml |  38 --------
 pkg/config/config.go                          |  17 ++++
 pkg/config/config_test.go                     |  33 +++++++
 ...ions-overrides-override-admin-console.yaml |  57 ++++++++++++
 ...ns-overrides-override-multiple-charts.yaml |  78 ++++++++++++++++
 ...extensions-overrides-override-unknown.yaml |  66 ++++++++++++++
 14 files changed, 296 insertions(+), 256 deletions(-)
 delete mode 100644 pkg/addons/applier_test.go
 delete mode 100644 pkg/addons/testdata/generate-helm-config-overrides-multiple-addons.yaml
 delete mode 100644 pkg/addons/testdata/generate-helm-config-overrides-openebs.yaml
 delete mode 100644 pkg/addons/testdata/generate-helm-config-overrides-registry.yaml
 create mode 100644 pkg/config/testdata/builtin-extensions-overrides-override-admin-console.yaml
 create mode 100644 pkg/config/testdata/builtin-extensions-overrides-override-multiple-charts.yaml
 create mode 100644 pkg/config/testdata/builtin-extensions-overrides-override-unknown.yaml

diff --git a/cmd/embedded-cluster/install.go b/cmd/embedded-cluster/install.go
index 34a015b98..801825966 100644
--- a/cmd/embedded-cluster/install.go
+++ b/cmd/embedded-cluster/install.go
@@ -307,6 +307,7 @@ func ensureK0sConfig(c *cli.Context) error {

 // applyUnsupportedOverrides applies overrides to the k0s configuration. Applies first the
 // overrides embedded into the binary and after the ones provided by the user (--overrides).
+// we first apply the k0s config override and then apply the built in overrides.
func applyUnsupportedOverrides(c *cli.Context, cfg *k0sconfig.ClusterConfig) (*k0sconfig.ClusterConfig, error) { var err error if embcfg, err := release.GetEmbeddedClusterConfig(); err != nil { @@ -316,6 +317,9 @@ func applyUnsupportedOverrides(c *cli.Context, cfg *k0sconfig.ClusterConfig) (*k if cfg, err = config.PatchK0sConfig(cfg, overrides); err != nil { return nil, fmt.Errorf("unable to patch k0s config: %w", err) } + if cfg, err = config.ApplyBuiltInExtensionsOverrides(cfg, embcfg); err != nil { + return nil, fmt.Errorf("unable to release built in overrides: %w", err) + } } if c.String("overrides") == "" { return cfg, nil @@ -328,6 +332,9 @@ func applyUnsupportedOverrides(c *cli.Context, cfg *k0sconfig.ClusterConfig) (*k if cfg, err = config.PatchK0sConfig(cfg, overrides); err != nil { return nil, fmt.Errorf("unable to apply overrides: %w", err) } + if cfg, err = config.ApplyBuiltInExtensionsOverrides(cfg, eucfg); err != nil { + return nil, fmt.Errorf("unable to end user built in overrides: %w", err) + } return cfg, nil } diff --git a/e2e/kots-release-install/cluster-config.yaml b/e2e/kots-release-install/cluster-config.yaml index 762fe4492..b21a263f6 100644 --- a/e2e/kots-release-install/cluster-config.yaml +++ b/e2e/kots-release-install/cluster-config.yaml @@ -18,6 +18,16 @@ spec: xyz-test-label: xyz-value name: xyz unsupportedOverrides: + builtInExtensions: + - name: admin-console + values: | + labels: + release-custom-label: release-clustom-value + - name: embedded-cluster-operator + values: | + global: + labels: + release-custom-label: release-clustom-value k0s: | config: metadata: diff --git a/e2e/kots-release-upgrade/cluster-config.yaml b/e2e/kots-release-upgrade/cluster-config.yaml index 65162ce4e..1cea90f43 100644 --- a/e2e/kots-release-upgrade/cluster-config.yaml +++ b/e2e/kots-release-upgrade/cluster-config.yaml @@ -21,6 +21,16 @@ spec: final-test-label: final-value name: final unsupportedOverrides: + builtInExtensions: + - name: admin-console + values: | + labels: + release-custom-label: release-clustom-value + - name: embedded-cluster-operator + values: | + global: + labels: + release-custom-label: release-clustom-value k0s: | config: metadata: diff --git a/e2e/scripts/single-node-install.sh b/e2e/scripts/single-node-install.sh index 37bff5fb8..31204b33b 100755 --- a/e2e/scripts/single-node-install.sh +++ b/e2e/scripts/single-node-install.sh @@ -248,6 +248,21 @@ ensure_installation_label() { fi } +# ensure_release_builtin_overrides verifies if the built in overrides we provide as part +# of the release have been applied to the helm charts. +ensure_release_builtin_overrides() { + if ! kubectl get charts.helm.k0sproject.io -n kube-system k0s-addon-chart-admin-console -o yaml | grep -q -E "^ +release-custom-label"; then + echo "release-custom-label not found in k0s-addon-chart-admin-console" + kubectl get charts.helm.k0sproject.io -n kube-system k0s-addon-chart-admin-console -o yaml + return 1 + fi + if ! kubectl get charts.helm.k0sproject.io -n kube-system k0s-addon-chart-embedded-cluster-operator -o yaml | grep -q -E "^ +release-custom-label"; then + echo "release-custom-label not found in k0s-addon-chart-embedded-cluster-operator" + kubectl get charts.helm.k0sproject.io -n kube-system k0s-addon-chart-embedded-cluster-operator -o yaml + return 1 + fi +} + main() { local app_deploy_method="$1" @@ -319,6 +334,9 @@ main() { if ! ensure_installation_label; then exit 1 fi + if ! ensure_release_builtin_overrides; then + exit 1 + fi if ! 
systemctl status embedded-cluster; then echo "Failed to get status of embedded-cluster service" exit 1 diff --git a/pkg/addons/applier.go b/pkg/addons/applier.go index 519f9aa8a..d4c8465a4 100644 --- a/pkg/addons/applier.go +++ b/pkg/addons/applier.go @@ -115,15 +115,6 @@ func (a *Applier) GenerateHelmConfigs(additionalCharts []v1beta1.Chart, addition // charts required by the application charts = append(charts, additionalCharts...) - if a.endUserConfig != nil { - for i, chart := range charts { - values, err := a.endUserConfig.Spec.ApplyEndUserAddOnOverrides(chart.Name, chart.Values) - if err != nil { - return nil, nil, fmt.Errorf("unable to apply end user overrides for %s: %w", chart.Name, err) - } - charts[i].Values = values - } - } repositories = append(repositories, additionalRepositories...) return charts, repositories, nil diff --git a/pkg/addons/applier_test.go b/pkg/addons/applier_test.go deleted file mode 100644 index c50efb0cc..000000000 --- a/pkg/addons/applier_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package addons - -import ( - "embed" - "path/filepath" - "strings" - "testing" - - "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" - k8syaml "sigs.k8s.io/yaml" -) - -//go:embed testdata/* -var testData embed.FS - -func parseTestsYAML[T any](t *testing.T, prefix string) map[string]T { - entries, err := testData.ReadDir("testdata") - require.NoError(t, err) - tests := make(map[string]T, 0) - for _, entry := range entries { - if !strings.HasPrefix(entry.Name(), prefix) { - continue - } - - fpath := filepath.Join("testdata", entry.Name()) - data, err := testData.ReadFile(fpath) - require.NoError(t, err) - - var onetest T - err = yaml.Unmarshal(data, &onetest) - require.NoError(t, err) - - tests[fpath] = onetest - } - return tests -} - -func TestGenerateHelmConfigWithOverrides(t *testing.T) { - type addonConfig struct { - Name string `yaml:"name"` - Values string `yaml:"values"` - } - - type test struct { - Name string - EndUserConfig string `yaml:"endUserConfig"` - Expected []addonConfig - } - - for tname, tt := range parseTestsYAML[test](t, "generate-helm-config-overrides-") { - t.Run(tname, func(t *testing.T) { - var config v1beta1.Config - err := k8syaml.Unmarshal([]byte(tt.EndUserConfig), &config) - require.NoError(t, err) - applier := NewApplier( - WithEndUserConfig(&config), - WithoutPrompt(), - WithAirgapBundle("/does/not/exist"), - ) - charts, _, err := applier.GenerateHelmConfigs(nil, nil) - require.NoError(t, err) - - for _, exp := range tt.Expected { - var values string - for _, chart := range charts { - if chart.Name != exp.Name { - continue - } - values = chart.Values - break - } - require.NotEmpty(t, values, "addon %s not found", exp.Name) - - expected := map[string]interface{}{} - err = yaml.Unmarshal([]byte(exp.Values), &expected) - require.NoError(t, err) - - found := map[string]interface{}{} - err = yaml.Unmarshal([]byte(values), &found) - require.NoError(t, err) - - require.Equal(t, expected, found) - } - }) - } -} diff --git a/pkg/addons/testdata/generate-helm-config-overrides-multiple-addons.yaml b/pkg/addons/testdata/generate-helm-config-overrides-multiple-addons.yaml deleted file mode 100644 index 97ff8e20c..000000000 --- a/pkg/addons/testdata/generate-helm-config-overrides-multiple-addons.yaml +++ /dev/null @@ -1,76 +0,0 @@ -endUserConfig: | - apiVersion: embeddedcluster.replicated.com/v1beta1 - kind: Config - spec: - unsupportedOverrides: - builtInExtensions: - - name: docker-registry - 
values: | - replicaCount: 3 - persistence: - size: 50Gi - - name: openebs - values: |- - newProperty: newValue - localpv-provisioner: - analytics: - enabled: false - hostpathClass: - isDefaultClass: false - helperPod: - image: - tag: test -expected: - - name: docker-registry - values: | - configData: - auth: - htpasswd: - path: /auth/htpasswd - realm: Registry - extraVolumeMounts: - - mountPath: /auth - name: auth - extraVolumes: - - name: auth - secret: - secretName: registry-auth - fullnameOverride: registry - image: - tag: 2.8.3 - persistence: - accessMode: ReadWriteOnce - enabled: true - size: 50Gi - storageClass: openebs-hostpath - replicaCount: 3 - storage: filesystem - podAnnotations: - backup.velero.io/backup-volumes: data - - name: openebs - values: | - newProperty: newValue - localpv-provisioner: - analytics: - enabled: false - hostpathClass: - enabled: true - isDefaultClass: false - helperPod: - image: - tag: test - zfs-localpv: - enabled: false - lvm-localpv: - enabled: false - mayastor: - enabled: false - engines: - local: - lvm: - enabled: false - zfs: - enabled: false - replicated: - mayastor: - enabled: false diff --git a/pkg/addons/testdata/generate-helm-config-overrides-openebs.yaml b/pkg/addons/testdata/generate-helm-config-overrides-openebs.yaml deleted file mode 100644 index a0141c3a7..000000000 --- a/pkg/addons/testdata/generate-helm-config-overrides-openebs.yaml +++ /dev/null @@ -1,45 +0,0 @@ -endUserConfig: | - apiVersion: embeddedcluster.replicated.com/v1beta1 - kind: Config - spec: - unsupportedOverrides: - builtInExtensions: - - name: openebs - values: |- - newProperty: newValue - localpv-provisioner: - analytics: - enabled: false - hostpathClass: - isDefaultClass: false - helperPod: - image: - tag: test -expected: - - name: openebs - values: | - newProperty: newValue - localpv-provisioner: - analytics: - enabled: false - hostpathClass: - enabled: true - isDefaultClass: false - helperPod: - image: - tag: test - zfs-localpv: - enabled: false - lvm-localpv: - enabled: false - mayastor: - enabled: false - engines: - local: - lvm: - enabled: false - zfs: - enabled: false - replicated: - mayastor: - enabled: false diff --git a/pkg/addons/testdata/generate-helm-config-overrides-registry.yaml b/pkg/addons/testdata/generate-helm-config-overrides-registry.yaml deleted file mode 100644 index b8be3f690..000000000 --- a/pkg/addons/testdata/generate-helm-config-overrides-registry.yaml +++ /dev/null @@ -1,38 +0,0 @@ -endUserConfig: | - apiVersion: embeddedcluster.replicated.com/v1beta1 - kind: Config - spec: - unsupportedOverrides: - builtInExtensions: - - name: docker-registry - values: | - replicaCount: 8 - persistence: - size: 100Gi -expected: - - name: docker-registry - values: | - configData: - auth: - htpasswd: - path: /auth/htpasswd - realm: Registry - extraVolumeMounts: - - mountPath: /auth - name: auth - extraVolumes: - - name: auth - secret: - secretName: registry-auth - fullnameOverride: registry - image: - tag: 2.8.3 - persistence: - accessMode: ReadWriteOnce - enabled: true - size: 100Gi - storageClass: openebs-hostpath - replicaCount: 8 - storage: filesystem - podAnnotations: - backup.velero.io/backup-volumes: data diff --git a/pkg/config/config.go b/pkg/config/config.go index 2282fa7ae..b307084c4 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -9,6 +9,7 @@ import ( jsonpatch "github.com/evanphx/json-patch" "github.com/k0sproject/dig" k0sconfig "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" + embeddedclusterv1beta1 
"github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1" "github.com/replicatedhq/embedded-cluster-operator/controllers" "gopkg.in/yaml.v2" k8syaml "sigs.k8s.io/yaml" @@ -97,6 +98,22 @@ func extractK0sConfigPatch(raw string) (string, error) { return string(data), nil } +// ApplyBuiltIndExtensionsOverrides applies the cluster config built in extensions overrides on top +// of the provided cluster configuration. Returns the changed configuration. +func ApplyBuiltInExtensionsOverrides(cfg *k0sconfig.ClusterConfig, releaseConfig *embeddedclusterv1beta1.Config) (*k0sconfig.ClusterConfig, error) { + if cfg.Spec == nil || cfg.Spec.Extensions == nil || cfg.Spec.Extensions.Helm == nil { + return cfg, nil + } + for i, chart := range cfg.Spec.Extensions.Helm.Charts { + values, err := releaseConfig.Spec.ApplyEndUserAddOnOverrides(chart.Name, chart.Values) + if err != nil { + return nil, fmt.Errorf("unable to apply end user overrides for %s: %w", chart.Name, err) + } + cfg.Spec.Extensions.Helm.Charts[i].Values = values + } + return cfg, nil +} + // PatchK0sConfig patches a K0s config with the provided patch. Returns the patched config, // patch is expected to be a YAML encoded k0s configuration. We marshal the original config // and the patch into JSON and apply the latter as a merge patch to the former. diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 1308ebd67..fa81e5552 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -7,6 +7,8 @@ import ( "testing" k0sconfig "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" + embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" k8syaml "sigs.k8s.io/yaml" @@ -37,6 +39,37 @@ func parseTestsYAML[T any](t *testing.T, prefix string) map[string]T { return tests } +func TestApplyBuiltInExtensionsOverrides(t *testing.T) { + type test struct { + Name string + ReleaseConfig string `yaml:"releaseConfig"` + ClusterConfig string `yaml:"clusterConfig"` + Expected string `yaml:"expected"` + } + + for tname, tt := range parseTestsYAML[test](t, "builtin-extensions-overrides-") { + t.Run(tname, func(t *testing.T) { + req := require.New(t) + + var releaseConfig embeddedclusterv1beta1.Config + err := k8syaml.Unmarshal([]byte(tt.ReleaseConfig), &releaseConfig) + req.NoError(err) + + var clusterConfig k0sconfig.ClusterConfig + err = k8syaml.Unmarshal([]byte(tt.ClusterConfig), &clusterConfig) + req.NoError(err) + + var expected k0sconfig.ClusterConfig + err = k8syaml.Unmarshal([]byte(tt.Expected), &expected) + req.NoError(err) + + result, err := ApplyBuiltInExtensionsOverrides(&clusterConfig, &releaseConfig) + req.NoError(err) + assert.Equal(t, &expected, result) + }) + } +} + func TestPatchK0sConfig(t *testing.T) { type test struct { Name string diff --git a/pkg/config/testdata/builtin-extensions-overrides-override-admin-console.yaml b/pkg/config/testdata/builtin-extensions-overrides-override-admin-console.yaml new file mode 100644 index 000000000..cfe439ad4 --- /dev/null +++ b/pkg/config/testdata/builtin-extensions-overrides-override-admin-console.yaml @@ -0,0 +1,57 @@ +releaseConfig: | + apiVersion: embeddedcluster.replicated.com/v1beta1 + kind: Config + metadata: + name: "testconfig" + spec: + version: 1.1.0+k8s-1.28 + unsupportedOverrides: + builtInExtensions: + - name: admin-console + values: | + kurlProxy: + nodePort: 40000 +clusterConfig: | + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig 
+ spec: + extensions: + helm: + charts: + - name: admin-console + values: | + embeddedClusterID: 8dd4b480-93e7-434f-a8c8-b9c522bed816 + embeddedClusterVersion: v1.1.0+k8s-1.28 + isAirgap: "false" + isHelmManaged: false + kurlProxy: + enabled: true + nodePort: 30000 + labels: + replicated.com/disaster-recovery: infra + replicated.com/disaster-recovery-chart: kotsadm + minimalRBAC: false + service: + enabled: false +expected: | + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + extensions: + helm: + charts: + - name: admin-console + values: | + embeddedClusterID: 8dd4b480-93e7-434f-a8c8-b9c522bed816 + embeddedClusterVersion: v1.1.0+k8s-1.28 + isAirgap: "false" + isHelmManaged: false + kurlProxy: + enabled: true + nodePort: 40000 + labels: + replicated.com/disaster-recovery: infra + replicated.com/disaster-recovery-chart: kotsadm + minimalRBAC: false + service: + enabled: false diff --git a/pkg/config/testdata/builtin-extensions-overrides-override-multiple-charts.yaml b/pkg/config/testdata/builtin-extensions-overrides-override-multiple-charts.yaml new file mode 100644 index 000000000..debc41ed6 --- /dev/null +++ b/pkg/config/testdata/builtin-extensions-overrides-override-multiple-charts.yaml @@ -0,0 +1,78 @@ +releaseConfig: | + apiVersion: embeddedcluster.replicated.com/v1beta1 + kind: Config + metadata: + name: "testconfig" + spec: + version: 1.1.0+k8s-1.28 + unsupportedOverrides: + builtInExtensions: + - name: admin-console + values: | + isAirgap: "true" + labels: + test: test + service: + enabled: true + - name: another-chart + values: | + test: overwritten + object: + value: avalue +clusterConfig: | + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + extensions: + helm: + charts: + - name: admin-console + values: | + embeddedClusterID: 8dd4b480-93e7-434f-a8c8-b9c522bed816 + embeddedClusterVersion: v1.1.0+k8s-1.28 + isAirgap: "false" + isHelmManaged: false + kurlProxy: + enabled: true + nodePort: 30000 + labels: + replicated.com/disaster-recovery: infra + replicated.com/disaster-recovery-chart: kotsadm + minimalRBAC: false + service: + enabled: false + - name: another-chart + values: | + test: testing + service: + enabled: false +expected: | + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + extensions: + helm: + charts: + - name: admin-console + values: | + embeddedClusterID: 8dd4b480-93e7-434f-a8c8-b9c522bed816 + embeddedClusterVersion: v1.1.0+k8s-1.28 + isAirgap: "true" + isHelmManaged: false + kurlProxy: + enabled: true + nodePort: 30000 + labels: + replicated.com/disaster-recovery: infra + replicated.com/disaster-recovery-chart: kotsadm + test: test + minimalRBAC: false + service: + enabled: true + - name: another-chart + values: | + object: + value: avalue + service: + enabled: false + test: overwritten diff --git a/pkg/config/testdata/builtin-extensions-overrides-override-unknown.yaml b/pkg/config/testdata/builtin-extensions-overrides-override-unknown.yaml new file mode 100644 index 000000000..839eb8f87 --- /dev/null +++ b/pkg/config/testdata/builtin-extensions-overrides-override-unknown.yaml @@ -0,0 +1,66 @@ +releaseConfig: | + apiVersion: embeddedcluster.replicated.com/v1beta1 + kind: Config + metadata: + name: "testconfig" + spec: + version: 1.1.0+k8s-1.28 + unsupportedOverrides: + builtInExtensions: + - name: unknown + values: | + test: test +clusterConfig: | + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + extensions: + helm: + charts: + - name: admin-console + values: | + embeddedClusterID: 
8dd4b480-93e7-434f-a8c8-b9c522bed816 + embeddedClusterVersion: v1.1.0+k8s-1.28 + isAirgap: "false" + isHelmManaged: false + kurlProxy: + enabled: true + nodePort: 30000 + labels: + replicated.com/disaster-recovery: infra + replicated.com/disaster-recovery-chart: kotsadm + minimalRBAC: false + service: + enabled: false + - name: another-chart + values: | + test: testing + service: + enabled: false +expected: | + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + extensions: + helm: + charts: + - name: admin-console + values: | + embeddedClusterID: 8dd4b480-93e7-434f-a8c8-b9c522bed816 + embeddedClusterVersion: v1.1.0+k8s-1.28 + isAirgap: "false" + isHelmManaged: false + kurlProxy: + enabled: true + nodePort: 30000 + labels: + replicated.com/disaster-recovery: infra + replicated.com/disaster-recovery-chart: kotsadm + minimalRBAC: false + service: + enabled: false + - name: another-chart + values: | + test: testing + service: + enabled: false From 36db3ad255cb4b8902ad68cb2622d5926dfbc908 Mon Sep 17 00:00:00 2001 From: Salah Al Saleh Date: Tue, 14 May 2024 08:40:19 -0700 Subject: [PATCH 3/5] feat: ability to resume a previously interrupted restore operation (#603) * resumable restores --- .github/workflows/pull-request.yaml | 1 + .github/workflows/release-dev.yaml | 1 + cmd/embedded-cluster/restore.go | 601 ++++++++++++++++++---------- e2e/restore_test.go | 68 ++++ e2e/scripts/resume-restore.exp | 399 ++++++++++++++++++ 5 files changed, 869 insertions(+), 201 deletions(-) create mode 100755 e2e/scripts/resume-restore.exp diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml index 180ff4e35..878c89c5e 100644 --- a/.github/workflows/pull-request.yaml +++ b/.github/workflows/pull-request.yaml @@ -187,6 +187,7 @@ jobs: - TestInstallSnapshotFromReplicatedApp - TestMultiNodeAirgapUpgradeUbuntuJammy - TestSingleNodeDisasterRecovery + - TestSingleNodeResumeDisasterRecovery steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/release-dev.yaml b/.github/workflows/release-dev.yaml index d77b6cea8..8ac8a45b2 100644 --- a/.github/workflows/release-dev.yaml +++ b/.github/workflows/release-dev.yaml @@ -140,6 +140,7 @@ jobs: - TestInstallSnapshotFromReplicatedApp - TestMultiNodeAirgapUpgradeUbuntuJammy - TestSingleNodeDisasterRecovery + - TestSingleNodeResumeDisasterRecovery steps: - name: Checkout uses: actions/checkout@v4 diff --git a/cmd/embedded-cluster/restore.go b/cmd/embedded-cluster/restore.go index b9730c347..a380cd05e 100644 --- a/cmd/embedded-cluster/restore.go +++ b/cmd/embedded-cluster/restore.go @@ -29,11 +29,36 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config" k8syaml "sigs.k8s.io/yaml" ) +type ecRestoreState string + +const ( + ecRestoreStateNew ecRestoreState = "new" + ecRestoreStateConfirmBackup ecRestoreState = "confirm-backup" + ecRestoreStateRestoreInfra ecRestoreState = "restore-infra" + ecRestoreStateRestoreECInstall ecRestoreState = "restore-ec-install" + ecRestoreStateWaitForNodes ecRestoreState = "wait-for-nodes" + ecRestoreStateRestoreApp ecRestoreState = "restore-app" +) + +var ecRestoreStates = []ecRestoreState{ + ecRestoreStateNew, + ecRestoreStateConfirmBackup, + ecRestoreStateRestoreInfra, + ecRestoreStateRestoreECInstall, + ecRestoreStateWaitForNodes, + ecRestoreStateRestoreApp, +} + +const ( + ecRestoreStateCMName 
= "embedded-cluster-restore-state" +) + type s3BackupStore struct { endpoint string region string @@ -43,6 +68,14 @@ type s3BackupStore struct { secretAccessKey string } +type disasterRecoveryComponent string + +const ( + disasterRecoveryComponentInfra disasterRecoveryComponent = "infra" + disasterRecoveryComponentECInstall disasterRecoveryComponent = "ec-install" + disasterRecoveryComponentApp disasterRecoveryComponent = "app" +) + type invalidBackupsError struct { invalidBackups []velerov1.Backup invalidReasons []string @@ -59,6 +92,137 @@ func (e *invalidBackupsError) Error() string { return fmt.Sprintf("\nFound %d backups, but none are restorable:\n%s\n", len(e.invalidBackups), strings.Join(reasons, "\n")) } +// getECRestoreState returns the current restore state. +func getECRestoreState(ctx context.Context) ecRestoreState { + kcli, err := kubeutils.KubeClient() + if err != nil { + return ecRestoreStateNew + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "embedded-cluster", + Name: ecRestoreStateCMName, + }, + } + if err := kcli.Get(ctx, types.NamespacedName{Namespace: cm.Namespace, Name: cm.Name}, cm); err != nil { + return ecRestoreStateNew + } + state, ok := cm.Data["state"] + if !ok { + return ecRestoreStateNew + } + for _, s := range ecRestoreStates { + if s == ecRestoreState(state) { + return s + } + } + return ecRestoreStateNew +} + +// setECRestoreState sets the current restore state. +func setECRestoreState(ctx context.Context, state ecRestoreState, backupName string) error { + kcli, err := kubeutils.KubeClient() + if err != nil { + return fmt.Errorf("unable to create kube client: %w", err) + } + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "embedded-cluster", + }, + } + if err := kcli.Create(ctx, ns); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("unable to create namespace: %w", err) + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "embedded-cluster", + Name: ecRestoreStateCMName, + }, + Data: map[string]string{ + "state": string(state), + }, + } + if backupName != "" { + cm.Data["backup-name"] = backupName + } + err = kcli.Create(ctx, cm) + if err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("unable to create config map: %w", err) + } + if errors.IsAlreadyExists(err) { + if err := kcli.Update(ctx, cm); err != nil { + return fmt.Errorf("unable to update config map: %w", err) + } + } + return nil +} + +// resetECRestoreState resets the restore state. +func resetECRestoreState(ctx context.Context) error { + kcli, err := kubeutils.KubeClient() + if err != nil { + return fmt.Errorf("unable to create kube client: %w", err) + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "embedded-cluster", + Name: ecRestoreStateCMName, + }, + } + if err := kcli.Delete(ctx, cm); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("unable to delete config map: %w", err) + } + return nil +} + +// getBackupFromRestoreState gets the backup defined in the restore state. +// If no backup is defined in the restore state, it returns nil. +// It returns an error if a backup is defined in the restore state but: +// - is not found by Velero anymore. +// - is not restorable by the current binary. 
+func getBackupFromRestoreState(ctx context.Context) (*velerov1.Backup, error) { + kcli, err := kubeutils.KubeClient() + if err != nil { + return nil, fmt.Errorf("unable to create kube client: %w", err) + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "embedded-cluster", + Name: ecRestoreStateCMName, + }, + } + if err := kcli.Get(ctx, types.NamespacedName{Namespace: cm.Namespace, Name: cm.Name}, cm); err != nil { + return nil, fmt.Errorf("unable to get restore state: %w", err) + } + backupName, ok := cm.Data["backup-name"] + if !ok || backupName == "" { + return nil, nil + } + cfg, err := k8sconfig.GetConfig() + if err != nil { + return nil, fmt.Errorf("unable to get kubernetes config: %w", err) + } + veleroClient, err := veleroclientv1.NewForConfig(cfg) + if err != nil { + return nil, fmt.Errorf("unable to create velero client: %w", err) + } + backup, err := veleroClient.Backups(defaults.VeleroNamespace).Get(ctx, backupName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("unable to get backup: %w", err) + } + rel, err := release.GetChannelRelease() + if err != nil { + return nil, fmt.Errorf("unable to get release from binary: %w", err) + } + if rel == nil { + return nil, fmt.Errorf("no release found in binary") + } + if restorable, reason := isBackupRestorable(backup, rel); !restorable { + return nil, fmt.Errorf("backup %q %s", backup.Name, reason) + } + return backup, nil +} + // newS3BackupStore prompts the user for S3 backup store configuration. func newS3BackupStore() *s3BackupStore { store := &s3BackupStore{} @@ -153,9 +317,7 @@ func ensureK0sConfigForRestore(c *cli.Context) error { // runOutroForRestore calls Outro() in all enabled addons for restore operations by means of Applier. func runOutroForRestore(c *cli.Context) error { - os.Setenv("KUBECONFIG", defaults.PathToKubeConfig()) - opts := []addons.Option{} - return addons.NewApplier(opts...).OutroForRestore(c.Context) + return addons.NewApplier().OutroForRestore(c.Context) } func isBackupRestorable(backup *velerov1.Backup, rel *release.ChannelRelease) (bool, string) { @@ -275,58 +437,17 @@ func pickBackupToRestore(backups []velerov1.Backup) *velerov1.Backup { return latestBackup } -// waitForRestoreCompleted waits for a Velero restore to complete. 
-func waitForRestoreCompleted(ctx context.Context, restoreName string) (*velerov1.Restore, error) { - cfg, err := k8sconfig.GetConfig() - if err != nil { - return nil, fmt.Errorf("unable to get kubernetes config: %w", err) - } - - veleroClient, err := veleroclientv1.NewForConfig(cfg) - if err != nil { - return nil, fmt.Errorf("unable to create velero client: %w", err) - } - - for { - restore, err := veleroClient.Restores(defaults.VeleroNamespace).Get(ctx, restoreName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("unable to get restore: %w", err) - } - - switch restore.Status.Phase { - case velerov1.RestorePhaseCompleted: - return restore, nil - case velerov1.RestorePhaseFailed: - return restore, fmt.Errorf("restore failed") - case velerov1.RestorePhasePartiallyFailed: - return restore, fmt.Errorf("restore partially failed") - default: - // in progress - } - - time.Sleep(time.Second) - } -} - -type DisasterRecoveryComponent string - -const ( - DisasterRecoveryComponentInfra DisasterRecoveryComponent = "infra" - DisasterRecoveryComponentECInstall DisasterRecoveryComponent = "ec-install" - DisasterRecoveryComponentApp DisasterRecoveryComponent = "app" -) - -// restoreFromBackup restores a disaster recovery component from a backup. -func restoreFromBackup(ctx context.Context, backup *velerov1.Backup, drComponent DisasterRecoveryComponent) error { +// waitForDRComponent waits for a disaster recovery component to be restored. +func waitForDRComponent(ctx context.Context, drComponent disasterRecoveryComponent, restoreName string) error { loading := spinner.Start() defer loading.Close() switch drComponent { - case DisasterRecoveryComponentInfra: + case disasterRecoveryComponentInfra: loading.Infof("Restoring infrastructure") - case DisasterRecoveryComponentECInstall: + case disasterRecoveryComponentECInstall: loading.Infof("Restoring cluster state") - case DisasterRecoveryComponentApp: + case disasterRecoveryComponentApp: loading.Infof("Restoring application") } @@ -340,50 +461,26 @@ func restoreFromBackup(ctx context.Context, backup *velerov1.Backup, drComponent return fmt.Errorf("unable to create velero client: %w", err) } - // define the restore object - restore := &velerov1.Restore{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: defaults.VeleroNamespace, - Name: fmt.Sprintf("%s.%s", backup.Name, string(drComponent)), - Annotations: map[string]string{ - "kots.io/embedded-cluster": "true", - }, - }, - Spec: velerov1.RestoreSpec{ - BackupName: backup.Name, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "replicated.com/disaster-recovery": string(drComponent), - }, - }, - RestorePVs: ptr.To(true), - IncludeClusterResources: ptr.To(true), - }, - } - - // delete existing restore object (if exists) - err = veleroClient.Restores(defaults.VeleroNamespace).Delete(ctx, restore.Name, metav1.DeleteOptions{}) - if err != nil && !strings.Contains(err.Error(), "not found") { - return fmt.Errorf("unable to delete restore %s: %w", restore.Name, err) - } - - // create new restore object - restore, err = veleroClient.Restores(defaults.VeleroNamespace).Create(ctx, restore, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("unable to create restore: %w", err) - } - - // wait for restore to complete - restore, err = waitForRestoreCompleted(ctx, restore.Name) - if err != nil { - if restore != nil { - return fmt.Errorf("restore failed with %d errors and %d warnings.: %w", restore.Status.Errors, restore.Status.Warnings, err) + // wait for velero restore to 
complete + for { + restore, err := veleroClient.Restores(defaults.VeleroNamespace).Get(ctx, restoreName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("unable to get restore: %w", err) } - return fmt.Errorf("unable to wait for velero restore to complete: %w", err) + if restore.Status.Phase == velerov1.RestorePhaseCompleted { + break + } + if restore.Status.Phase == velerov1.RestorePhaseFailed { + return fmt.Errorf("restore failed with %d errors and %d warnings: %w", restore.Status.Errors, restore.Status.Warnings, err) + } + if restore.Status.Phase == velerov1.RestorePhasePartiallyFailed { + return fmt.Errorf("restore partially failed with %d errors and %d warnings: %w", restore.Status.Errors, restore.Status.Warnings, err) + } + time.Sleep(time.Second) } // wait for embedded cluster installation to reconcile - if drComponent == DisasterRecoveryComponentECInstall { + if drComponent == disasterRecoveryComponentECInstall { kcli, err := kubeutils.KubeClient() if err != nil { return fmt.Errorf("unable to create kube client: %w", err) @@ -394,38 +491,74 @@ func restoreFromBackup(ctx context.Context, backup *velerov1.Backup, drComponent } switch drComponent { - case DisasterRecoveryComponentInfra: + case disasterRecoveryComponentInfra: loading.Infof("Infrastructure restored!") - case DisasterRecoveryComponentECInstall: + case disasterRecoveryComponentECInstall: loading.Infof("Cluster state restored!") - case DisasterRecoveryComponentApp: + case disasterRecoveryComponentApp: loading.Infof("Application restored!") } return nil } -func waitForAdditionalNodes(ctx context.Context) error { - // the admin console detects this config map and redirects the user to the cluster management page - kcli, err := kubeutils.KubeClient() +// restoreFromBackup restores a disaster recovery component from a backup. 
+func restoreFromBackup(ctx context.Context, backup *velerov1.Backup, drComponent disasterRecoveryComponent) error { + cfg, err := k8sconfig.GetConfig() if err != nil { - return fmt.Errorf("unable to create kube client: %w", err) + return fmt.Errorf("unable to get kubernetes config: %w", err) } - waitForNodesCM := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "embedded-cluster-wait-for-nodes", - Namespace: "embedded-cluster", - }, - Data: map[string]string{}, + + veleroClient, err := veleroclientv1.NewForConfig(cfg) + if err != nil { + return fmt.Errorf("unable to create velero client: %w", err) } - if err := kcli.Create(ctx, waitForNodesCM); err != nil && !errors.IsAlreadyExists(err) { - return fmt.Errorf("unable to create wait-for-nodes config map: %w", err) + + restoreName := fmt.Sprintf("%s.%s", backup.Name, string(drComponent)) + + // check if a restore object already exists + _, err = veleroClient.Restores(defaults.VeleroNamespace).Get(ctx, restoreName, metav1.GetOptions{}) + if err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("unable to get restore: %w", err) } - defer func() { - if err := kcli.Delete(ctx, waitForNodesCM); err != nil && !errors.IsNotFound(err) { - logrus.Errorf("unable to delete wait-for-nodes config map: %v", err) + + // create a new restore object if it doesn't exist + if errors.IsNotFound(err) { + restore := &velerov1.Restore{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaults.VeleroNamespace, + Name: restoreName, + Annotations: map[string]string{ + "kots.io/embedded-cluster": "true", + }, + }, + Spec: velerov1.RestoreSpec{ + BackupName: backup.Name, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "replicated.com/disaster-recovery": string(drComponent), + }, + }, + RestorePVs: ptr.To(true), + IncludeClusterResources: ptr.To(true), + }, } - }() + _, err := veleroClient.Restores(defaults.VeleroNamespace).Create(ctx, restore, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("unable to create restore: %w", err) + } + } + + // wait for restore to complete + return waitForDRComponent(ctx, drComponent, restoreName) +} + +// waitForAdditionalNodes waits for for user to add additional nodes to the cluster. +func waitForAdditionalNodes(ctx context.Context) error { + kcli, err := kubeutils.KubeClient() + if err != nil { + return fmt.Errorf("unable to create kube client: %w", err) + } loading := spinner.Start() loading.Infof("Waiting for Admin Console to deploy") @@ -470,114 +603,180 @@ var restoreCommand = &cli.Command{ if os.Getuid() != 0 { return fmt.Errorf("restore command must be run as root") } + os.Setenv("KUBECONFIG", defaults.PathToKubeConfig()) return nil }, Action: func(c *cli.Context) error { - logrus.Debugf("checking if %s is already installed", binName) - if installed, err := isAlreadyInstalled(); err != nil { - return err - } else if installed { - logrus.Errorf("An installation has been detected on this machine.") - logrus.Infof("If you want to restore you need to remove the existing installation") - logrus.Infof("first. 
You can do this by running the following command:") - logrus.Infof("\n sudo ./%s reset\n", binName) - return ErrNothingElseToAdd - } - - logrus.Infof("You'll be guided through the process of restoring %s from a backup.\n", binName) - logrus.Info("Enter information to configure access to your backup storage location.\n") - s3Store := newS3BackupStore() + logrus.Debugf("getting restore state") + state := getECRestoreState(c.Context) + logrus.Debugf("restore state is: %q", state) - logrus.Debugf("validating backup store configuration") - if err := validateS3BackupStore(s3Store); err != nil { - return fmt.Errorf("unable to validate backup store: %w", err) + if state != ecRestoreStateNew { + shouldResume := prompts.New().Confirm("A previous restore operation was detected. Would you like to resume?", true) + logrus.Info("") + if !shouldResume { + state = ecRestoreStateNew + } } - logrus.Debugf("configuring network manager") - if err := configureNetworkManager(c); err != nil { - return fmt.Errorf("unable to configure network manager: %w", err) - } - logrus.Debugf("materializing binaries") - if err := materializeFiles(c); err != nil { - return fmt.Errorf("unable to materialize binaries: %w", err) - } - logrus.Debugf("running host preflights") - if err := RunHostPreflightsForRestore(c); err != nil { - return fmt.Errorf("unable to finish preflight checks: %w", err) - } - logrus.Debugf("creating k0s configuration file") - if err := ensureK0sConfigForRestore(c); err != nil { - return fmt.Errorf("unable to create config file: %w", err) - } - logrus.Debugf("installing k0s") - if err := installK0s(); err != nil { - return fmt.Errorf("unable update cluster: %w", err) - } - logrus.Debugf("running post install") - if err := runPostInstall(); err != nil { - return fmt.Errorf("unable to run post install: %w", err) - } - logrus.Debugf("waiting for k0s to be ready") - if err := waitForK0s(); err != nil { - return fmt.Errorf("unable to wait for node: %w", err) - } - logrus.Debugf("running outro") - if err := runOutroForRestore(c); err != nil { - return fmt.Errorf("unable to run outro: %w", err) + // if the user wants to resume, check if a backup has already been picked. + var backupToRestore *velerov1.Backup + if state != ecRestoreStateNew { + logrus.Debugf("getting backup from restore state") + var err error + backupToRestore, err = getBackupFromRestoreState(c.Context) + if err != nil { + return fmt.Errorf("unable to resume: %w", err) + } + if backupToRestore != nil { + completionTimestamp := backupToRestore.Status.CompletionTimestamp.Time.Format("2006-01-02 15:04:05 UTC") + logrus.Infof("Resuming restore from backup %q (%s)\n", backupToRestore.Name, completionTimestamp) + } } - logrus.Debugf("configuring backup storage location") - if err := kotscli.VeleroConfigureOtherS3(kotscli.VeleroConfigureOtherS3Options{ - Endpoint: s3Store.endpoint, - Region: s3Store.region, - Bucket: s3Store.bucket, - Path: s3Store.prefix, - AccessKeyID: s3Store.accessKeyID, - SecretAccessKey: s3Store.secretAccessKey, - Namespace: defaults.KotsadmNamespace, - }); err != nil { - return err - } + switch state { + case ecRestoreStateNew: + logrus.Debugf("checking if %s is already installed", binName) + if installed, err := isAlreadyInstalled(); err != nil { + return err + } else if installed { + logrus.Errorf("An installation has been detected on this machine.") + logrus.Infof("If you want to restore you need to remove the existing installation") + logrus.Infof("first. 
You can do this by running the following command:") + logrus.Infof("\n sudo ./%s reset\n", binName) + return ErrNothingElseToAdd + } - logrus.Debugf("waiting for backups to become available") - backups, err := waitForBackups(c.Context) - if err != nil { - return err - } + logrus.Infof("You'll be guided through the process of restoring %s from a backup.\n", binName) + logrus.Info("Enter information to configure access to your backup storage location.\n") + s3Store := newS3BackupStore() - logrus.Debugf("picking backup to restore") - backup := pickBackupToRestore(backups) - if backup == nil { - return fmt.Errorf("no backups are candidates for restore") - } + logrus.Debugf("validating backup store configuration") + if err := validateS3BackupStore(s3Store); err != nil { + return fmt.Errorf("unable to validate backup store: %w", err) + } - logrus.Info("") - completionTimestamp := backup.Status.CompletionTimestamp.Time.Format("2006-01-02 15:04:05 UTC") - shouldRestore := prompts.New().Confirm(fmt.Sprintf("Restore from backup %q (%s)?", backup.Name, completionTimestamp), true) - logrus.Info("") - if !shouldRestore { - logrus.Infof("Aborting restore...") - return nil - } + logrus.Debugf("configuring network manager") + if err := configureNetworkManager(c); err != nil { + return fmt.Errorf("unable to configure network manager: %w", err) + } + logrus.Debugf("materializing binaries") + if err := materializeFiles(c); err != nil { + return fmt.Errorf("unable to materialize binaries: %w", err) + } + logrus.Debugf("running host preflights") + if err := RunHostPreflightsForRestore(c); err != nil { + return fmt.Errorf("unable to finish preflight checks: %w", err) + } + logrus.Debugf("creating k0s configuration file") + if err := ensureK0sConfigForRestore(c); err != nil { + return fmt.Errorf("unable to create config file: %w", err) + } + logrus.Debugf("installing k0s") + if err := installK0s(); err != nil { + return fmt.Errorf("unable update cluster: %w", err) + } + logrus.Debugf("running post install") + if err := runPostInstall(); err != nil { + return fmt.Errorf("unable to run post install: %w", err) + } + logrus.Debugf("waiting for k0s to be ready") + if err := waitForK0s(); err != nil { + return fmt.Errorf("unable to wait for node: %w", err) + } + logrus.Debugf("running outro") + if err := runOutroForRestore(c); err != nil { + return fmt.Errorf("unable to run outro: %w", err) + } - logrus.Debugf("restoring infra from backup %q", backup.Name) - if err := restoreFromBackup(c.Context, backup, DisasterRecoveryComponentInfra); err != nil { - return err - } + logrus.Debugf("configuring velero backup storage location") + if err := kotscli.VeleroConfigureOtherS3(kotscli.VeleroConfigureOtherS3Options{ + Endpoint: s3Store.endpoint, + Region: s3Store.region, + Bucket: s3Store.bucket, + Path: s3Store.prefix, + AccessKeyID: s3Store.accessKeyID, + SecretAccessKey: s3Store.secretAccessKey, + Namespace: defaults.KotsadmNamespace, + }); err != nil { + return err + } + fallthrough - logrus.Debugf("restoring embedded cluster installation from backup %q", backup.Name) - if err := restoreFromBackup(c.Context, backup, DisasterRecoveryComponentECInstall); err != nil { - return err - } + case ecRestoreStateConfirmBackup: + logrus.Debugf("setting restore state to %q", ecRestoreStateConfirmBackup) + if err := setECRestoreState(c.Context, ecRestoreStateConfirmBackup, ""); err != nil { + return fmt.Errorf("unable to set restore state: %w", err) + } - logrus.Debugf("waiting for additional nodes to be added") - if err := 
waitForAdditionalNodes(c.Context); err != nil { - return err - } + logrus.Debugf("waiting for backups to become available") + backups, err := waitForBackups(c.Context) + if err != nil { + return err + } + + logrus.Debugf("picking backup to restore") + backupToRestore = pickBackupToRestore(backups) - logrus.Debugf("restoring app from backup %q", backup.Name) - if err := restoreFromBackup(c.Context, backup, DisasterRecoveryComponentApp); err != nil { - return err + logrus.Info("") + completionTimestamp := backupToRestore.Status.CompletionTimestamp.Time.Format("2006-01-02 15:04:05 UTC") + shouldRestore := prompts.New().Confirm(fmt.Sprintf("Restore from backup %q (%s)?", backupToRestore.Name, completionTimestamp), true) + logrus.Info("") + if !shouldRestore { + logrus.Infof("Aborting restore...") + return nil + } + fallthrough + + case ecRestoreStateRestoreInfra: + logrus.Debugf("setting restore state to %q", ecRestoreStateRestoreInfra) + if err := setECRestoreState(c.Context, ecRestoreStateRestoreInfra, backupToRestore.Name); err != nil { + return fmt.Errorf("unable to set restore state: %w", err) + } + logrus.Debugf("restoring infra from backup %q", backupToRestore.Name) + if err := restoreFromBackup(c.Context, backupToRestore, disasterRecoveryComponentInfra); err != nil { + return err + } + fallthrough + + case ecRestoreStateRestoreECInstall: + logrus.Debugf("setting restore state to %q", ecRestoreStateRestoreECInstall) + if err := setECRestoreState(c.Context, ecRestoreStateRestoreECInstall, backupToRestore.Name); err != nil { + return fmt.Errorf("unable to set restore state: %w", err) + } + logrus.Debugf("restoring embedded cluster installation from backup %q", backupToRestore.Name) + if err := restoreFromBackup(c.Context, backupToRestore, disasterRecoveryComponentECInstall); err != nil { + return err + } + fallthrough + + case ecRestoreStateWaitForNodes: + logrus.Debugf("setting restore state to %q", ecRestoreStateWaitForNodes) + if err := setECRestoreState(c.Context, ecRestoreStateWaitForNodes, backupToRestore.Name); err != nil { + return fmt.Errorf("unable to set restore state: %w", err) + } + logrus.Debugf("waiting for additional nodes to be added") + if err := waitForAdditionalNodes(c.Context); err != nil { + return err + } + fallthrough + + case ecRestoreStateRestoreApp: + logrus.Debugf("setting restore state to %q", ecRestoreStateRestoreApp) + if err := setECRestoreState(c.Context, ecRestoreStateRestoreApp, backupToRestore.Name); err != nil { + return fmt.Errorf("unable to set restore state: %w", err) + } + logrus.Debugf("restoring app from backup %q", backupToRestore.Name) + if err := restoreFromBackup(c.Context, backupToRestore, disasterRecoveryComponentApp); err != nil { + return err + } + logrus.Debugf("resetting restore state") + if err := resetECRestoreState(c.Context); err != nil { + return fmt.Errorf("unable to reset restore state: %w", err) + } + + default: + return fmt.Errorf("unknown restore state: %q", state) } return nil diff --git a/e2e/restore_test.go b/e2e/restore_test.go index 80b6c8f59..38b0764a0 100644 --- a/e2e/restore_test.go +++ b/e2e/restore_test.go @@ -75,3 +75,71 @@ func TestSingleNodeDisasterRecovery(t *testing.T) { t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) } + +func TestSingleNodeResumeDisasterRecovery(t *testing.T) { + t.Parallel() + + requiredEnvVars := []string{ + "DR_AWS_S3_ENDPOINT", + "DR_AWS_S3_REGION", + "DR_AWS_S3_BUCKET", + "DR_AWS_S3_PREFIX", + "DR_AWS_ACCESS_KEY_ID", + "DR_AWS_SECRET_ACCESS_KEY", + } + for _, envVar := range 
requiredEnvVars { + if os.Getenv(envVar) == "" { + t.Fatalf("missing required environment variable: %s", envVar) + } + } + + testArgs := []string{} + for _, envVar := range requiredEnvVars { + testArgs = append(testArgs, os.Getenv(envVar)) + } + + tc := cluster.NewTestCluster(&cluster.Input{ + T: t, + Nodes: 1, + Image: "ubuntu/jammy", + LicensePath: "snapshot-license.yaml", + EmbeddedClusterPath: "../output/bin/embedded-cluster", + }) + defer cleanupCluster(t, tc) + + t.Logf("%s: installing test dependencies on node 0", time.Now().Format(time.RFC3339)) + commands := [][]string{ + {"apt-get", "update", "-y"}, + {"apt-get", "install", "expect", "-y"}, + } + if err := RunCommandsOnNode(t, tc, 0, commands); err != nil { + t.Fatalf("fail to install test dependencies on node %s: %v", tc.Nodes[0], err) + } + + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) + line := []string{"single-node-install.sh", "ui"} + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to install embedded-cluster on node %s: %v", tc.Nodes[0], err) + } + + if err := setupPlaywright(t, tc); err != nil { + t.Fatalf("fail to setup playwright: %v", err) + } + if _, _, err := runPlaywrightTest(t, tc, "create-backup", testArgs...); err != nil { + t.Fatalf("fail to run playwright test create-backup: %v", err) + } + + t.Logf("%s: resetting the installation", time.Now().Format(time.RFC3339)) + line = []string{"reset-installation.sh"} + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to reset the installation: %v", err) + } + + t.Logf("%s: restoring the installation", time.Now().Format(time.RFC3339)) + line = append([]string{"resume-restore.exp"}, testArgs...) + if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil { + t.Fatalf("fail to restore the installation: %v", err) + } + + t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) +} diff --git a/e2e/scripts/resume-restore.exp b/e2e/scripts/resume-restore.exp new file mode 100755 index 000000000..05e6ac67c --- /dev/null +++ b/e2e/scripts/resume-restore.exp @@ -0,0 +1,399 @@ +#!/usr/bin/env expect + +set env(EMBEDDED_CLUSTER_PLAIN_PROMPTS) "true" +set env(EMBEDDED_CLUSTER_METRICS_BASEURL) "https://staging.replicated.app" +set env(KUBECONFIG) "/var/lib/k0s/pki/admin.conf" +set env(PATH) "$env(PATH):/var/lib/embedded-cluster/bin" + +set dr_aws_s3_endpoint [lindex $argv 0] +set dr_aws_s3_region [lindex $argv 1] +set dr_aws_s3_bucket [lindex $argv 2] +set dr_aws_s3_prefix [lindex $argv 3] +set dr_aws_access_key_id [lindex $argv 4] +set dr_aws_secret_access_key [lindex $argv 5] + +proc send_interrupt {} { + send "\x03" + expect eof +} + +spawn embedded-cluster restore + +expect { + "Enter information to configure access to your backup storage location." {} + timeout { + puts "\n\nFailed to find introduction." + exit 1 + } +} + +expect { + "S3 endpoint:" { + send "$dr_aws_s3_endpoint\r" + } + timeout { + puts "\n\nFailed to find 'S3 endpoint' prompt." + exit 1 + } +} + +expect { + "Region:" { + send "$dr_aws_s3_region\r" + } + timeout { + puts "\n\nFailed to find 'Region' prompt." + exit 1 + } +} + +expect { + "Bucket:" { + send "$dr_aws_s3_bucket\r" + } + timeout { + puts "\n\nFailed to find 'Bucket' prompt." + exit 1 + } +} + +expect { + "Prefix (press Enter to skip):" { + send "$dr_aws_s3_prefix\r" + } + timeout { + puts "\n\nFailed to find 'Prefix' prompt." 
+ exit 1 + } +} + +expect { + "Access key ID:" { + send "$dr_aws_access_key_id\r" + } + timeout { + puts "\n\nFailed to find 'Access key ID' prompt." + exit 1 + } +} + +expect { + "Secret access key:" { + send "$dr_aws_secret_access_key\r" + } + timeout { + puts "\n\nFailed to find 'Secret access key' prompt." + exit 1 + } +} + +expect { + -timeout 210 "Velero is ready!" {} + timeout { + puts "\n\nFailed to wait for Velero to be ready." + exit 1 + } +} + +expect { + -timeout 30 "Backup storage location configured!" {} + timeout { + puts "\n\nFailed to configure backup storage location." + exit 1 + } +} + +expect { + -timeout 30 "Found 1 restorable backup!" {} + timeout { + puts "\n\nFailed to find a restorable backup." + exit 1 + } +} + +expect { + -re "Restore from backup.*\?" { + send "n\r" + } + timeout { + puts "\n\nFailed to find 'Restore from backup' prompt." + exit 1 + } +} + +expect { + "Aborting restore..." {} + timeout { + puts "\n\nFailed to find 'aborting restore' prompt." + exit 1 + } +} + +expect { + eof {} + timeout { + puts "\n\nDid not receive EOF after aborting restore." + exit 1 + } +} + +spawn embedded-cluster restore + +expect { + "A previous restore operation was detected. Would you like to resume?" { + send "Y\r" + } + timeout { + puts "\n\nFailed to find 'previous restore operation was detected' prompt." + exit 1 + } +} + +expect { + -timeout 30 "Found 1 restorable backup!" {} + timeout { + puts "\n\nFailed to find a restorable backup." + exit 1 + } +} + +expect { + -re "Restore from backup.*\?" { + send "Y\r" + } + timeout { + puts "\n\nFailed to find 'Restore from backup' prompt." + exit 1 + } +} + +expect { + "Restoring infrastructure" {} + timeout { + puts "\n\nFailed to find 'restoring infrastructure' spinner." + exit 1 + } +} + +send_interrupt +spawn embedded-cluster restore + +expect { + "A previous restore operation was detected. Would you like to resume?" { + send "Y\r" + } + timeout { + puts "\n\nFailed to find 'previous restore operation was detected' prompt." + exit 1 + } +} + +expect { + "Resuming restore from backup" {} + timeout { + puts "\n\nFailed to find 'resuming restore from backup' message." + exit 1 + } +} + +expect { + "Restoring infrastructure" {} + timeout { + puts "\n\nFailed to find 'restoring infrastructure' spinner." + exit 1 + } +} + +expect { + -timeout 60 "Infrastructure restored!" {} + timeout { + puts "\n\nFailed to restore infrastructure." + exit 1 + } +} + +expect { + "Restoring cluster state" {} + timeout { + puts "\n\nFailed to find 'restoring cluster state' spinner." + exit 1 + } +} + +send_interrupt +spawn embedded-cluster restore + +expect { + "A previous restore operation was detected. Would you like to resume?" { + send "Y\r" + } + timeout { + puts "\n\nFailed to find 'previous restore operation was detected' prompt." + exit 1 + } +} + +expect { + "Resuming restore from backup" {} + timeout { + puts "\n\nFailed to find 'resuming restore from backup' message." + exit 1 + } +} + +expect { + "Restoring cluster state" {} + timeout { + puts "\n\nFailed to find 'restoring cluster state' spinner." + exit 1 + } +} + +expect { + -timeout 150 "Cluster state restored!" {} + timeout { + puts "\n\nFailed to restore cluster state." + exit 1 + } +} + +expect { + -timeout 60 "Admin Console is ready!" {} + timeout { + puts "\n\nFailed to wait for admin console to be ready." + exit 1 + } +} + +expect { + "Visit the admin console if you need to add nodes to the cluster" {} + timeout { + puts "\n\nFailed to find admin console URL." 
+ exit 1 + } +} + +expect { + "Type 'continue' when you are done adding nodes" { + send "\r" + } + timeout { + puts "\n\nFailed to find 'done adding nodes' prompt." + exit 1 + } +} + +send_interrupt +spawn embedded-cluster restore + +expect { + "A previous restore operation was detected. Would you like to resume?" { + send "Y\r" + } + timeout { + puts "\n\nFailed to find 'previous restore operation was detected' prompt." + exit 1 + } +} + +expect { + "Resuming restore from backup" {} + timeout { + puts "\n\nFailed to find 'resuming restore from backup' message." + exit 1 + } +} + +expect { + -timeout 60 "Admin Console is ready!" {} + timeout { + puts "\n\nFailed to wait for admin console to be ready." + exit 1 + } +} + +expect { + "Visit the admin console if you need to add nodes to the cluster" {} + timeout { + puts "\n\nFailed to find admin console URL." + exit 1 + } +} + +expect { + "Type 'continue' when you are done adding nodes" { + send "\r" + } + timeout { + puts "\n\nFailed to find 'done adding nodes' prompt." + exit 1 + } +} + +expect { + "Please type 'continue' to proceed" { + send "continue\r" + } + timeout { + puts "\n\nFailed to find 'type continue to proceed' prompt." + exit 1 + } +} + +expect { + -timeout 30 "All nodes are ready!" {} + timeout { + puts "\n\nFailed to wait for nodes." + exit 1 + } +} + +expect { + "Restoring application" {} + timeout { + puts "\n\nFailed to find 'restoring application' spinner." + exit 1 + } +} + +send_interrupt +spawn embedded-cluster restore + +expect { + "A previous restore operation was detected. Would you like to resume?" { + send "Y\r" + } + timeout { + puts "\n\nFailed to find 'previous restore operation was detected' prompt." + exit 1 + } +} + +expect { + "Resuming restore from backup" {} + timeout { + puts "\n\nFailed to find 'resuming restore from backup' message." + exit 1 + } +} + +expect { + "Restoring application" {} + timeout { + puts "\n\nFailed to find 'restoring application' spinner." + exit 1 + } +} + +expect { + -timeout 60 "Application restored!" { + exit 0 + } + timeout { + puts "\n\nFailed to restore application." + exit 1 + } +} + +puts "\n\nCommand exited before finishing all validations." 
+exit 1 From 1441cd63b5d97cd2c0aeab20018c7942b32a43e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 May 2024 16:13:24 +0000 Subject: [PATCH 4/5] build(deps): bump github.com/replicatedhq/embedded-cluster-operator from 0.30.3 to 0.31.1 (#615) --- go.mod | 16 ++++++++-------- go.sum | 40 ++++++++++++++++++++-------------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index bbc6c1415..64605cf20 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/jedib0t/go-pretty v4.3.0+incompatible github.com/k0sproject/dig v0.2.0 github.com/k0sproject/k0s v1.29.5-0.20240418091920-5421d2dc4754 - github.com/replicatedhq/embedded-cluster-kinds v1.1.9 - github.com/replicatedhq/embedded-cluster-operator v0.30.3 + github.com/replicatedhq/embedded-cluster-kinds v1.2.1 + github.com/replicatedhq/embedded-cluster-operator v0.31.1 github.com/replicatedhq/embedded-cluster-utils v1.0.0 github.com/replicatedhq/kotskinds v0.0.0-20230724164735-f83482cc9cfe github.com/replicatedhq/troubleshoot v0.92.0 @@ -37,7 +37,7 @@ require ( require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/containerd v1.7.15 // indirect + github.com/containerd/containerd v1.7.16 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/frankban/quicktest v1.14.6 // indirect @@ -51,7 +51,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rs/cors v1.10.1 // indirect @@ -85,10 +85,10 @@ require ( github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 // indirect github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect github.com/go-openapi/errors v0.20.3 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/strfmt v0.21.7 // indirect - github.com/go-openapi/swag v0.22.10 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect @@ -133,8 +133,8 @@ require ( github.com/zitadel/oidc/v2 v2.7.0 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/net v0.24.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect diff --git a/go.sum b/go.sum index 8638c6fbc..f8a7bb00f 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/containerd v1.7.15 
h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes= -github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY= +github.com/containerd/containerd v1.7.16 h1:7Zsfe8Fkj4Wi2My6DXGQ87hiqIrmOXolm72ZEkFU5Mg= +github.com/containerd/containerd v1.7.16/go.mod h1:NL49g7A/Fui7ccmxV6zkBWwqMgmMxFWzujYCc+JLt7k= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -67,15 +67,15 @@ github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPj github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA= -github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -245,16 +245,16 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs 
v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/replicatedhq/embedded-cluster-kinds v1.1.9 h1:e0vjOKyEqo0Z3C5LoVNZATw+5AgrLln7IDt6lQ4x+pU= -github.com/replicatedhq/embedded-cluster-kinds v1.1.9/go.mod h1:wVal4dS9YGPKrrsuP3j4AwzG4qtyHCgHip+I+sG5U/s= -github.com/replicatedhq/embedded-cluster-operator v0.30.3 h1:SDnxG/LtAbXmC6Xmz97KXvRkSXv4YKh4OJYb2OPzmeE= -github.com/replicatedhq/embedded-cluster-operator v0.30.3/go.mod h1:6WKT7tDwVUvBTW6eSM4b1xyqk2ZWyi91WSulGO5qzNs= +github.com/replicatedhq/embedded-cluster-kinds v1.2.1 h1:iKRzBVMm4Gc8cbe7pL8D7WR8CXhfec2G7c4ygNSQINM= +github.com/replicatedhq/embedded-cluster-kinds v1.2.1/go.mod h1:QvjFYlRZnXC/szgEqTepB5DoU8v3O+mdxmJi0EoqDGY= +github.com/replicatedhq/embedded-cluster-operator v0.31.1 h1:BD7QwSjjbZYZCpWkVu46spakz+I8UcyiOSIjXRSCNiE= +github.com/replicatedhq/embedded-cluster-operator v0.31.1/go.mod h1:/lvWQrf6zfMLyWYtIN2X1VroiDxNh75ShtgdlKmCa74= github.com/replicatedhq/embedded-cluster-utils v1.0.0 h1:Axdni1nYfl5zeOP9g5U79yvN8cRdClyU6hz0wV1Hmdc= github.com/replicatedhq/embedded-cluster-utils v1.0.0/go.mod h1:4JmMC2CwMCLxq05GEW3XSPPVotqyamAF/omrbB3pH+c= github.com/replicatedhq/kotskinds v0.0.0-20230724164735-f83482cc9cfe h1:3AJInd06UxzqHmgy8+24CPsT2tYSE0zToJZyuX9q+MA= @@ -269,8 +269,8 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -326,8 +326,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -360,8 +360,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= @@ -416,8 +416,8 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From b0ccb68e2f5f47982661cfaa9d9ca2d34f6195ac Mon Sep 17 00:00:00 2001 From: Salah Al Saleh Date: Thu, 16 May 2024 08:50:32 -0700 Subject: [PATCH 5/5] Update KOTS to v1.109.3 (#619) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b5e896cbe..047d02215 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ ARCH := $(shell uname -m) APP_NAME = embedded-cluster ADMIN_CONSOLE_CHART_URL = oci://registry.replicated.com/library ADMIN_CONSOLE_CHART_NAME = admin-console -ADMIN_CONSOLE_CHART_VERSION = 1.109.0 +ADMIN_CONSOLE_CHART_VERSION = 1.109.3 ADMIN_CONSOLE_IMAGE_OVERRIDE = ADMIN_CONSOLE_MIGRATIONS_IMAGE_OVERRIDE = EMBEDDED_OPERATOR_CHART_URL = oci://registry.replicated.com/library