diff --git a/migrations/Makefile b/migrations/Makefile
index 360143828b..349746a23f 100644
--- a/migrations/Makefile
+++ b/migrations/Makefile
@@ -3,5 +3,5 @@ SCHEMAHERO_TAG ?= 0.17.12
 DOCKER_BUILD_ARGS ?=
 
 build_schema:
-	docker build --pull --build-arg SCHEMAHERO_TAG=${SCHEMAHERO_TAG} ${DOCKER_BUILD_ARGS} -f dev/Dockerfile.ttlsh -t ${IMAGE} .
+	docker build --pull --build-arg SCHEMAHERO_TAG=${SCHEMAHERO_TAG} ${DOCKER_BUILD_ARGS} -f ../dev/dockerfiles/kotsadm-migrations/Dockerfile.ttlsh -t ${IMAGE} .
 	docker push ${IMAGE}
diff --git a/pkg/docker/registry/registry.go b/pkg/docker/registry/registry.go
index 3d84de17fb..0576578a68 100644
--- a/pkg/docker/registry/registry.go
+++ b/pkg/docker/registry/registry.go
@@ -238,7 +238,8 @@ func applicationPullSecretLabels() map[string]string {
 	var secretLabels map[string]string
 	if util.IsEmbeddedCluster() {
 		secretLabels = map[string]string{
-			kotsadmtypes.DisasterRecoveryLabel: kotsadmtypes.DisasterRecoveryLabelValueApp,
+			kotsadmtypes.DisasterRecoveryLabel:      kotsadmtypes.DisasterRecoveryLabelValueInfra,
+			kotsadmtypes.DisasterRecoveryChartLabel: kotsadmtypes.DisasterRecoveryChartValue,
 		}
 	}
 
diff --git a/pkg/docker/registry/registry_test.go b/pkg/docker/registry/registry_test.go
index e5de0ee96b..7b06c4b794 100644
--- a/pkg/docker/registry/registry_test.go
+++ b/pkg/docker/registry/registry_test.go
@@ -416,7 +416,8 @@ func TestPullSecretForRegistries(t *testing.T) {
 					"helm.sh/hook-weight": "-9999",
 				},
 				Labels: map[string]string{
-					"replicated.com/disaster-recovery": "app",
+					"replicated.com/disaster-recovery":       "infra",
+					"replicated.com/disaster-recovery-chart": "admin-console",
 				},
 			},
 			Type: corev1.SecretTypeDockerConfigJson,
@@ -552,7 +553,8 @@ func TestGetDockerHubPullSecret(t *testing.T) {
 					"helm.sh/hook-weight": "-9999",
 				},
 				Labels: map[string]string{
-					"replicated.com/disaster-recovery": "app",
+					"replicated.com/disaster-recovery":       "infra",
+					"replicated.com/disaster-recovery-chart": "admin-console",
 				},
 			},
 			Type: corev1.SecretTypeDockerConfigJson,
diff --git a/pkg/k8sutil/clientset.go b/pkg/k8sutil/clientset.go
index 4e690adf45..33a00e69df 100644
--- a/pkg/k8sutil/clientset.go
+++ b/pkg/k8sutil/clientset.go
@@ -9,7 +9,9 @@ import (
 	"github.com/pkg/errors"
 	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1"
 	flag "github.com/spf13/pflag"
+	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/discovery"
@@ -167,3 +169,23 @@ func GetKubeClient(ctx context.Context) (kbclient.Client, error) {
 	}
 	return kcli, nil
 }
+
+func GetVeleroKubeClient(ctx context.Context) (kbclient.Client, error) {
+	k8slogger := zap.New(func(o *zap.Options) {
+		o.DestWriter = io.Discard
+	})
+	log.SetLogger(k8slogger)
+	cfg, err := GetClusterConfig()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get cluster config")
+	}
+	scheme := runtime.NewScheme()
+	velerov1.AddToScheme(scheme)
+	kcli, err := kbclient.New(cfg, kbclient.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create kubebuilder client")
+	}
+	return kcli, nil
+}
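Note: GetVeleroKubeClient consolidates the scheme setup that DownloadRequest (pkg/kotsadmsnapshot/download.go) and getVersion (pkg/snapshot/velero.go) previously duplicated; both call sites are refactored below. One observation: velerov1.AddToScheme returns an error that is silently dropped here. A minimal sketch of a stricter variant, assuming the same imports as the hunk above — this is not part of the diff:

// Sketch only: propagate scheme-registration failures instead of discarding them.
scheme := runtime.NewScheme()
if err := velerov1.AddToScheme(scheme); err != nil {
	return nil, errors.Wrap(err, "failed to add velero v1 types to scheme")
}
kcli, err := kbclient.New(cfg, kbclient.Options{Scheme: scheme})
if err != nil {
	return nil, errors.Wrap(err, "failed to create kubebuilder client")
}
return kcli, nil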
diff --git a/pkg/kotsadmsnapshot/backup.go b/pkg/kotsadmsnapshot/backup.go
index 477f834960..b0809c05e4 100644
--- a/pkg/kotsadmsnapshot/backup.go
+++ b/pkg/kotsadmsnapshot/backup.go
@@ -271,14 +271,14 @@ func CreateInstanceBackup(ctx context.Context, cluster *downstreamtypes.Downstre
 		}
 	}
 
-	logger.Infof("Creating instance backup CR %s", veleroBackup.Name)
+	logger.Infof("Creating instance backup CR %s", veleroBackup.GenerateName)
 	backup, err := veleroClient.Backups(metadata.backupStorageLocationNamespace).Create(ctx, veleroBackup, metav1.CreateOptions{})
 	if err != nil {
 		return "", errors.Wrap(err, "failed to create velero backup")
 	}
 
 	if appVeleroBackup != nil {
-		logger.Infof("Creating instance app backup CR %s", appVeleroBackup.Name)
+		logger.Infof("Creating instance app backup CR %s", appVeleroBackup.GenerateName)
 		_, err := veleroClient.Backups(metadata.backupStorageLocationNamespace).Create(ctx, appVeleroBackup, metav1.CreateOptions{})
 		if err != nil {
 			return "", errors.Wrap(err, "failed to create application velero backup")
@@ -492,7 +492,7 @@ func getInfrastructureInstanceBackupSpec(ctx context.Context, k8sClient kubernet
 		}
 	}
 
-	veleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, veleroBackup.Annotations, metadata, hasAppBackup)
+	veleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, veleroBackup.Annotations, metadata)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to add annotations to backup")
 	}
@@ -521,13 +521,11 @@ func getInfrastructureInstanceBackupSpec(ctx context.Context, k8sClient kubernet
 	return veleroBackup, nil
 }
 
-var EnableImprovedDR = false
-
 // getAppInstanceBackup returns a backup spec only if this is Embedded Cluster and the vendor has
 // defined both a backup and restore custom resource (improved DR).
 func getAppInstanceBackupSpec(k8sClient kubernetes.Interface, metadata instanceBackupMetadata) (*velerov1.Backup, error) {
 	// TODO(improveddr): remove this once we have fully implemented the improved DR
-	if !EnableImprovedDR {
+	if os.Getenv("ENABLE_IMPROVED_DR") != "true" {
 		return nil, nil
 	}
 
@@ -564,7 +562,7 @@ func getAppInstanceBackupSpec(k8sClient kubernetes.Interface, metadata instanceB
 	}
 
 	var err error
-	appVeleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, appVeleroBackup.Annotations, metadata, true)
+	appVeleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, appVeleroBackup.Annotations, metadata)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to add annotations to application backup")
 	}
@@ -656,7 +654,7 @@ func mergeAppBackupSpec(backup *velerov1.Backup, appMeta appInstanceBackupMetada
 }
 
 // appendCommonAnnotations appends common annotations to the backup annotations
-func appendCommonAnnotations(k8sClient kubernetes.Interface, annotations map[string]string, metadata instanceBackupMetadata, hasAppBackup bool) (map[string]string, error) {
+func appendCommonAnnotations(k8sClient kubernetes.Interface, annotations map[string]string, metadata instanceBackupMetadata) (map[string]string, error) {
 	kotsadmImage, err := k8sutil.FindKotsadmImage(k8sClient, metadata.kotsadmNamespace)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to find kotsadm image")
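Note: the package-level EnableImprovedDR flag becomes an ENABLE_IMPROVED_DR environment variable, so the gate can be flipped at runtime without rebuilding, and tests can use t.Setenv (see the test changes below). If the gate ever needs checking in more than one place, a small helper would keep the string comparison in one spot — a hypothetical sketch, the helper name is not in this diff:

// isImprovedDREnabled is a hypothetical helper, not part of this change.
// It centralizes the feature-gate check used by getAppInstanceBackupSpec.
func isImprovedDREnabled() bool {
	return os.Getenv("ENABLE_IMPROVED_DR") == "true"
}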
diff --git a/pkg/kotsadmsnapshot/backup_test.go b/pkg/kotsadmsnapshot/backup_test.go
index fec84bdc8e..142d5e8b39 100644
--- a/pkg/kotsadmsnapshot/backup_test.go
+++ b/pkg/kotsadmsnapshot/backup_test.go
@@ -961,10 +961,9 @@ func Test_appendCommonAnnotations(t *testing.T) {
 	}
 
 	type args struct {
-		k8sClient    kubernetes.Interface
-		annotations  map[string]string
-		metadata     instanceBackupMetadata
-		hasAppBackup bool
+		k8sClient   kubernetes.Interface
+		annotations map[string]string
+		metadata    instanceBackupMetadata
 	}
 	tests := []struct {
 		name string
@@ -1014,7 +1013,6 @@ func Test_appendCommonAnnotations(t *testing.T) {
 					snapshotTTL: 24 * time.Hour,
 					ec:          nil,
 				},
-				hasAppBackup: false,
 			},
 			want: map[string]string{
 				"kots.io/apps-sequences": "{\"app-1\":1,\"app-2\":2}",
@@ -1078,7 +1076,6 @@ func Test_appendCommonAnnotations(t *testing.T) {
 						seaweedFSS3ServiceIP: "10.96.0.10",
 					},
 				},
-				hasAppBackup: true,
 			},
 			want: map[string]string{
 				"kots.io/apps-sequences": "{\"app-1\":1}",
@@ -1108,7 +1105,7 @@
 			if tt.setup != nil {
 				tt.setup(t)
 			}
-			got, err := appendCommonAnnotations(tt.args.k8sClient, tt.args.annotations, tt.args.metadata, tt.args.hasAppBackup)
+			got, err := appendCommonAnnotations(tt.args.k8sClient, tt.args.annotations, tt.args.metadata)
 			if tt.wantErr {
 				require.Error(t, err)
 			} else {
@@ -1544,11 +1541,6 @@
 }
 
 func Test_getAppInstanceBackupSpec(t *testing.T) {
-	EnableImprovedDR = true
-	t.Cleanup(func() {
-		EnableImprovedDR = false
-	})
-
 	kotsadmSts := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "kotsadm",
@@ -1974,6 +1966,8 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			t.Setenv("ENABLE_IMPROVED_DR", "true")
+
 			ctrl := gomock.NewController(t)
 			defer ctrl.Finish()
 
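Note: t.Setenv (Go 1.17+) registers a cleanup that restores the variable's previous value when the test or subtest finishes, replacing the manual t.Cleanup bookkeeping the global flag required; it also fails fast if the test calls t.Parallel. A self-contained sketch of the pattern — the test name is illustrative, and the package clause assumes the file's package:

package kotsadmsnapshot

import (
	"os"
	"testing"
)

func Test_improvedDRGate(t *testing.T) {
	// Restored to its previous value automatically when the test ends.
	t.Setenv("ENABLE_IMPROVED_DR", "true")
	if os.Getenv("ENABLE_IMPROVED_DR") != "true" {
		t.Fatal("expected improved DR gate to be enabled")
	}
}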
diff --git a/pkg/kotsadmsnapshot/download.go b/pkg/kotsadmsnapshot/download.go
index 0ce5036508..1dad933120 100644
--- a/pkg/kotsadmsnapshot/download.go
+++ b/pkg/kotsadmsnapshot/download.go
@@ -12,8 +12,6 @@ import (
 	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/cmd/util/downloadrequest"
 	pkgresults "github.com/vmware-tanzu/velero/pkg/util/results"
-	"k8s.io/apimachinery/pkg/runtime"
-	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 func DownloadRestoreResults(ctx context.Context, veleroNamespace, restoreName string) ([]types.SnapshotError, []types.SnapshotError, error) {
@@ -83,18 +81,9 @@
 }
 
 func DownloadRequest(ctx context.Context, veleroNamespace string, kind velerov1.DownloadTargetKind, name string) (io.ReadCloser, error) {
-	clientConfig, err := k8sutil.GetClusterConfig()
+	kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get cluster config")
-	}
-
-	scheme := runtime.NewScheme()
-	velerov1.AddToScheme(scheme)
-	kbClient, err := kbclient.New(clientConfig, kbclient.Options{
-		Scheme: scheme,
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to get kubebuilder client")
+		return nil, errors.Wrap(err, "failed to get velero kube client")
 	}
 
 	pr, pw := io.Pipe()
diff --git a/pkg/snapshot/velero.go b/pkg/snapshot/velero.go
index 4af14711b0..1b8f38b99c 100644
--- a/pkg/snapshot/velero.go
+++ b/pkg/snapshot/velero.go
@@ -13,7 +13,6 @@ import (
 	kotsadmresources "github.com/replicatedhq/kots/pkg/kotsadm/resources"
 	kotsadmtypes "github.com/replicatedhq/kots/pkg/kotsadm/types"
 	"github.com/replicatedhq/kots/pkg/util"
-	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/cmd/cli/serverstatus"
 	veleroclientv1 "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
 	v1 "k8s.io/api/apps/v1"
@@ -21,10 +20,8 @@
 	kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/client-go/kubernetes"
-	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var (
@@ -358,17 +355,9 @@ NodeAgentFound:
 }
 
 func getVersion(ctx context.Context, namespace string) (string, error) {
-	clientConfig, err := k8sutil.GetClusterConfig()
+	kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to get cluster config")
-	}
-	scheme := runtime.NewScheme()
-	velerov1api.AddToScheme(scheme)
-	kbClient, err := kbclient.New(clientConfig, kbclient.Options{
-		Scheme: scheme,
-	})
-	if err != nil {
-		return "", errors.Wrap(err, "failed to get velero client")
+		return "", errors.Wrap(err, "failed to get velero kube client")
 	}
 
 	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
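Note: both refactored call sites now return early with the same wrapped error. Because GetVeleroKubeClient registers only the Velero v1 types, the returned client can read and write Backup/Restore CRs but will error on any unregistered kind. A minimal usage sketch — the namespace and backup name are placeholders, not values from this diff:

// Sketch: fetch a Backup CR with the scheme-limited client.
kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
if err != nil {
	return errors.Wrap(err, "failed to get velero kube client")
}
var backup velerov1.Backup
key := kbclient.ObjectKey{Namespace: "velero", Name: "instance-abcd"}
if err := kbClient.Get(ctx, key, &backup); err != nil {
	return errors.Wrap(err, "failed to get backup")
}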