feat(ec): fixes for restore in new improved dr (#5038)
* feat(ec): fixes for restore in new improved dr

* f

* f

* f

* f
emosbaugh authored Dec 10, 2024
1 parent 7b51555 commit 92431cf
Showing 8 changed files with 45 additions and 50 deletions.
2 changes: 1 addition & 1 deletion migrations/Makefile
@@ -3,5 +3,5 @@ SCHEMAHERO_TAG ?= 0.17.12
 DOCKER_BUILD_ARGS ?=
 
 build_schema:
-	docker build --pull --build-arg SCHEMAHERO_TAG=${SCHEMAHERO_TAG} ${DOCKER_BUILD_ARGS} -f dev/Dockerfile.ttlsh -t ${IMAGE} .
+	docker build --pull --build-arg SCHEMAHERO_TAG=${SCHEMAHERO_TAG} ${DOCKER_BUILD_ARGS} -f ../dev/dockerfiles/kotsadm-migrations/Dockerfile.ttlsh -t ${IMAGE} .
 	docker push ${IMAGE}
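The build target now resolves the relocated dev Dockerfile relative to the migrations directory. A hypothetical invocation (the ttl.sh image name is illustrative, not from this commit; IMAGE is the variable the Makefile already expects):

    make -C migrations build_schema IMAGE=ttl.sh/example/kotsadm-migrations:24h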
3 changes: 2 additions & 1 deletion pkg/docker/registry/registry.go
@@ -238,7 +238,8 @@ func applicationPullSecretLabels() map[string]string {
 	var secretLabels map[string]string
 	if util.IsEmbeddedCluster() {
 		secretLabels = map[string]string{
-			kotsadmtypes.DisasterRecoveryLabel: kotsadmtypes.DisasterRecoveryLabelValueApp,
+			kotsadmtypes.DisasterRecoveryLabel:      kotsadmtypes.DisasterRecoveryLabelValueInfra,
+			kotsadmtypes.DisasterRecoveryChartLabel: kotsadmtypes.DisasterRecoveryChartValue,
 		}
 	}
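With this change the application pull secret is tagged for the infrastructure portion of disaster recovery (and attributed to the admin-console chart) instead of the app backup. A minimal sketch of a label selector that would match resources tagged this way; the literal key/value strings mirror the assertions in registry_test.go below, and the selector itself is illustrative, not part of this commit:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	// Matches objects labeled for the infra portion of disaster recovery.
    	sel := &metav1.LabelSelector{
    		MatchLabels: map[string]string{
    			"replicated.com/disaster-recovery": "infra",
    		},
    	}
    	fmt.Println(metav1.FormatLabelSelector(sel))
    	// Output: replicated.com/disaster-recovery=infra
    }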
6 changes: 4 additions & 2 deletions pkg/docker/registry/registry_test.go
@@ -416,7 +416,8 @@ func TestPullSecretForRegistries(t *testing.T) {
 					"helm.sh/hook-weight": "-9999",
 				},
 				Labels: map[string]string{
-					"replicated.com/disaster-recovery": "app",
+					"replicated.com/disaster-recovery":       "infra",
+					"replicated.com/disaster-recovery-chart": "admin-console",
 				},
 			},
 			Type: corev1.SecretTypeDockerConfigJson,
@@ -552,7 +553,8 @@ func TestGetDockerHubPullSecret(t *testing.T) {
 					"helm.sh/hook-weight": "-9999",
 				},
 				Labels: map[string]string{
-					"replicated.com/disaster-recovery": "app",
+					"replicated.com/disaster-recovery":       "infra",
+					"replicated.com/disaster-recovery-chart": "admin-console",
 				},
 			},
 			Type: corev1.SecretTypeDockerConfigJson,
22 changes: 22 additions & 0 deletions pkg/k8sutil/clientset.go
@@ -9,7 +9,9 @@ import (
 	"github.com/pkg/errors"
 	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1"
 	flag "github.com/spf13/pflag"
+	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/discovery"
@@ -167,3 +169,23 @@ func GetKubeClient(ctx context.Context) (kbclient.Client, error) {
 	}
 	return kcli, nil
 }
+
+func GetVeleroKubeClient(ctx context.Context) (kbclient.Client, error) {
+	k8slogger := zap.New(func(o *zap.Options) {
+		o.DestWriter = io.Discard
+	})
+	log.SetLogger(k8slogger)
+	cfg, err := GetClusterConfig()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get cluster config")
+	}
+	scheme := runtime.NewScheme()
+	velerov1.AddToScheme(scheme)
+	kcli, err := kbclient.New(cfg, kbclient.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create kubebuilder client")
+	}
+	return kcli, nil
+}
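The new GetVeleroKubeClient helper centralizes the controller-runtime client setup (cluster config, Velero scheme registration, logger suppression) that download.go and velero.go below previously duplicated. A minimal caller sketch, assuming in-cluster or kubeconfig access; listBackups and its namespace argument are hypothetical, not part of this commit:

    package example

    import (
    	"context"

    	"github.com/pkg/errors"
    	"github.com/replicatedhq/kots/pkg/k8sutil"
    	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // listBackups shows how the shared helper removes the per-call
    // scheme/client boilerplate each caller used to carry.
    func listBackups(ctx context.Context, veleroNamespace string) (*velerov1.BackupList, error) {
    	kcli, err := k8sutil.GetVeleroKubeClient(ctx)
    	if err != nil {
    		return nil, errors.Wrap(err, "failed to get velero kube client")
    	}
    	var backups velerov1.BackupList
    	if err := kcli.List(ctx, &backups, kbclient.InNamespace(veleroNamespace)); err != nil {
    		return nil, errors.Wrap(err, "failed to list velero backups")
    	}
    	return &backups, nil
    }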
14 changes: 6 additions & 8 deletions pkg/kotsadmsnapshot/backup.go
@@ -271,14 +271,14 @@ func CreateInstanceBackup(ctx context.Context, cluster *downstreamtypes.Downstre
 		}
 	}
 
-	logger.Infof("Creating instance backup CR %s", veleroBackup.Name)
+	logger.Infof("Creating instance backup CR %s", veleroBackup.GenerateName)
 	backup, err := veleroClient.Backups(metadata.backupStorageLocationNamespace).Create(ctx, veleroBackup, metav1.CreateOptions{})
 	if err != nil {
 		return "", errors.Wrap(err, "failed to create velero backup")
 	}
 
 	if appVeleroBackup != nil {
-		logger.Infof("Creating instance app backup CR %s", appVeleroBackup.Name)
+		logger.Infof("Creating instance app backup CR %s", appVeleroBackup.GenerateName)
 		_, err := veleroClient.Backups(metadata.backupStorageLocationNamespace).Create(ctx, appVeleroBackup, metav1.CreateOptions{})
 		if err != nil {
 			return "", errors.Wrap(err, "failed to create application velero backup")
@@ -492,7 +492,7 @@ func getInfrastructureInstanceBackupSpec(ctx context.Context, k8sClient kubernet
 		}
 	}
 
-	veleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, veleroBackup.Annotations, metadata, hasAppBackup)
+	veleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, veleroBackup.Annotations, metadata)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to add annotations to backup")
 	}
@@ -521,13 +521,11 @@
 	return veleroBackup, nil
 }
 
-var EnableImprovedDR = false
-
 // getAppInstanceBackup returns a backup spec only if this is Embedded Cluster and the vendor has
 // defined both a backup and restore custom resource (improved DR).
 func getAppInstanceBackupSpec(k8sClient kubernetes.Interface, metadata instanceBackupMetadata) (*velerov1.Backup, error) {
 	// TODO(improveddr): remove this once we have fully implemented the improved DR
-	if !EnableImprovedDR {
+	if os.Getenv("ENABLE_IMPROVED_DR") != "true" {
 		return nil, nil
 	}
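Gating improved DR on an environment variable instead of the package-level EnableImprovedDR flag means the behavior can be toggled per-process, and tests no longer mutate and restore a global (see backup_test.go below). A minimal sketch of the pattern; improvedDREnabled and the test are illustrative, not part of this commit:

    package example

    import (
    	"os"
    	"testing"
    )

    // improvedDREnabled mirrors the gate in getAppInstanceBackupSpec: improved DR
    // stays off unless ENABLE_IMPROVED_DR is exactly "true".
    func improvedDREnabled() bool {
    	return os.Getenv("ENABLE_IMPROVED_DR") == "true"
    }

    // t.Setenv (Go 1.17+) flips the gate for one test and restores the prior
    // value automatically, replacing the manual t.Cleanup dance a global needs.
    func TestImprovedDRGate(t *testing.T) {
    	t.Setenv("ENABLE_IMPROVED_DR", "true")
    	if !improvedDREnabled() {
    		t.Fatal("expected improved DR to be enabled")
    	}
    }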

Expand Down Expand Up @@ -564,7 +562,7 @@ func getAppInstanceBackupSpec(k8sClient kubernetes.Interface, metadata instanceB
}

var err error
appVeleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, appVeleroBackup.Annotations, metadata, true)
appVeleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, appVeleroBackup.Annotations, metadata)
if err != nil {
return nil, errors.Wrap(err, "failed to add annotations to application backup")
}
@@ -656,7 +654,7 @@ func mergeAppBackupSpec(backup *velerov1.Backup, appMeta appInstanceBackupMetada
 }
 
 // appendCommonAnnotations appends common annotations to the backup annotations
-func appendCommonAnnotations(k8sClient kubernetes.Interface, annotations map[string]string, metadata instanceBackupMetadata, hasAppBackup bool) (map[string]string, error) {
+func appendCommonAnnotations(k8sClient kubernetes.Interface, annotations map[string]string, metadata instanceBackupMetadata) (map[string]string, error) {
 	kotsadmImage, err := k8sutil.FindKotsadmImage(k8sClient, metadata.kotsadmNamespace)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to find kotsadm image")
18 changes: 6 additions & 12 deletions pkg/kotsadmsnapshot/backup_test.go
@@ -961,10 +961,9 @@ func Test_appendCommonAnnotations(t *testing.T) {
 	}
 
 	type args struct {
-		k8sClient    kubernetes.Interface
-		annotations  map[string]string
-		metadata     instanceBackupMetadata
-		hasAppBackup bool
+		k8sClient   kubernetes.Interface
+		annotations map[string]string
+		metadata    instanceBackupMetadata
 	}
 	tests := []struct {
 		name string
@@ -1014,7 +1013,6 @@
 					snapshotTTL: 24 * time.Hour,
 					ec:          nil,
 				},
-				hasAppBackup: false,
 			},
 			want: map[string]string{
 				"kots.io/apps-sequences": "{\"app-1\":1,\"app-2\":2}",
@@ -1078,7 +1076,6 @@
 						seaweedFSS3ServiceIP: "10.96.0.10",
 					},
 				},
-				hasAppBackup: true,
 			},
 			want: map[string]string{
 				"kots.io/apps-sequences": "{\"app-1\":1}",
@@ -1108,7 +1105,7 @@
 			if tt.setup != nil {
 				tt.setup(t)
 			}
-			got, err := appendCommonAnnotations(tt.args.k8sClient, tt.args.annotations, tt.args.metadata, tt.args.hasAppBackup)
+			got, err := appendCommonAnnotations(tt.args.k8sClient, tt.args.annotations, tt.args.metadata)
 			if tt.wantErr {
 				require.Error(t, err)
 			} else {
@@ -1544,11 +1541,6 @@ func Test_mergeAppBackupSpec(t *testing.T) {
 }
 
 func Test_getAppInstanceBackupSpec(t *testing.T) {
-	EnableImprovedDR = true
-	t.Cleanup(func() {
-		EnableImprovedDR = false
-	})
-
 	kotsadmSts := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "kotsadm",
@@ -1974,6 +1966,8 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			t.Setenv("ENABLE_IMPROVED_DR", "true")
+
 			ctrl := gomock.NewController(t)
 			defer ctrl.Finish()
 
15 changes: 2 additions & 13 deletions pkg/kotsadmsnapshot/download.go
@@ -12,8 +12,6 @@ import (
 	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/cmd/util/downloadrequest"
 	pkgresults "github.com/vmware-tanzu/velero/pkg/util/results"
-	"k8s.io/apimachinery/pkg/runtime"
-	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 func DownloadRestoreResults(ctx context.Context, veleroNamespace, restoreName string) ([]types.SnapshotError, []types.SnapshotError, error) {
@@ -83,18 +81,9 @@
 }
 
 func DownloadRequest(ctx context.Context, veleroNamespace string, kind velerov1.DownloadTargetKind, name string) (io.ReadCloser, error) {
-	clientConfig, err := k8sutil.GetClusterConfig()
+	kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get cluster config")
-	}
-
-	scheme := runtime.NewScheme()
-	velerov1.AddToScheme(scheme)
-	kbClient, err := kbclient.New(clientConfig, kbclient.Options{
-		Scheme: scheme,
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to get kubebuilder client")
+		return nil, errors.Wrap(err, "failed to get velero kube client")
 	}
 
 	pr, pw := io.Pipe()
15 changes: 2 additions & 13 deletions pkg/snapshot/velero.go
@@ -13,18 +13,15 @@ import (
 	kotsadmresources "github.com/replicatedhq/kots/pkg/kotsadm/resources"
 	kotsadmtypes "github.com/replicatedhq/kots/pkg/kotsadm/types"
 	"github.com/replicatedhq/kots/pkg/util"
-	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/cmd/cli/serverstatus"
 	veleroclientv1 "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
 	v1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/client-go/kubernetes"
-	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var (
@@ -358,17 +355,9 @@ NodeAgentFound:
 }
 
 func getVersion(ctx context.Context, namespace string) (string, error) {
-	clientConfig, err := k8sutil.GetClusterConfig()
+	kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to get cluster config")
-	}
-	scheme := runtime.NewScheme()
-	velerov1api.AddToScheme(scheme)
-	kbClient, err := kbclient.New(clientConfig, kbclient.Options{
-		Scheme: scheme,
-	})
-	if err != nil {
-		return "", errors.Wrap(err, "failed to get velero client")
+		return "", errors.Wrap(err, "failed to get velero kube client")
 	}
 
 	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
