Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(ec): fixes for restore in new improved dr #5038

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion migrations/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,5 @@ SCHEMAHERO_TAG ?= 0.17.12
DOCKER_BUILD_ARGS ?=

build_schema:
docker build --pull --build-arg SCHEMAHERO_TAG=${SCHEMAHERO_TAG} ${DOCKER_BUILD_ARGS} -f dev/Dockerfile.ttlsh -t ${IMAGE} .
docker build --pull --build-arg SCHEMAHERO_TAG=${SCHEMAHERO_TAG} ${DOCKER_BUILD_ARGS} -f ../dev/dockerfiles/kotsadm-migrations/Dockerfile.ttlsh -t ${IMAGE} .
docker push ${IMAGE}
3 changes: 2 additions & 1 deletion pkg/docker/registry/registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,8 @@ func applicationPullSecretLabels() map[string]string {
var secretLabels map[string]string
if util.IsEmbeddedCluster() {
secretLabels = map[string]string{
kotsadmtypes.DisasterRecoveryLabel: kotsadmtypes.DisasterRecoveryLabelValueApp,
kotsadmtypes.DisasterRecoveryLabel: kotsadmtypes.DisasterRecoveryLabelValueInfra,
kotsadmtypes.DisasterRecoveryChartLabel: kotsadmtypes.DisasterRecoveryChartValue,
}
}

Expand Down
6 changes: 4 additions & 2 deletions pkg/docker/registry/registry_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -416,7 +416,8 @@ func TestPullSecretForRegistries(t *testing.T) {
"helm.sh/hook-weight": "-9999",
},
Labels: map[string]string{
"replicated.com/disaster-recovery": "app",
"replicated.com/disaster-recovery": "infra",
"replicated.com/disaster-recovery-chart": "admin-console",
},
},
Type: corev1.SecretTypeDockerConfigJson,
Expand Down Expand Up @@ -552,7 +553,8 @@ func TestGetDockerHubPullSecret(t *testing.T) {
"helm.sh/hook-weight": "-9999",
},
Labels: map[string]string{
"replicated.com/disaster-recovery": "app",
"replicated.com/disaster-recovery": "infra",
"replicated.com/disaster-recovery-chart": "admin-console",
},
},
Type: corev1.SecretTypeDockerConfigJson,
Expand Down
22 changes: 22 additions & 0 deletions pkg/k8sutil/clientset.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,9 @@ import (
"github.com/pkg/errors"
embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1"
flag "github.com/spf13/pflag"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/discovery"
Expand Down Expand Up @@ -167,3 +169,23 @@ func GetKubeClient(ctx context.Context) (kbclient.Client, error) {
}
return kcli, nil
}

// GetVeleroKubeClient returns a controller-runtime client whose scheme has the
// Velero v1 API types (Backup, Restore, DownloadRequest, ...) registered, for
// use by snapshot/DR code paths. Controller-runtime's internal logging is
// routed to io.Discard so client chatter does not pollute kots output.
//
// ctx is currently unused but kept in the signature for consistency with the
// sibling GetKubeClient and for future cancellation support.
func GetVeleroKubeClient(ctx context.Context) (kbclient.Client, error) {
	// Silence controller-runtime's logger; its output is noise here.
	k8slogger := zap.New(func(o *zap.Options) {
		o.DestWriter = io.Discard
	})
	log.SetLogger(k8slogger)

	cfg, err := GetClusterConfig()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get cluster config")
	}

	scheme := runtime.NewScheme()
	// Do not ignore the registration error: a failure here would otherwise
	// surface later as a confusing "no kind is registered" client error.
	if err := velerov1.AddToScheme(scheme); err != nil {
		return nil, errors.Wrap(err, "failed to add velero types to scheme")
	}

	kcli, err := kbclient.New(cfg, kbclient.Options{
		Scheme: scheme,
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to create kubebuilder client")
	}
	return kcli, nil
}
14 changes: 6 additions & 8 deletions pkg/kotsadmsnapshot/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -269,14 +269,14 @@ func CreateInstanceBackup(ctx context.Context, cluster *downstreamtypes.Downstre
}
}

logger.Infof("Creating instance backup CR %s", veleroBackup.Name)
logger.Infof("Creating instance backup CR %s", veleroBackup.GenerateName)
backup, err := veleroClient.Backups(metadata.backupStorageLocationNamespace).Create(ctx, veleroBackup, metav1.CreateOptions{})
if err != nil {
return "", errors.Wrap(err, "failed to create velero backup")
}

if appVeleroBackup != nil {
logger.Infof("Creating instance app backup CR %s", appVeleroBackup.Name)
logger.Infof("Creating instance app backup CR %s", appVeleroBackup.GenerateName)
_, err := veleroClient.Backups(metadata.backupStorageLocationNamespace).Create(ctx, appVeleroBackup, metav1.CreateOptions{})
if err != nil {
return "", errors.Wrap(err, "failed to create application velero backup")
Expand Down Expand Up @@ -490,7 +490,7 @@ func getInfrastructureInstanceBackupSpec(ctx context.Context, k8sClient kubernet
}
}

veleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, veleroBackup.Annotations, metadata, hasAppBackup)
veleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, veleroBackup.Annotations, metadata)
if err != nil {
return nil, errors.Wrap(err, "failed to add annotations to backup")
}
Expand Down Expand Up @@ -519,13 +519,11 @@ func getInfrastructureInstanceBackupSpec(ctx context.Context, k8sClient kubernet
return veleroBackup, nil
}

var EnableImprovedDR = false

// getAppInstanceBackup returns a backup spec only if this is Embedded Cluster and the vendor has
// defined both a backup and restore custom resource (improved DR).
func getAppInstanceBackupSpec(k8sClient kubernetes.Interface, metadata instanceBackupMetadata) (*velerov1.Backup, error) {
// TODO(improveddr): remove this once we have fully implemented the improved DR
if !EnableImprovedDR {
if os.Getenv("ENABLE_IMPROVED_DR") != "true" {
return nil, nil
}

Expand Down Expand Up @@ -562,7 +560,7 @@ func getAppInstanceBackupSpec(k8sClient kubernetes.Interface, metadata instanceB
}

var err error
appVeleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, appVeleroBackup.Annotations, metadata, true)
appVeleroBackup.Annotations, err = appendCommonAnnotations(k8sClient, appVeleroBackup.Annotations, metadata)
if err != nil {
return nil, errors.Wrap(err, "failed to add annotations to application backup")
}
Expand Down Expand Up @@ -651,7 +649,7 @@ func mergeAppBackupSpec(backup *velerov1.Backup, appMeta appInstanceBackupMetada
}

// appendCommonAnnotations appends common annotations to the backup annotations
func appendCommonAnnotations(k8sClient kubernetes.Interface, annotations map[string]string, metadata instanceBackupMetadata, hasAppBackup bool) (map[string]string, error) {
func appendCommonAnnotations(k8sClient kubernetes.Interface, annotations map[string]string, metadata instanceBackupMetadata) (map[string]string, error) {
kotsadmImage, err := k8sutil.FindKotsadmImage(k8sClient, metadata.kotsadmNamespace)
if err != nil {
return nil, errors.Wrap(err, "failed to find kotsadm image")
Expand Down
18 changes: 6 additions & 12 deletions pkg/kotsadmsnapshot/backup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -957,10 +957,9 @@ func Test_appendCommonAnnotations(t *testing.T) {
}

type args struct {
k8sClient kubernetes.Interface
annotations map[string]string
metadata instanceBackupMetadata
hasAppBackup bool
k8sClient kubernetes.Interface
annotations map[string]string
metadata instanceBackupMetadata
}
tests := []struct {
name string
Expand Down Expand Up @@ -1010,7 +1009,6 @@ func Test_appendCommonAnnotations(t *testing.T) {
snapshotTTL: 24 * time.Hour,
ec: nil,
},
hasAppBackup: false,
},
want: map[string]string{
"kots.io/apps-sequences": "{\"app-1\":1,\"app-2\":2}",
Expand Down Expand Up @@ -1074,7 +1072,6 @@ func Test_appendCommonAnnotations(t *testing.T) {
seaweedFSS3ServiceIP: "10.96.0.10",
},
},
hasAppBackup: true,
},
want: map[string]string{
"kots.io/apps-sequences": "{\"app-1\":1}",
Expand Down Expand Up @@ -1104,7 +1101,7 @@ func Test_appendCommonAnnotations(t *testing.T) {
if tt.setup != nil {
tt.setup(t)
}
got, err := appendCommonAnnotations(tt.args.k8sClient, tt.args.annotations, tt.args.metadata, tt.args.hasAppBackup)
got, err := appendCommonAnnotations(tt.args.k8sClient, tt.args.annotations, tt.args.metadata)
if tt.wantErr {
require.Error(t, err)
} else {
Expand Down Expand Up @@ -1512,11 +1509,6 @@ func Test_mergeAppBackupSpec(t *testing.T) {
}

func Test_getAppInstanceBackupSpec(t *testing.T) {
EnableImprovedDR = true
t.Cleanup(func() {
EnableImprovedDR = false
})

kotsadmSts := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kotsadm",
Expand Down Expand Up @@ -1942,6 +1934,8 @@ func Test_getAppInstanceBackupSpec(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Setenv("ENABLE_IMPROVED_DR", "true")

ctrl := gomock.NewController(t)
defer ctrl.Finish()

Expand Down
15 changes: 2 additions & 13 deletions pkg/kotsadmsnapshot/download.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,6 @@ import (
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/cmd/util/downloadrequest"
pkgresults "github.com/vmware-tanzu/velero/pkg/util/results"
"k8s.io/apimachinery/pkg/runtime"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

func DownloadRestoreResults(ctx context.Context, veleroNamespace, restoreName string) ([]types.SnapshotError, []types.SnapshotError, error) {
Expand Down Expand Up @@ -83,18 +81,9 @@ func DownloadRestoreResults(ctx context.Context, veleroNamespace, restoreName st
}

func DownloadRequest(ctx context.Context, veleroNamespace string, kind velerov1.DownloadTargetKind, name string) (io.ReadCloser, error) {
clientConfig, err := k8sutil.GetClusterConfig()
kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get cluster config")
}

scheme := runtime.NewScheme()
velerov1.AddToScheme(scheme)
kbClient, err := kbclient.New(clientConfig, kbclient.Options{
Scheme: scheme,
})
if err != nil {
return nil, errors.Wrap(err, "failed to get kubebuilder client")
return nil, errors.Wrap(err, "failed to get velero kube client")
}

pr, pw := io.Pipe()
Expand Down
15 changes: 2 additions & 13 deletions pkg/snapshot/velero.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,15 @@ import (
kotsadmresources "github.com/replicatedhq/kots/pkg/kotsadm/resources"
kotsadmtypes "github.com/replicatedhq/kots/pkg/kotsadm/types"
"github.com/replicatedhq/kots/pkg/util"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/cmd/cli/serverstatus"
veleroclientv1 "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/kubernetes"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

var (
Expand Down Expand Up @@ -358,17 +355,9 @@ NodeAgentFound:
}

func getVersion(ctx context.Context, namespace string) (string, error) {
clientConfig, err := k8sutil.GetClusterConfig()
kbClient, err := k8sutil.GetVeleroKubeClient(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to get cluster config")
}
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
kbClient, err := kbclient.New(clientConfig, kbclient.Options{
Scheme: scheme,
})
if err != nil {
return "", errors.Wrap(err, "failed to get velero client")
return "", errors.Wrap(err, "failed to get velero kube client")
}

ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
Expand Down
Loading