diff --git a/pkg/controller/plan/BUILD.bazel b/pkg/controller/plan/BUILD.bazel index 0a233c18d..b0c7511ee 100644 --- a/pkg/controller/plan/BUILD.bazel +++ b/pkg/controller/plan/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime", "//vendor/k8s.io/apimachinery/pkg/types", "//vendor/k8s.io/apimachinery/pkg/util/validation", + "//vendor/k8s.io/apimachinery/pkg/util/wait", "//vendor/k8s.io/apiserver/pkg/storage/names", "//vendor/k8s.io/client-go/kubernetes/scheme", "//vendor/kubevirt.io/api/core/v1:core", diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go index a2129d994..79f314040 100644 --- a/pkg/controller/plan/kubevirt.go +++ b/pkg/controller/plan/kubevirt.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" k8svalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/wait" cnv "kubevirt.io/api/core/v1" libvirtxml "libvirt.org/libvirt-go-xml" @@ -1184,7 +1185,10 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err } func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs *[]core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) { - volumes, volumeMounts, volumeDevices := r.podVolumeMounts(vmVolumes, configMap, pvcs) + volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs) + if err != nil { + return + } // qemu group fsGroup := qemuGroup @@ -1290,7 +1294,7 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, return } -func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs *[]core.PersistentVolumeClaim) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice) { +func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs *[]core.PersistentVolumeClaim) (volumes []core.Volume, 
mounts []core.VolumeMount, devices []core.VolumeDevice, err error) { pvcsByName := make(map[string]core.PersistentVolumeClaim) for _, pvc := range *pvcs { pvcsByName[pvc.Name] = pvc @@ -1336,23 +1340,22 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi switch r.Source.Provider.Type() { case api.Ova: - server := r.Source.Provider.Spec.URL - splitted := strings.Split(server, ":") - - if len(splitted) != 2 { - r.Log.Info("The NFS server path format is wrong") + err = r.CreatePvForNfs() + if err != nil { + return + } + pvcName := getEntityName("pvc", r.Source.Provider.Name, r.Plan.Name) + err = r.CreatePvcForNfs(pvcName) + if err != nil { return } - nfsServer := splitted[0] - nfsPath := splitted[1] //path from disk volumes = append(volumes, core.Volume{ - Name: "nfs", + Name: "store-pv", VolumeSource: core.VolumeSource{ - NFS: &core.NFSVolumeSource{ - Server: nfsServer, - Path: nfsPath, + PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, }, }, }) @@ -1366,7 +1369,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi MountPath: "/opt", }, core.VolumeMount{ - Name: "nfs", + Name: "store-pv", MountPath: "/ova", }, ) @@ -1835,6 +1838,161 @@ func (r *KubeVirt) EnsurePersistentVolume(vmRef ref.Ref, persistentVolumes []cor return } +func GetOvaPvNfs(client client.Client, planName string, providerName string) (pv *core.PersistentVolume, found bool, err error) { + pv = &core.PersistentVolume{} + err = client.Get( + context.TODO(), + types.NamespacedName{ + Name: getEntityName("pv", providerName, planName), + }, + pv, + ) + + if err != nil { + if k8serr.IsNotFound(err) { + return nil, false, nil + } + err = liberr.Wrap(err) + return + } + return pv, true, nil // fix: named result `found` was never set, so callers always saw found=false and re-created the PV +} + +func GetOvaPvcNfs(client client.Client, planName string, planNamespace string, providerName string) (pvc *core.PersistentVolumeClaim, found bool, err error) { + pvc = &core.PersistentVolumeClaim{} + err = client.Get( +
context.TODO(), + types.NamespacedName{ + Name: getEntityName("pvc", providerName, planName), + Namespace: planNamespace, + }, + pvc, + ) + + if err != nil { + if k8serr.IsNotFound(err) { + return nil, false, nil + } + err = liberr.Wrap(err) + return + } + return pvc, true, nil // fix: named result `found` was never set, so callers always saw found=false and re-created the PVC +} + +func (r *KubeVirt) CreatePvForNfs() (err error) { + sourceProvider := r.Source.Provider + splitted := strings.SplitN(sourceProvider.Spec.URL, ":", 2) + if len(splitted) != 2 { err = fmt.Errorf("malformed NFS server URL %q, expected host:path", sourceProvider.Spec.URL); return } // fix: guard against index-out-of-range panic on a malformed URL (the replaced code had this check) + nfsServer, nfsPath := splitted[0], splitted[1] + + _, found, err := GetOvaPvNfs(r.Destination.Client, r.Plan.Name, r.Plan.Provider.Source.Name) + if err != nil { + r.Log.Error(err, "Failed to get ova PV") + return + } + pvName := getEntityName("pv", r.Source.Provider.Name, r.Plan.Name) + if found { + r.Log.Info("The PV for OVA NFS exists", "PV", pvName) + return + } + + labels := map[string]string{"provider": r.Plan.Provider.Source.Name, "app": "forklift", "migration": r.Migration.Name, "plan": r.Plan.Name} + pv := &core.PersistentVolume{ + ObjectMeta: meta.ObjectMeta{ + Name: pvName, + Labels: labels, + }, + Spec: core.PersistentVolumeSpec{ + Capacity: core.ResourceList{ + core.ResourceStorage: resource.MustParse("1Gi"), + }, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, + }, + PersistentVolumeSource: core.PersistentVolumeSource{ + NFS: &core.NFSVolumeSource{ + Path: nfsPath, + Server: nfsServer, + }, + }, + }, + } + err = r.Destination.Create(context.TODO(), pv) + if err != nil { + r.Log.Error(err, "Failed to create OVA plan PV") + return + } + return +} + +func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { + _, found, err := GetOvaPvcNfs(r.Destination.Client, r.Plan.Name, r.Plan.Spec.TargetNamespace, r.Plan.Provider.Source.Name) + if err != nil { + r.Log.Error(err, "Failed to get ova PVC") + return + } + if found { + r.Log.Info("The PVC for OVA NFS exists", "PVC", pvcName) + return + } + + sc := "" + pvName := getEntityName("pv", r.Source.Provider.Name, r.Plan.Name) + labels := map[string]string{"provider":
r.Plan.Provider.Source.Name, "app": "forklift", "migration": r.Migration.Name, "plan": r.Plan.Name} + pvc := &core.PersistentVolumeClaim{ + ObjectMeta: meta.ObjectMeta{ + Name: pvcName, + Namespace: r.Plan.Spec.TargetNamespace, + Labels: labels, + }, + Spec: core.PersistentVolumeClaimSpec{ + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, + }, + VolumeName: pvName, + StorageClassName: &sc, + }, + } + err = r.Destination.Create(context.TODO(), pvc) + if err != nil { + r.Log.Error(err, "Failed to create OVA plan PVC") + return + } + + pvcNamespacedName := types.NamespacedName{ + Namespace: r.Plan.Spec.TargetNamespace, + Name: pvcName, + } + + timeout := 45 * time.Second + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + + // wait until pvc and pv are bound. + err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) { + err := r.Get(context.TODO(), pvcNamespacedName, pvc) + if err != nil { + r.Log.Error(err, "Failed to get OVA plan PVC") + return false, nil // fix: treat Get errors as transient and keep polling; the ctx deadline bounds the wait + } + return pvc.Status.Phase == core.ClaimBound, nil // fix: use the typed core.ClaimBound constant instead of the raw "Bound" string + }, ctx.Done()) + if err != nil { + r.Log.Error(err, "Failed to bind OVA PVC to PV ") + return + } + return nil +} + +func getEntityName(resourceType, providerName, planName string) string { + return fmt.Sprintf("ova-store-%s-%s-%s", resourceType, providerName, planName) +} + // Ensure the PV exist on the destination.
func (r *KubeVirt) EnsurePersistentVolumeClaim(vmRef ref.Ref, persistentVolumeClaims []core.PersistentVolumeClaim) (err error) { list, err := r.getPVCs(vmRef) diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go index f3f060b1e..7f7d46a12 100644 --- a/pkg/controller/plan/migration.go +++ b/pkg/controller/plan/migration.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter" plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" @@ -336,6 +337,14 @@ func (r *Migration) Archive() { return } + if r.Plan.Provider.Source.Type() == v1beta1.Ova { + err = r.deletePvcPvForOva() + if err != nil { + r.Log.Error(err, "Failed to clean up the PVC and PV for the OVA plan") + return + } + } + for _, vm := range r.Plan.Status.Migration.VMs { err = r.CleanUp(vm) if err != nil { @@ -486,6 +495,41 @@ func (r *Migration) deleteImporterPods(vm *plan.VMStatus) (err error) { return } +func (r *Migration) deletePvcPvForOva() (err error) { + pvc, _, err := GetOvaPvcNfs(r.Destination.Client, r.Plan.Name, r.Plan.Spec.TargetNamespace, r.Plan.Provider.Source.Name) + if err != nil { + r.Log.Error(err, "Failed to get the plan PVC") + return + } + // The PVC was already deleted + if pvc == nil { + return + } + + err = r.Destination.Client.Delete(context.TODO(), pvc) + if err != nil { + r.Log.Error(err, "Failed to delete the plan PVC") + return + } + + pv, _, err := GetOvaPvNfs(r.Destination.Client, r.Plan.Name, r.Plan.Provider.Source.Name) + if err != nil { + r.Log.Error(err, "Failed to get the plan PV") + return + } + // The PV was already deleted + if pv == nil { + return + } + + err = r.Destination.Client.Delete(context.TODO(), pv) + if err != nil { + r.Log.Error(err, "Failed to delete the plan PV") + return + } + return +} + // Best effort 
attempt to resolve canceled refs. func (r *Migration) resolveCanceledRefs() { for i := range r.Context.Migration.Spec.Cancel { diff --git a/pkg/controller/provider/BUILD.bazel b/pkg/controller/provider/BUILD.bazel index 348974097..b5065cb1d 100644 --- a/pkg/controller/provider/BUILD.bazel +++ b/pkg/controller/provider/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "provider", srcs = [ "controller.go", + "ova-setup.go", "predicate.go", "validation.go", ], @@ -30,6 +31,7 @@ go_library( "//vendor/k8s.io/api/apps/v1:apps", "//vendor/k8s.io/api/core/v1:core", "//vendor/k8s.io/apimachinery/pkg/api/errors", + "//vendor/k8s.io/apimachinery/pkg/api/resource", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:meta", "//vendor/k8s.io/apimachinery/pkg/util/intstr", "//vendor/k8s.io/apiserver/pkg/storage/names", diff --git a/pkg/controller/provider/container/ova/client.go b/pkg/controller/provider/container/ova/client.go index bb136a4e9..6656e7953 100644 --- a/pkg/controller/provider/container/ova/client.go +++ b/pkg/controller/provider/container/ova/client.go @@ -48,7 +48,7 @@ func (r *Client) Connect(provider *api.Provider) (err error) { }, } - serverURL := fmt.Sprintf("http://ova-service-%s:8080", provider.Name) + serverURL := fmt.Sprintf("http://ova-service-%s.%s.svc.cluster.local:8080", provider.Name, provider.Namespace) if serverURL == "" { return } diff --git a/pkg/controller/provider/controller.go b/pkg/controller/provider/controller.go index 8f1932a12..f831414c8 100644 --- a/pkg/controller/provider/controller.go +++ b/pkg/controller/provider/controller.go @@ -21,7 +21,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "sync" api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" @@ -42,8 +41,6 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -64,13 +61,6 @@ var log = logging.WithName(Name) // Application settings. var Settings = &settings.Settings -const ( - ovaServerPrefix = "ova-server" - ovaImageVar = "OVA_PROVIDER_SERVER_IMAGE" - nfsVolumeNamePrefix = "nfs-volume" - mountPath = "/ova" -) - // Creates a new Inventory Controller and adds it to the Manager. func Add(mgr manager.Manager) error { libfb.WorkingDir = Settings.WorkingDir @@ -199,7 +189,7 @@ func (r Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (r if provider.Type() == api.Ova { - deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) + deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServer, provider.Name) deployment := &appsv1.Deployment{} err = r.Get(context.TODO(), client.ObjectKey{ @@ -209,7 +199,7 @@ func (r Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (r // If the deployment does not exist if k8serr.IsNotFound(err) { - r.createOVAServerDeployment(provider, ctx) + r.CreateOVAServerDeployment(provider, ctx) } else if err != nil { return } @@ -366,141 +356,6 @@ func (r *Reconciler) getSecret(provider *api.Provider) (*v1.Secret, error) { return secret, nil } -func (r *Reconciler) createOVAServerDeployment(provider *api.Provider, ctx context.Context) { - - deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) - annotations := make(map[string]string) - labels := map[string]string{"providerName": provider.Name, "app": "forklift"} - url := provider.Spec.URL - var replicas int32 = 1 - - ownerReference := metav1.OwnerReference{ - APIVersion: "forklift.konveyor.io/v1beta1", - Kind: "Provider", - Name: provider.Name, - UID: provider.UID, - } - - //OVA server deployment - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: provider.Namespace, - Annotations: annotations, - Labels: labels, - 
OwnerReferences: []metav1.OwnerReference{ownerReference}, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "forklift", - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "providerName": provider.Name, - "app": "forklift", - }, - }, - Spec: r.makeOvaProviderPodSpec(url, string(provider.Name)), - }, - }, - } - - err := r.Create(ctx, deployment) - if err != nil { - r.Log.Error(err, "Failed to create OVA server deployment") - return - } - - // OVA Server Service - serviceName := fmt.Sprintf("ova-service-%s", provider.Name) - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: provider.Namespace, - Labels: labels, - OwnerReferences: []metav1.OwnerReference{ownerReference}, - }, - Spec: v1.ServiceSpec{ - Selector: map[string]string{ - "providerName": provider.Name, - "app": "forklift", - }, - Ports: []v1.ServicePort{ - { - Name: "api-http", - Protocol: v1.ProtocolTCP, - Port: 8080, - TargetPort: intstr.FromInt(8080), - }, - }, - Type: v1.ServiceTypeClusterIP, - }, - } - - err = r.Create(ctx, service) - if err != nil { - r.Log.Error(err, "Failed to create OVA server service") - return - } -} - -func (r *Reconciler) makeOvaProviderPodSpec(url string, providerName string) v1.PodSpec { - splitted := strings.Split(url, ":") - nonRoot := false - - if len(splitted) != 2 { - r.Log.Error(nil, "NFS server path doesn't contains :") - } - nfsServer := splitted[0] - nfsPath := splitted[1] - - imageName, ok := os.LookupEnv(ovaImageVar) - if !ok { - r.Log.Error(nil, "Failed to find OVA server image") - } - - nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName) - - ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServerPrefix, providerName) - - return v1.PodSpec{ - - Containers: []v1.Container{ - { - Name: ovaContainerName, - Ports: []v1.ContainerPort{{ContainerPort: 8080, Protocol: 
v1.ProtocolTCP}}, - SecurityContext: &v1.SecurityContext{ - RunAsNonRoot: &nonRoot, - }, - Image: imageName, - VolumeMounts: []v1.VolumeMount{ - { - Name: nfsVolumeName, - MountPath: "/ova", - }, - }, - }, - }, - ServiceAccountName: "forklift-controller", - Volumes: []v1.Volume{ - { - Name: nfsVolumeName, - VolumeSource: v1.VolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: nfsServer, - Path: nfsPath, - ReadOnly: false, - }, - }, - }, - }, - } -} - // Provider catalog. type Catalog struct { mutex sync.Mutex diff --git a/pkg/controller/provider/ova-setup.go b/pkg/controller/provider/ova-setup.go new file mode 100644 index 000000000..776b8cdf8 --- /dev/null +++ b/pkg/controller/provider/ova-setup.go @@ -0,0 +1,222 @@ +package provider + +import ( + "context" + "fmt" + "os" + "strings" + + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + appsv1 "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ovaServer = "ova-server" + ovaImageVar = "OVA_PROVIDER_SERVER_IMAGE" + nfsVolumeNamePrefix = "nfs-volume" + mountPath = "/ova" + pvSize = "1Gi" +) + +func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx context.Context) { + ownerReference := metav1.OwnerReference{ + APIVersion: "forklift.konveyor.io/v1beta1", + Kind: "Provider", + Name: provider.Name, + UID: provider.UID, + } + pvName := fmt.Sprintf("%s-pv-%s-%s", ovaServer, provider.Name, provider.Namespace) + err := r.createPvForNfs(provider, ctx, ownerReference, pvName) + if err != nil { + r.Log.Error(err, "Failed to create PV for the OVA server") + return + } + + pvcName := fmt.Sprintf("%s-pvc-%s", ovaServer, provider.Name) + err = r.createPvcForNfs(provider, ctx, ownerReference, pvName, pvcName) + if err != nil { + r.Log.Error(err, "Failed to create PVC for the OVA server") + return + } + + labels := map[string]string{"provider": 
provider.Name, "app": "forklift", "subapp": ovaServer} + err = r.createServerDeployment(provider, ctx, ownerReference, pvcName, labels) + if err != nil { + r.Log.Error(err, "Failed to create OVA server deployment") + return + } + + err = r.createServerService(provider, ctx, ownerReference, labels) + if err != nil { + r.Log.Error(err, "Failed to create OVA server service") + return + } +} + +func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName string) (err error) { + splitted := strings.SplitN(provider.Spec.URL, ":", 2) + if len(splitted) != 2 { return fmt.Errorf("malformed NFS server URL %q, expected host:path", provider.Spec.URL) } // fix: guard against index-out-of-range panic on a malformed URL + nfsServer, nfsPath := splitted[0], splitted[1] + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": ovaServer} + + pv := &core.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvName, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + Labels: labels, + }, + Spec: core.PersistentVolumeSpec{ + Capacity: core.ResourceList{ + core.ResourceStorage: resource.MustParse(pvSize), + }, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, + }, + PersistentVolumeSource: core.PersistentVolumeSource{ + NFS: &core.NFSVolumeSource{ + Path: nfsPath, + Server: nfsServer, + }, + }, + }, + } + err = r.Create(ctx, pv) + if err != nil { + return + } + return +} + +func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName, pvcName string) (err error) { + sc := "" + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": ovaServer} + pvc := &core.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: provider.Namespace, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + Labels: labels, + }, + Spec: core.PersistentVolumeClaimSpec{ + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceStorage: resource.MustParse(pvSize), + }, + }, + AccessModes:
[]core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, + }, + VolumeName: pvName, + StorageClassName: &sc, + }, + } + err = r.Create(ctx, pvc) + if err != nil { + return + } + return +} + +func (r *Reconciler) createServerDeployment(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvcName string, labels map[string]string) (err error) { + deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServer, provider.Name) + annotations := make(map[string]string) + var replicas int32 = 1 + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: provider.Namespace, + Annotations: annotations, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: core.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: r.makeOvaProviderPodSpec(pvcName, provider.Name), + }, + }, + } + + err = r.Create(ctx, deployment) + if err != nil { + return + } + return +} + +func (r *Reconciler) createServerService(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, labels map[string]string) (err error) { + serviceName := fmt.Sprintf("ova-service-%s", provider.Name) + service := &core.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: provider.Namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + }, + Spec: core.ServiceSpec{ + Selector: labels, + Ports: []core.ServicePort{ + { + Name: "api-http", + Protocol: core.ProtocolTCP, + Port: 8080, + TargetPort: intstr.FromInt(8080), + }, + }, + Type: core.ServiceTypeClusterIP, + }, + } + + err = r.Create(ctx, service) + if err != nil { + return + } + return +} + +func (r *Reconciler) makeOvaProviderPodSpec(pvcName string, providerName string) core.PodSpec { + imageName, ok := os.LookupEnv(ovaImageVar) + if 
!ok { + r.Log.Error(nil, "Failed to find OVA server image") + } + + nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName) + ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServer, providerName) + + return core.PodSpec{ + Containers: []core.Container{ + { + Name: ovaContainerName, + Ports: []core.ContainerPort{{ContainerPort: 8080, Protocol: core.ProtocolTCP}}, + Image: imageName, + VolumeMounts: []core.VolumeMount{ + { + Name: nfsVolumeName, + MountPath: mountPath, + }, + }, + }, + }, + Volumes: []core.Volume{ + { + Name: nfsVolumeName, + VolumeSource: core.VolumeSource{ + PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }, + }, + } +}