Skip to content

Commit

Permalink
OVA: change the NFS mount to the server and make the conversion pod use a shared PVC
Browse files Browse the repository at this point in the history
Signed-off-by: Bella Khizgiyaev <[email protected]>
  • Loading branch information
bkhizgiy committed Oct 3, 2023
1 parent 658d34b commit 891b695
Show file tree
Hide file tree
Showing 5 changed files with 316 additions and 160 deletions.
120 changes: 107 additions & 13 deletions pkg/controller/plan/kubevirt.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
liberr "github.com/konveyor/forklift-controller/pkg/lib/error"
core "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
k8serr "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
Expand Down Expand Up @@ -1336,23 +1337,19 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi

switch r.Source.Provider.Type() {
case api.Ova:
server := r.Source.Provider.Spec.URL
splitted := strings.Split(server, ":")

if len(splitted) != 2 {
r.Log.Info("The NFS server path format is wrong")
return
pvcName := fmt.Sprintf("ova-server-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name)
// If the provider runs in a different namespace then the destination VM,
// we need to create the PV and PVC to access the NFS.
if r.Plan.Spec.TargetNamespace != r.Source.Provider.Namespace {
r.CreatePvcForNfs(pvcName)
}
nfsServer := splitted[0]
nfsPath := splitted[1]

//path from disk
volumes = append(volumes, core.Volume{
Name: "nfs",
Name: "nfs-pvc",
VolumeSource: core.VolumeSource{
NFS: &core.NFSVolumeSource{
Server: nfsServer,
Path: nfsPath,
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
})
Expand All @@ -1366,7 +1363,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
MountPath: "/opt",
},
core.VolumeMount{
Name: "nfs",
Name: "nfs-pvc",
MountPath: "/ova",
},
)
Expand Down Expand Up @@ -1835,6 +1832,103 @@ func (r *KubeVirt) EnsurePersistentVolume(vmRef ref.Ref, persistentVolumes []cor
return
}

// CreatePvcForNfs creates a PersistentVolume backed by the provider's NFS
// export and a PersistentVolumeClaim (named pvcName) bound to it in the
// plan's target namespace, then blocks until the claim reaches the Bound
// phase (up to 5 minutes). The provider URL is expected to be of the form
// "<server>:<path>".
//
// TODO: set an ownerReference so the PV and PVC are deleted once the plan is done.
// ownerReference := meta.OwnerReference{
//	APIVersion: "forklift.konveyor.io/v1beta1",
//	Kind:       "Plan",
//	Name:       r.Plan.Name,
//	UID:        r.Plan.UID,
// }
func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) {
	sourceProvider := r.Source.Provider
	pvName := fmt.Sprintf("ova-server-pv-%s", r.Plan.Name)

	// The NFS endpoint is encoded in the provider URL as "server:path".
	splitted := strings.Split(sourceProvider.Spec.URL, ":")
	if len(splitted) != 2 {
		// Return instead of falling through: indexing splitted[1] on a
		// malformed URL would panic with index out of range.
		err = fmt.Errorf("malformed NFS server path %q, expected server:path", sourceProvider.Spec.URL)
		r.Log.Error(err, "Failed to parse the NFS server path")
		return
	}
	nfsServer := splitted[0]
	nfsPath := splitted[1]

	pv := &v1.PersistentVolume{
		ObjectMeta: meta.ObjectMeta{
			Name: pvName,
		},
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("1Gi"),
			},
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadOnlyMany,
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{
					Path:   nfsPath,
					Server: nfsServer,
				},
			},
		},
	}
	err = r.Create(context.TODO(), pv)
	if err != nil && !k8serr.IsAlreadyExists(err) {
		// Tolerate AlreadyExists so repeated reconciles of the same plan
		// don't fail once the PV has been created.
		r.Log.Error(err, "Failed to create OVA plan PV")
		return
	}
	err = nil

	// Empty storage class pins the claim to the pre-provisioned PV above
	// instead of triggering dynamic provisioning.
	sc := ""
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: meta.ObjectMeta{
			Name:      pvcName,
			Namespace: r.Plan.Spec.TargetNamespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadOnlyMany,
			},
			VolumeName:       pvName,
			StorageClassName: &sc,
		},
	}
	err = r.Create(context.TODO(), pvc)
	if err != nil && !k8serr.IsAlreadyExists(err) {
		r.Log.Error(err, "Failed to create OVA plan PVC")
		return
	}
	err = nil

	// Wait until the PVC is bound to the PV.
	timeout := time.After(5 * time.Minute)
	// NewTicker (instead of time.Tick) so the ticker is stopped and does
	// not leak when this function returns.
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	pvcNamespacedName := types.NamespacedName{
		Namespace: r.Plan.Spec.TargetNamespace,
		Name:      pvcName,
	}

	for {
		select {
		case <-timeout:
			// Surface the timeout to the caller instead of returning nil.
			err = fmt.Errorf("timed out waiting for PVC %s to be bound", pvcName)
			r.Log.Error(err, "Timed out waiting for the OVA plan PVC to be bound")
			return
		case <-ticker.C:
			err = r.Get(context.TODO(), pvcNamespacedName, pvc)
			if err != nil {
				r.Log.Error(err, "Failed to get the OVA plan PVC")
				return
			}
			if pvc.Status.Phase == v1.ClaimBound {
				return
			}
		}
	}
}

// Ensure the PV exist on the destination.
func (r *KubeVirt) EnsurePersistentVolumeClaim(vmRef ref.Ref, persistentVolumeClaims []core.PersistentVolumeClaim) (err error) {
list, err := r.getPVCs(vmRef)
Expand Down
2 changes: 2 additions & 0 deletions pkg/controller/provider/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ go_library(
name = "provider",
srcs = [
"controller.go",
"ova-setup.go",
"predicate.go",
"validation.go",
],
Expand All @@ -30,6 +31,7 @@ go_library(
"//vendor/k8s.io/api/apps/v1:apps",
"//vendor/k8s.io/api/core/v1:core",
"//vendor/k8s.io/apimachinery/pkg/api/errors",
"//vendor/k8s.io/apimachinery/pkg/api/resource",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:meta",
"//vendor/k8s.io/apimachinery/pkg/util/intstr",
"//vendor/k8s.io/apiserver/pkg/storage/names",
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/provider/container/ova/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func (r *Client) Connect(provider *api.Provider) (err error) {
},
}

serverURL := fmt.Sprintf("http://ova-service-%s:8080", provider.Name)
serverURL := fmt.Sprintf("http://ova-service-%s.%s.svc.cluster.local:8080", provider.Name, provider.Namespace)
if serverURL == "" {
return
}
Expand Down
147 changes: 1 addition & 146 deletions pkg/controller/provider/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"

api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
Expand All @@ -42,8 +41,6 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
k8serr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apiserver/pkg/storage/names"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
Expand All @@ -64,13 +61,6 @@ var log = logging.WithName(Name)
// Application settings.
var Settings = &settings.Settings

const (
ovaServerPrefix = "ova-server"
ovaImageVar = "OVA_PROVIDER_SERVER_IMAGE"
nfsVolumeNamePrefix = "nfs-volume"
mountPath = "/ova"
)

// Creates a new Inventory Controller and adds it to the Manager.
func Add(mgr manager.Manager) error {
libfb.WorkingDir = Settings.WorkingDir
Expand Down Expand Up @@ -207,7 +197,7 @@ func (r Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (r

// If the deployment does not exist
if k8serr.IsNotFound(err) {
r.createOVAServerDeployment(provider, ctx)
r.CreateOVAServerDeployment(provider, ctx)
} else if err != nil {
return
}
Expand Down Expand Up @@ -364,141 +354,6 @@ func (r *Reconciler) getSecret(provider *api.Provider) (*v1.Secret, error) {
return secret, nil
}

// createOVAServerDeployment deploys the OVA provider server for the given
// provider and exposes it through a ClusterIP service on port 8080. Both
// objects carry an ownerReference to the provider so they are garbage
// collected together with it. Failures are logged and swallowed; the next
// reconcile retries.
func (r *Reconciler) createOVAServerDeployment(provider *api.Provider, ctx context.Context) {
	ownerReference := metav1.OwnerReference{
		APIVersion: "forklift.konveyor.io/v1beta1",
		Kind:       "Provider",
		Name:       provider.Name,
		UID:        provider.UID,
	}
	labels := map[string]string{"providerName": provider.Name, "app": "forklift"}
	annotations := make(map[string]string)
	replicas := int32(1)

	// OVA server deployment.
	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:            fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name),
			Namespace:       provider.Namespace,
			Annotations:     annotations,
			Labels:          labels,
			OwnerReferences: []metav1.OwnerReference{ownerReference},
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "forklift",
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"providerName": provider.Name,
						"app":          "forklift",
					},
				},
				Spec: r.makeOvaProviderPodSpec(provider.Spec.URL, string(provider.Name)),
			},
		},
	}
	if err := r.Create(ctx, deployment); err != nil {
		r.Log.Error(err, "Failed to create OVA server deployment")
		return
	}

	// OVA server service, selecting the pods deployed above.
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:            fmt.Sprintf("ova-service-%s", provider.Name),
			Namespace:       provider.Namespace,
			Labels:          labels,
			OwnerReferences: []metav1.OwnerReference{ownerReference},
		},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{
				"providerName": provider.Name,
				"app":          "forklift",
			},
			Ports: []v1.ServicePort{
				{
					Name:       "api-http",
					Protocol:   v1.ProtocolTCP,
					Port:       8080,
					TargetPort: intstr.FromInt(8080),
				},
			},
			Type: v1.ServiceTypeClusterIP,
		},
	}
	if err := r.Create(ctx, service); err != nil {
		r.Log.Error(err, "Failed to create OVA server service")
		return
	}
}

// makeOvaProviderPodSpec builds the pod spec for the OVA provider server:
// a single container serving on port 8080 with the provider's NFS export
// mounted at /ova. The url argument is expected to be "<server>:<path>".
func (r *Reconciler) makeOvaProviderPodSpec(url string, providerName string) v1.PodSpec {
	nonRoot := false

	// The provider URL encodes the NFS endpoint as "server:path".
	splitted := strings.Split(url, ":")
	if len(splitted) != 2 {
		// Return early: indexing splitted[1] on a malformed URL would
		// panic the reconciler with index out of range.
		r.Log.Error(nil, "NFS server path doesn't contains :")
		return v1.PodSpec{}
	}
	nfsServer := splitted[0]
	nfsPath := splitted[1]

	imageName, ok := os.LookupEnv(ovaImageVar)
	if !ok {
		// Best-effort: keep going with an empty image so the problem
		// surfaces on the deployment, matching the original behavior.
		r.Log.Error(nil, "Failed to find OVA server image")
	}

	nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName)
	ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServerPrefix, providerName)

	return v1.PodSpec{
		Containers: []v1.Container{
			{
				Name:  ovaContainerName,
				Ports: []v1.ContainerPort{{ContainerPort: 8080, Protocol: v1.ProtocolTCP}},
				SecurityContext: &v1.SecurityContext{
					RunAsNonRoot: &nonRoot,
				},
				Image: imageName,
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      nfsVolumeName,
						MountPath: "/ova",
					},
				},
			},
		},
		ServiceAccountName: "forklift-controller",
		Volumes: []v1.Volume{
			{
				Name: nfsVolumeName,
				VolumeSource: v1.VolumeSource{
					NFS: &v1.NFSVolumeSource{
						Server:   nfsServer,
						Path:     nfsPath,
						ReadOnly: false,
					},
				},
			},
		},
	}
}

// Provider catalog.
type Catalog struct {
mutex sync.Mutex
Expand Down
Loading

0 comments on commit 891b695

Please sign in to comment.