WIP: Primary udn integration #59

Closed · wants to merge 1 commit
1 change: 1 addition & 0 deletions pkg/config/types.go
@@ -8,6 +8,7 @@ import (
type RelevantConfig struct {
	Name               string `json:"name"`
	AllowPersistentIPs bool   `json:"allowPersistentIPs,omitempty"`
+	Role               string `json:"role,omitempty"`
}

func NewConfig(nadSpec string) (*RelevantConfig, error) {
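For context, a minimal sketch of how the new Role field surfaces when parsing a NAD spec. Only the json tags come from the struct above; the spec values and the module import path are hypothetical, and this assumes NewConfig json-unmarshals the NAD spec as those tags suggest:

package main

import (
	"fmt"

	// Hypothetical import path for the pkg/config package in this diff.
	"github.com/kubevirt/ipam-extensions/pkg/config"
)

func main() {
	// Keys match the json tags on RelevantConfig; the values are made up.
	nadSpec := `{
		"name": "tenantblue",
		"allowPersistentIPs": true,
		"role": "primary"
	}`

	cfg, err := config.NewConfig(nadSpec)
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.Name, cfg.AllowPersistentIPs, cfg.Role) // tenantblue true primary
}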
82 changes: 74 additions & 8 deletions pkg/ipamclaimswebhook/podmutator.go
@@ -65,14 +65,40 @@ func (a *IPAMClaimsValet) Handle(ctx context.Context, request admission.Request)
		return admission.Errored(http.StatusBadRequest, err)
	}

-	log.Info("webhook handling event")
+	vmName, hasVMAnnotation := pod.Annotations["kubevirt.io/domain"]
+	var primaryUDNNetwork *config.RelevantConfig
+	if hasVMAnnotation {
+		log.Info("webhook handling event - checking primary UDN flow for", "VM", vmName, "namespace", pod.Namespace)
+		var err error
+		primaryUDNNetwork, err = a.vmiPrimaryUDN(ctx, pod.Namespace)
+		if err != nil {
+			// TODO: figure out what to do. Probably fail
+			return admission.Errored(http.StatusInternalServerError, err)
+		}
+
+		if primaryUDNNetwork != nil && primaryUDNNetwork.AllowPersistentIPs {
+			log.Info(
+				"found primary UDN for",
+				"vmName",
+				vmName,
+				"namespace",
+				pod.Namespace,
+				"primary UDN name",
+				primaryUDNNetwork.Name,
+			)
+			annotatePodWithUDN(pod, vmName, primaryUDNNetwork.Name)
+		}
+	}
+
+	log.Info("webhook handling event - checking secondary networks flow for", "pod", pod.Name, "namespace", pod.Namespace)
	networkSelectionElements, err := netutils.ParsePodNetworkAnnotation(pod)
	if err != nil {
		var goodTypeOfError *v1.NoK8sNetworkError
-		if errors.As(err, &goodTypeOfError) {
+		if errors.As(err, &goodTypeOfError) && primaryUDNNetwork == nil {
			return admission.Allowed("no secondary networks requested")
+		} else if primaryUDNNetwork == nil {
+			return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to parse pod network selection elements"))
		}
-		return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to parse pod network selection elements"))
	}

	var (
@@ -112,7 +138,6 @@ func (a *IPAMClaimsValet) Handle(ctx context.Context, request admission.Request)
"NAD", nadName,
"network", pluginConfig.Name,
)
vmName, hasVMAnnotation := pod.Annotations["kubevirt.io/domain"]
if !hasVMAnnotation {
log.Info(
"does not have the kubevirt VM annotation",
@@ -154,13 +179,12 @@ func (a *IPAMClaimsValet) Handle(ctx context.Context, request admission.Request)
		podNetworkSelectionElements = append(podNetworkSelectionElements, *networkSelectionElement)
	}

-	if len(podNetworkSelectionElements) > 0 {
+	if len(podNetworkSelectionElements) > 0 || (primaryUDNNetwork != nil && primaryUDNNetwork.AllowPersistentIPs && vmName != "") {
		newPod, err := podWithUpdatedSelectionElements(pod, podNetworkSelectionElements)
		if err != nil {
			return admission.Errored(http.StatusInternalServerError, err)
		}

-		if reflect.DeepEqual(newPod, pod) || !hasChangedNetworkSelectionElements {
+		if primaryUDNNetwork == nil && (reflect.DeepEqual(newPod, pod) || !hasChangedNetworkSelectionElements) {
			return admission.Allowed("mutation not needed")
		}

@@ -202,6 +226,48 @@ func podWithUpdatedSelectionElements(pod *corev1.Pod, networks []v1.NetworkSelectionElement
	if err != nil {
		return nil, err
	}
-	newPod.Annotations[v1.NetworkAttachmentAnnot] = string(newNets)
+	if string(newNets) != "[]" {
+		newPod.Annotations[v1.NetworkAttachmentAnnot] = string(newNets)
+	}
	return newPod, nil
}

+func annotatePodWithUDN(pod *corev1.Pod, vmName string, primaryUDNName string) {
+	const ovnUDNIPAMClaimName = "k8s.ovn.org/ovn-udn-ipamclaim-reference"
+	udnAnnotations := pod.Annotations
+	udnAnnotations[ovnUDNIPAMClaimName] = fmt.Sprintf("%s.%s-primary-udn", vmName, primaryUDNName)
+	pod.SetAnnotations(udnAnnotations)
+}
+
+func (a *IPAMClaimsValet) vmiPrimaryUDN(ctx context.Context, namespace string) (*config.RelevantConfig, error) {
+	const (
+		NetworkRolePrimary   = "primary"
+		NetworkRoleSecondary = "secondary"
+	)
+
+	log := logf.FromContext(ctx)
+	var namespaceNads v1.NetworkAttachmentDefinitionList
+	if err := a.List(ctx, &namespaceNads, &client.ListOptions{}); err != nil {
+		return nil, fmt.Errorf("failed to list the NADs on namespace %q: %v", namespace, err)
+	}
+
+	for _, nad := range namespaceNads.Items {
+		networkConfig, err := config.NewConfig(nad.Spec.Config)
+		if err != nil {
+			log.Error(
+				err,
+				"failed extracting the relevant NAD configuration",
+				"NAD name",
+				nad.Name,
+				"NAD namespace",
+				nad.Namespace,
+			)
+			return nil, fmt.Errorf("failed to extract the relevant NAD information")
+		}
+
+		if networkConfig.Role == NetworkRolePrimary {
+			return networkConfig, nil
+		}
+	}
+	return nil, nil
+}
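As a quick illustration of what annotatePodWithUDN writes, reusing the file's corev1/metav1 imports (names and values hypothetical):

// A pod that carries the kubevirt.io/domain annotation, as the Handle
// flow above requires before annotatePodWithUDN is reached.
pod := &corev1.Pod{
	ObjectMeta: metav1.ObjectMeta{
		Annotations: map[string]string{"kubevirt.io/domain": "worker1"},
	},
}
annotatePodWithUDN(pod, "worker1", "tenantblue")
// pod.Annotations["k8s.ovn.org/ovn-udn-ipamclaim-reference"] is now
// "worker1.tenantblue-primary-udn", the same claimKey format the VMI
// controller uses for the primary UDN IPAMClaim.

Note that annotatePodWithUDN writes into pod.Annotations directly, so it relies on the map being non-nil; in the Handle flow above that holds, because it is only called for pods that already carry the kubevirt.io/domain annotation.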
146 changes: 111 additions & 35 deletions pkg/vminetworkscontroller/vmi_controller.go
@@ -83,44 +83,31 @@ func (r *VirtualMachineInstanceReconciler) Reconcile(
	}

	ownerInfo := ownerReferenceFor(vmi, vm)
-	for logicalNetworkName, netConfigName := range vmiNetworks {
-		claimKey := fmt.Sprintf("%s.%s", vmi.Name, logicalNetworkName)
-		ipamClaim := &ipamclaimsapi.IPAMClaim{
-			ObjectMeta: controllerruntime.ObjectMeta{
-				Name:            claimKey,
-				Namespace:       vmi.Namespace,
-				OwnerReferences: []metav1.OwnerReference{ownerInfo},
-				Finalizers:      []string{claims.KubevirtVMFinalizer},
-				Labels:          claims.OwnedByVMLabel(vmi.Name),
-			},
-			Spec: ipamclaimsapi.IPAMClaimSpec{
-				Network: netConfigName,
-			},
-		}
-
-		if err := r.Client.Create(ctx, ipamClaim, &client.CreateOptions{}); err != nil {
-			if apierrors.IsAlreadyExists(err) {
-				claimKey := apitypes.NamespacedName{
-					Namespace: vmi.Namespace,
-					Name:      claimKey,
-				}
+	// UDN code block
+	primaryUDN, err := r.vmiPrimaryUDN(ctx, vmi)
+	if err != nil {
+		return controllerruntime.Result{}, err
+	}
+	if primaryUDN != nil {
+		claimKey := fmt.Sprintf("%s.%s-primary-udn", vmi.Name, primaryUDN.Name)
[Review comment · Collaborator] We should normalize the key to follow k8s naming conventions; I have seen errors like the following when the network name in the NAD has unexpected chars:

    Retry add failed for *v1.Pod kv-live-migration-8805/virt-launcher-worker1-987pq, will try again later: failed to update pod kv-live-migration-8805/virt-launcher-worker1-987pq: error retrieving IPAMClaim for pod kv-live-migration-8805/virt-launcher-worker1-987pq: failed to get IPAMClaim "worker1.jv45k_net1-primary-udn": ipamclaim.k8s.cni.cncf.io "worker1.jv45k_net1-primary-udn" not found
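One possible shape for that normalization (not part of this PR): sanitize the generated name into a valid RFC 1123 subdomain before using it as the IPAMClaim name. The helper below is a hypothetical sketch:

import (
	"regexp"
	"strings"
)

// Characters that are not legal in a DNS-1123 subdomain object name.
var invalidNameChars = regexp.MustCompile(`[^a-z0-9.-]`)

// sanitizeClaimName lowercases the candidate name and replaces illegal
// characters, e.g. "worker1.jv45k_net1-primary-udn" becomes
// "worker1.jv45k-net1-primary-udn".
func sanitizeClaimName(name string) string {
	name = strings.ToLower(name)
	name = invalidNameChars.ReplaceAllString(name, "-")
	return strings.Trim(name, "-.")
}

Any such mapping is lossy, so callers would also need to guard against two different network names sanitizing to the same claim name.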

+		udnIPAMClaim := newIPAMClaim(claimKey, vmi, ownerInfo, primaryUDN.Name)
+		if err := r.ensureIPAMClaim(ctx, udnIPAMClaim, vmi, ownerInfo); err != nil {
+			return controllerruntime.Result{}, fmt.Errorf(
+				"failed ensuring IPAM claim for primary UDN network %q: %w",
+				primaryUDN.Name,
+				err,
+			)
+		}
+	}
+	// UDN code block END

-				existingIPAMClaim := &ipamclaimsapi.IPAMClaim{}
-				if err := r.Client.Get(ctx, claimKey, existingIPAMClaim); err != nil {
-					return controllerruntime.Result{}, fmt.Errorf("let us be on the safe side and retry later")
-				}
-
-				if len(existingIPAMClaim.OwnerReferences) == 1 && existingIPAMClaim.OwnerReferences[0].UID == ownerInfo.UID {
-					r.Log.Info("found existing IPAMClaim belonging to this VM/VMI, nothing to do", "UID", ownerInfo.UID)
-					continue
-				} else {
-					err := fmt.Errorf("failed since it found an existing IPAMClaim for %q", claimKey.Name)
-					r.Log.Error(err, "leaked IPAMClaim found", "existing owner", existingIPAMClaim.UID)
-					return controllerruntime.Result{}, err
-				}
-			}
-			r.Log.Error(err, "failed to create the IPAMClaim")
-			return controllerruntime.Result{}, err
-		}
-	}
+	for logicalNetworkName, netConfigName := range vmiNetworks {
+		claimKey := fmt.Sprintf("%s.%s", vmi.Name, logicalNetworkName)
+		ipamClaim := newIPAMClaim(claimKey, vmi, ownerInfo, netConfigName)
+
+		if err := r.ensureIPAMClaim(ctx, ipamClaim, vmi, ownerInfo); err != nil {
+			return controllerruntime.Result{}, fmt.Errorf("failed ensuring IPAM claim: %w", err)
+		}
+	}

@@ -242,3 +229,92 @@ func getOwningVM(ctx context.Context, c client.Client, name apitypes.NamespacedName
		return nil, fmt.Errorf("failed getting VM %q: %w", name, err)
	}
}

+func (r *VirtualMachineInstanceReconciler) vmiPrimaryUDN(
+	ctx context.Context,
+	vmi *virtv1.VirtualMachineInstance,
+) (*config.RelevantConfig, error) {
+	const (
+		NetworkRolePrimary   = "primary"
+		NetworkRoleSecondary = "secondary"
+	)
+
+	var namespaceNads nadv1.NetworkAttachmentDefinitionList
+	if err := r.List(ctx, &namespaceNads, &client.ListOptions{}); err != nil {
+		return nil, fmt.Errorf("failed to list the NADs on namespace %q: %v", vmi.Namespace, err)
+	}
+
+	for _, nad := range namespaceNads.Items {
+		networkConfig, err := config.NewConfig(nad.Spec.Config)
+		if err != nil {
+			r.Log.Error(
+				err,
+				"failed extracting the relevant NAD configuration",
+				"NAD name",
+				nad.Name,
+				"NAD namespace",
+				nad.Namespace,
+			)
+			return nil, fmt.Errorf("failed to extract the relevant NAD information")
+		}
+
+		if networkConfig.Role == NetworkRolePrimary {
+			return networkConfig, nil
+		}
+	}
+	return nil, nil
+}
+
+func newIPAMClaim(
+	claimKey string,
+	vmi *virtv1.VirtualMachineInstance,
+	ownerInfo metav1.OwnerReference,
+	netConfigName string,
+) *ipamclaimsapi.IPAMClaim {
+	return &ipamclaimsapi.IPAMClaim{
+		ObjectMeta: controllerruntime.ObjectMeta{
+			Name:            claimKey,
+			Namespace:       vmi.Namespace,
+			OwnerReferences: []metav1.OwnerReference{ownerInfo},
+			Finalizers:      []string{claims.KubevirtVMFinalizer},
+			Labels:          claims.OwnedByVMLabel(vmi.Name),
+		},
+		Spec: ipamclaimsapi.IPAMClaimSpec{
+			Network: netConfigName,
+		},
+	}
+}
+
+func (r *VirtualMachineInstanceReconciler) ensureIPAMClaim(
+	ctx context.Context,
+	ipamClaim *ipamclaimsapi.IPAMClaim,
+	vmi *virtv1.VirtualMachineInstance,
+	ownerInfo metav1.OwnerReference,
+) error {
+	claimKey := ipamClaim.Name
+	if err := r.Client.Create(ctx, ipamClaim, &client.CreateOptions{}); err != nil {
+		if apierrors.IsAlreadyExists(err) {
+			claimKey := apitypes.NamespacedName{
+				Namespace: vmi.Namespace,
+				Name:      claimKey,
+			}
+
+			existingIPAMClaim := &ipamclaimsapi.IPAMClaim{}
+			if err := r.Client.Get(ctx, claimKey, existingIPAMClaim); err != nil {
+				return fmt.Errorf("let us be on the safe side and retry later")
+			}
+
+			if len(existingIPAMClaim.OwnerReferences) == 1 && existingIPAMClaim.OwnerReferences[0].UID == ownerInfo.UID {
+				r.Log.Info("found existing IPAMClaim belonging to this VM/VMI, nothing to do", "UID", ownerInfo.UID)
+				return nil
+			} else {
+				err := fmt.Errorf("failed since it found an existing IPAMClaim for %q", claimKey.Name)
+				r.Log.Error(err, "leaked IPAMClaim found", "existing owner", existingIPAMClaim.UID)
+				return err
+			}
+		}
+		r.Log.Error(err, "failed to create the IPAMClaim")
+		return err
+	}
+	return nil
+}
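Putting the pieces together: for a hypothetical VMI "worker1" in namespace "blue" whose primary UDN NAD parses to a RelevantConfig with Name "tenantblue", the Reconcile flow above ends up ensuring a claim equivalent to this sketch:

udnClaim := newIPAMClaim("worker1.tenantblue-primary-udn", vmi, ownerInfo, "tenantblue")
// i.e. an IPAMClaim named "worker1.tenantblue-primary-udn" in namespace
// "blue", owned by the VM, carrying the KubevirtVMFinalizer and the
// owned-by-VM label, with Spec.Network set to "tenantblue".

The webhook side writes the same "worker1.tenantblue-primary-udn" string into the pod's k8s.ovn.org/ovn-udn-ipamclaim-reference annotation, which is what ties the launcher pod to this claim.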