From df0310366e61f7aab5f3dd2004f5073223973077 Mon Sep 17 00:00:00 2001 From: Brendan Shephard Date: Thu, 25 Jul 2024 11:08:53 +1000 Subject: [PATCH 1/4] Use ansibleEE library instead of AnsibleEE v1 CR This change swaps out the use of the AnsibleEE custom resource for a generic Go library implementation. This enables us to stop using the unnecessary OpenStackAnsibleEE abstraction that we currently have around Kubernetes Jobs for AnsibleEE executions. Jira: https://issues.redhat.com/browse/OSPRH-8926 Signed-off-by: Brendan Shephard --- config/rbac/role.yaml | 12 - ...openstackdataplanedeployment_controller.go | 5 +- .../openstackdataplanenodeset_controller.go | 4 +- pkg/dataplane/deployment.go | 32 +- pkg/dataplane/service.go | 1 - pkg/dataplane/util/ansible_execution.go | 321 +++++++++--------- pkg/dataplane/util/version.go | 3 +- 7 files changed, 187 insertions(+), 191 deletions(-) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3634b0817..ac529c6f1 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -82,18 +82,6 @@ rules: - patch - update - watch -- apiGroups: - - ansibleee.openstack.org - resources: - - openstackansibleees - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - barbican.openstack.org resources: diff --git a/controllers/dataplane/openstackdataplanedeployment_controller.go b/controllers/dataplane/openstackdataplanedeployment_controller.go index a438b1fab..40a116afd 100644 --- a/controllers/dataplane/openstackdataplanedeployment_controller.go +++ b/controllers/dataplane/openstackdataplanedeployment_controller.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-playground/validator/v10" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -40,7 +41,6 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" "github.com/openstack-k8s-operators/lib-common/modules/common/util" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" deployment "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane" dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" @@ -63,7 +63,6 @@ func (r *OpenStackDataPlaneDeploymentReconciler) GetLogger(ctx context.Context) // +kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanedeployments/finalizers,verbs=update;patch // +kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodesets,verbs=get;list;watch // +kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplaneservices,verbs=get;list;watch -// +kubebuilder:rbac:groups=ansibleee.openstack.org,resources=openstackansibleees,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=discovery.k8s.io,resources=endpointslices,verbs=get;list;watch;create;update;patch;delete; // +kubebuilder:rbac:groups=cert-manager.io,resources=issuers,verbs=get;list;watch; // +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete; @@ -494,7 +493,7 @@ func (r *OpenStackDataPlaneDeploymentReconciler) SetupWithManager(mgr ctrl.Manag predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}, predicate.LabelChangedPredicate{}))). 
- Owns(&ansibleeev1.OpenStackAnsibleEE{}). + Owns(&batchv1.Job{}). Watches(&certmgrv1.Certificate{}, handler.EnqueueRequestsFromMapFunc(certFn)). Complete(r) diff --git a/controllers/dataplane/openstackdataplanenodeset_controller.go b/controllers/dataplane/openstackdataplanenodeset_controller.go index 93f5fc182..0f7736584 100644 --- a/controllers/dataplane/openstackdataplanenodeset_controller.go +++ b/controllers/dataplane/openstackdataplanenodeset_controller.go @@ -24,6 +24,7 @@ import ( "github.com/go-playground/validator/v10" "golang.org/x/exp/slices" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" @@ -48,7 +49,6 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/secret" "github.com/openstack-k8s-operators/lib-common/modules/common/serviceaccount" "github.com/openstack-k8s-operators/lib-common/modules/common/util" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" @@ -622,7 +622,7 @@ func (r *OpenStackDataPlaneNodeSetReconciler) SetupWithManager( predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}, predicate.LabelChangedPredicate{}))). - Owns(&ansibleeev1.OpenStackAnsibleEE{}). + Owns(&batchv1.Job{}). Owns(&baremetalv1.OpenStackBaremetalSet{}). Owns(&infranetworkv1.IPSet{}). Owns(&infranetworkv1.DNSData{}). diff --git a/pkg/dataplane/deployment.go b/pkg/dataplane/deployment.go index f094ce768..f0dd21fba 100644 --- a/pkg/dataplane/deployment.go +++ b/pkg/dataplane/deployment.go @@ -26,6 +26,9 @@ import ( "sort" "strconv" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + slices "golang.org/x/exp/slices" k8s_errors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" @@ -37,11 +40,9 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/helper" "github.com/openstack-k8s-operators/lib-common/modules/common/util" "github.com/openstack-k8s-operators/lib-common/modules/storage" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" - corev1 "k8s.io/api/core/v1" ) // Deployer defines a data structure with all of the relevant objects required for a full deployment. 
@@ -197,13 +198,13 @@ func (d *Deployer) ConditionalDeploy(
 	}
 
 	if nsConditions.IsFalse(readyCondition) {
-		var ansibleEE *ansibleeev1.OpenStackAnsibleEE
+		var ansibleEE *batchv1.Job
 		_, labelSelector := dataplaneutil.GetAnsibleExecutionNameAndLabels(&foundService, d.Deployment.Name, d.NodeSet.Name)
 		ansibleEE, err = dataplaneutil.GetAnsibleExecution(d.Ctx, d.Helper, d.Deployment, labelSelector)
 		if err != nil {
 			// Return nil if we don't have AnsibleEE available yet
 			if k8s_errors.IsNotFound(err) {
-				log.Info(fmt.Sprintf("%s OpenStackAnsibleEE not yet found", readyCondition))
+				log.Info(fmt.Sprintf("%s AnsibleEE job not yet found", readyCondition))
 				return nil
 			}
 			log.Error(err, fmt.Sprintf("Error getting ansibleEE job for %s", deployName))
@@ -215,15 +216,15 @@
 					err.Error()))
 		}
 
-		if ansibleEE.Status.JobStatus == ansibleeev1.JobStatusSucceeded {
+		if ansibleEE.Status.Succeeded > 0 {
 			log.Info(fmt.Sprintf("Condition %s ready", readyCondition))
 			nsConditions.Set(condition.TrueCondition(
 				readyCondition,
 				readyMessage))
 		}
 
-		if ansibleEE.Status.JobStatus == ansibleeev1.JobStatusRunning || ansibleEE.Status.JobStatus == ansibleeev1.JobStatusPending {
-			log.Info(fmt.Sprintf("AnsibleEE job is not yet completed: Execution: %s, Status: %s", ansibleEE.Name, ansibleEE.Status.JobStatus))
+		if ansibleEE.Status.Active > 0 {
+			log.Info(fmt.Sprintf("AnsibleEE job is not yet completed: Execution: %s, Active pods: %d", ansibleEE.Name, ansibleEE.Status.Active))
 			nsConditions.Set(condition.FalseCondition(
 				readyCondition,
 				condition.RequestedReason,
@@ -231,18 +232,23 @@
 				readyWaitingMessage))
 		}
 
-		if ansibleEE.Status.JobStatus == ansibleeev1.JobStatusFailed {
-			errorMsg := fmt.Sprintf("execution.name %s execution.namespace %s execution.status.jobstatus: %s", ansibleEE.Name, ansibleEE.Namespace, ansibleEE.Status.JobStatus)
-			ansibleCondition := ansibleEE.Status.Conditions.Get(condition.ReadyCondition)
+		var ansibleCondition batchv1.JobCondition
+		if ansibleEE.Status.Failed > 0 {
+			errorMsg := fmt.Sprintf("execution.name %s execution.namespace %s failed pods: %d", ansibleEE.Name, ansibleEE.Namespace, ansibleEE.Status.Failed)
+			for _, jobCondition := range ansibleEE.Status.Conditions {
+				if jobCondition.Type == batchv1.JobFailed {
+					ansibleCondition = jobCondition
+				}
+			}
 			if ansibleCondition.Reason == condition.JobReasonBackoffLimitExceeded {
-				errorMsg = fmt.Sprintf("backoff limit reached for execution.name %s execution.namespace %s execution.status.jobstatus: %s", ansibleEE.Name, ansibleEE.Namespace, ansibleEE.Status.JobStatus)
+				errorMsg = fmt.Sprintf("backoff limit reached for execution.name %s execution.namespace %s execution.condition.message: %s", ansibleEE.Name, ansibleEE.Namespace, ansibleCondition.Message)
 			}
 			log.Info(fmt.Sprintf("Condition %s error", readyCondition))
 			err = fmt.Errorf(errorMsg)
 			nsConditions.Set(condition.FalseCondition(
 				readyCondition,
-				ansibleCondition.Reason,
-				ansibleCondition.Severity,
+				condition.Reason(ansibleCondition.Reason),
+				condition.SeverityError,
 				readyErrorMessage,
 				err.Error()))
 		}
diff --git a/pkg/dataplane/service.go b/pkg/dataplane/service.go
index 30500901c..47b208016 100644
--- a/pkg/dataplane/service.go
+++ b/pkg/dataplane/service.go
@@ -55,7 +55,6 @@ func (d *Deployer) DeployService(foundService dataplanev1.OpenStackDataPlaneServ
 		d.InventorySecrets,
 		d.AeeSpec,
 		d.NodeSet)
-
 	if err != nil {
 		d.Helper.GetLogger().Error(err, fmt.Sprintf("Unable to execute Ansible for %s", foundService.Name))
 		return err
diff --git 
a/pkg/dataplane/util/ansible_execution.go b/pkg/dataplane/util/ansible_execution.go index f2f0160a8..842d8179e 100644 --- a/pkg/dataplane/util/ansible_execution.go +++ b/pkg/dataplane/util/ansible_execution.go @@ -24,21 +24,20 @@ import ( "strings" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apimachineryvalidation "k8s.io/apimachinery/pkg/util/validation" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" - "github.com/openstack-k8s-operators/lib-common/modules/common/util" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" "github.com/openstack-k8s-operators/lib-common/modules/storage" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" ) -// AnsibleExecution creates a OpenStackAnsiblEE CR +// AnsibleExecution creates a batchv1 Job to execute Ansible func AnsibleExecution( ctx context.Context, helper *helper.Helper, @@ -58,206 +57,212 @@ func AnsibleExecution( var sshKeyMountPath string var sshKeyMountSubPath string + const jobRestartPolicy string = "OnFailure" + ansibleEEMounts := storage.VolMounts{} executionName, labels := GetAnsibleExecutionNameAndLabels(service, deployment.GetName(), nodeSet.GetName()) - ansibleEE, err := GetAnsibleExecution(ctx, helper, deployment, labels) + + existingAnsibleEE, err := GetAnsibleExecution(ctx, helper, deployment, labels) if err != nil && !k8serrors.IsNotFound(err) { return err } // Don't patch and re-run jobs if the job status is already completed. 
- if ansibleEE != nil && ansibleEE.Status.JobStatus == ansibleeev1.JobStatusSucceeded { + if existingAnsibleEE != nil && existingAnsibleEE.Status.Succeeded > 0 { return nil } - if ansibleEE == nil { - ansibleEE = &ansibleeev1.OpenStackAnsibleEE{ - ObjectMeta: metav1.ObjectMeta{ - Name: executionName, - Namespace: deployment.GetNamespace(), - Labels: labels, - }, - } + ansibleEE := EEJob{ + Name: executionName, + Namespace: deployment.GetNamespace(), + Labels: labels, } - _, err = controllerutil.CreateOrPatch(ctx, helper.GetClient(), ansibleEE, func() error { - ansibleEE.Spec.NetworkAttachments = aeeSpec.NetworkAttachments - if aeeSpec.DNSConfig != nil { - ansibleEE.Spec.DNSConfig = aeeSpec.DNSConfig - } - if len(aeeSpec.OpenStackAnsibleEERunnerImage) > 0 { - ansibleEE.Spec.Image = aeeSpec.OpenStackAnsibleEERunnerImage - } - if len(aeeSpec.ExtraVars) > 0 { - ansibleEE.Spec.ExtraVars = aeeSpec.ExtraVars - } - if len(aeeSpec.AnsibleTags) > 0 { - fmt.Fprintf(&cmdLineArguments, "--tags %s ", aeeSpec.AnsibleTags) - } - if len(aeeSpec.AnsibleLimit) > 0 { - fmt.Fprintf(&cmdLineArguments, "--limit %s ", aeeSpec.AnsibleLimit) - } - if len(aeeSpec.AnsibleSkipTags) > 0 { - fmt.Fprintf(&cmdLineArguments, "--skip-tags %s ", aeeSpec.AnsibleSkipTags) - } - if len(aeeSpec.ServiceAccountName) > 0 { - ansibleEE.Spec.ServiceAccountName = aeeSpec.ServiceAccountName - } - if cmdLineArguments.Len() > 0 { - ansibleEE.Spec.CmdLine = strings.TrimSpace(cmdLineArguments.String()) - } + ansibleEE.NetworkAttachments = aeeSpec.NetworkAttachments + ansibleEE.Annotations, err = nad.CreateNetworksAnnotation(deployment.Namespace, ansibleEE.NetworkAttachments) + if err != nil { + return fmt.Errorf("failed to create NetworkAttachment annotation. Error: %w", err) + } - if len(service.Spec.PlaybookContents) > 0 { - ansibleEE.Spec.PlaybookContents = service.Spec.PlaybookContents - } - if len(service.Spec.Playbook) > 0 { - ansibleEE.Spec.Playbook = service.Spec.Playbook - } - ansibleEE.Spec.BackoffLimit = deployment.Spec.BackoffLimit + if aeeSpec.DNSConfig != nil { + ansibleEE.DNSConfig = aeeSpec.DNSConfig + } + if len(aeeSpec.OpenStackAnsibleEERunnerImage) > 0 { + ansibleEE.Image = aeeSpec.OpenStackAnsibleEERunnerImage + } else { + ansibleEE.Image = *dataplanev1.ContainerImageDefaults.AnsibleeeImage + } + if len(aeeSpec.ExtraVars) > 0 { + ansibleEE.ExtraVars = aeeSpec.ExtraVars + } + if len(aeeSpec.AnsibleTags) > 0 { + fmt.Fprintf(&cmdLineArguments, "--tags %s ", aeeSpec.AnsibleTags) + } + if len(aeeSpec.AnsibleLimit) > 0 { + fmt.Fprintf(&cmdLineArguments, "--limit %s ", aeeSpec.AnsibleLimit) + } + if len(aeeSpec.AnsibleSkipTags) > 0 { + fmt.Fprintf(&cmdLineArguments, "--skip-tags %s ", aeeSpec.AnsibleSkipTags) + } + if len(aeeSpec.ServiceAccountName) > 0 { + ansibleEE.ServiceAccountName = aeeSpec.ServiceAccountName + } + if cmdLineArguments.Len() > 0 { + ansibleEE.CmdLine = strings.TrimSpace(cmdLineArguments.String()) + } - // If we have a service that ought to be deployed everywhere - // substitute the existing play target with 'all' - // Check if we have ExtraVars before accessing it - if ansibleEE.Spec.ExtraVars == nil { - ansibleEE.Spec.ExtraVars = make(map[string]json.RawMessage) - } - if service.Spec.DeployOnAllNodeSets { - ansibleEE.Spec.ExtraVars["edpm_override_hosts"] = json.RawMessage([]byte("\"all\"")) - util.LogForObject(helper, fmt.Sprintf("for service %s, substituting existing ansible play host with 'all'.", service.Name), ansibleEE) - } else { - ansibleEE.Spec.ExtraVars["edpm_override_hosts"] = 
json.RawMessage([]byte(fmt.Sprintf("\"%s\"", nodeSet.GetName()))) - util.LogForObject(helper, - fmt.Sprintf("for service %s, substituting existing ansible play host with '%s'.", service.Name, nodeSet.GetName()), ansibleEE) - } - if service.Spec.EDPMServiceType != "" { - ansibleEE.Spec.ExtraVars["edpm_service_type"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) - } else { - ansibleEE.Spec.ExtraVars["edpm_service_type"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", service.Name))) - } + if len(service.Spec.PlaybookContents) > 0 { + ansibleEE.PlaybookContents = service.Spec.PlaybookContents + } + if len(service.Spec.Playbook) > 0 { + ansibleEE.Playbook = service.Spec.Playbook + } + ansibleEE.BackoffLimit = deployment.Spec.BackoffLimit + ansibleEE.RestartPolicy = jobRestartPolicy + + // If we have a service that ought to be deployed everywhere + // substitute the existing play target with 'all' + // Check if we have ExtraVars before accessing it + if ansibleEE.ExtraVars == nil { + ansibleEE.ExtraVars = make(map[string]json.RawMessage) + } + if service.Spec.DeployOnAllNodeSets { + ansibleEE.ExtraVars["edpm_override_hosts"] = json.RawMessage([]byte("\"all\"")) + } else { + ansibleEE.ExtraVars["edpm_override_hosts"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", nodeSet.GetName()))) + } + if service.Spec.EDPMServiceType != "" { + ansibleEE.ExtraVars["edpm_service_type"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + } else { + ansibleEE.ExtraVars["edpm_service_type"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", service.Name))) + } - if len(deployment.Spec.ServicesOverride) > 0 { - ansibleEE.Spec.ExtraVars["edpm_services_override"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", deployment.Spec.ServicesOverride))) - } + if len(deployment.Spec.ServicesOverride) > 0 { + ansibleEE.ExtraVars["edpm_services_override"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", deployment.Spec.ServicesOverride))) + } - // Sort keys of the ssh secret map - sshKeys := make([]string, 0) - for k := range sshKeySecrets { - sshKeys = append(sshKeys, k) - } - sort.Strings(sshKeys) - - for _, sshKeyNodeName := range sshKeys { - sshKeySecret := sshKeySecrets[sshKeyNodeName] - if service.Spec.DeployOnAllNodeSets { - sshKeyName = fmt.Sprintf("ssh-key-%s", sshKeyNodeName) - sshKeyMountSubPath = fmt.Sprintf("ssh_key_%s", sshKeyNodeName) - sshKeyMountPath = fmt.Sprintf("/runner/env/ssh_key/%s", sshKeyMountSubPath) - } else { - if sshKeyNodeName != nodeSet.GetName() { - continue - } - sshKeyName = "ssh-key" - sshKeyMountSubPath = "ssh_key" - sshKeyMountPath = "/runner/env/ssh_key" + // Sort keys of the ssh secret map + sshKeys := make([]string, 0) + for k := range sshKeySecrets { + sshKeys = append(sshKeys, k) + } + sort.Strings(sshKeys) + + for _, sshKeyNodeName := range sshKeys { + sshKeySecret := sshKeySecrets[sshKeyNodeName] + if service.Spec.DeployOnAllNodeSets { + sshKeyName = fmt.Sprintf("ssh-key-%s", sshKeyNodeName) + sshKeyMountSubPath = fmt.Sprintf("ssh_key_%s", sshKeyNodeName) + sshKeyMountPath = fmt.Sprintf("/runner/env/ssh_key/%s", sshKeyMountSubPath) + } else { + if sshKeyNodeName != nodeSet.GetName() { + continue } - sshKeyVolume := corev1.Volume{ - Name: sshKeyName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: sshKeySecret, - Items: []corev1.KeyToPath{ - { - Key: "ssh-privatekey", - Path: sshKeyMountSubPath, - }, + sshKeyName = "ssh-key" + sshKeyMountSubPath = "ssh_key" + sshKeyMountPath = 
"/runner/env/ssh_key" + } + sshKeyVolume := corev1.Volume{ + Name: sshKeyName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sshKeySecret, + Items: []corev1.KeyToPath{ + { + Key: "ssh-privatekey", + Path: sshKeyMountSubPath, }, }, }, - } - sshKeyMount := corev1.VolumeMount{ - Name: sshKeyName, - MountPath: sshKeyMountPath, - SubPath: sshKeyMountSubPath, - } - // Mount ssh secrets - ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, sshKeyMount) - ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, sshKeyVolume) + }, } - - // order the inventory keys otherwise it could lead to changing order and mount order changing - invKeys := make([]string, 0) - for k := range inventorySecrets { - invKeys = append(invKeys, k) + sshKeyMount := corev1.VolumeMount{ + Name: sshKeyName, + MountPath: sshKeyMountPath, + SubPath: sshKeyMountSubPath, } - sort.Strings(invKeys) - - // Mounting inventory and secrets - for inventoryIndex, nodeName := range invKeys { - if service.Spec.DeployOnAllNodeSets { - inventoryName = fmt.Sprintf("inventory-%d", inventoryIndex) - inventoryMountPath = fmt.Sprintf("/runner/inventory/%s", inventoryName) - } else { - if nodeName != nodeSet.GetName() { - continue - } - inventoryName = "inventory" - inventoryMountPath = "/runner/inventory/hosts" + // Mount ssh secrets + ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, sshKeyMount) + ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, sshKeyVolume) + } + + // order the inventory keys otherwise it could lead to changing order and mount order changing + invKeys := make([]string, 0) + for k := range inventorySecrets { + invKeys = append(invKeys, k) + } + sort.Strings(invKeys) + + // Mounting inventory and secrets + for inventoryIndex, nodeName := range invKeys { + if service.Spec.DeployOnAllNodeSets { + inventoryName = fmt.Sprintf("inventory-%d", inventoryIndex) + inventoryMountPath = fmt.Sprintf("/runner/inventory/%s", inventoryName) + } else { + if nodeName != nodeSet.GetName() { + continue } + inventoryName = "inventory" + inventoryMountPath = "/runner/inventory/hosts" + } - inventoryVolume = corev1.Volume{ - Name: inventoryName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: inventorySecrets[nodeName], - Items: []corev1.KeyToPath{ - { - Key: "inventory", - Path: inventoryName, - }, + inventoryVolume = corev1.Volume{ + Name: inventoryName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: inventorySecrets[nodeName], + Items: []corev1.KeyToPath{ + { + Key: "inventory", + Path: inventoryName, }, }, }, - } - inventoryMount := corev1.VolumeMount{ - Name: inventoryName, - MountPath: inventoryMountPath, - SubPath: inventoryName, - } - // Inventory mount - ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, inventoryMount) - ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, inventoryVolume) + }, + } + inventoryMount := corev1.VolumeMount{ + Name: inventoryName, + MountPath: inventoryMountPath, + SubPath: inventoryName, } + // Inventory mount + ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, inventoryMount) + ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, inventoryVolume) + } - ansibleEE.Spec.ExtraMounts = append(aeeSpec.ExtraMounts, []storage.VolMounts{ansibleEEMounts}...) - ansibleEE.Spec.Env = aeeSpec.Env + ansibleEE.ExtraMounts = append(aeeSpec.ExtraMounts, []storage.VolMounts{ansibleEEMounts}...) 
+ ansibleEE.Env = aeeSpec.Env - err := controllerutil.SetControllerReference(deployment, ansibleEE, helper.GetScheme()) - if err != nil { - return err - } + aeeJob, err := ansibleEE.JobForOpenStackAnsibleEE(helper) + if err != nil { + return err + } - return nil + // CreateOrPatch the AnsibleExecution Job + _, err = controllerutil.CreateOrPatch(ctx, helper.GetClient(), aeeJob, func() error { + // Set controller reference on the Job object + err := controllerutil.SetControllerReference( + helper.GetBeforeObject(), aeeJob, helper.GetScheme()) + return err }) - if err != nil { - util.LogErrorForObject(helper, err, fmt.Sprintf("Unable to create AnsibleEE %s", ansibleEE.Name), ansibleEE) return err } return nil } -// GetAnsibleExecution gets and returns an OpenStackAnsibleEE with the given +// GetAnsibleExecution gets and returns a batchv1 Job with the given // labels where // "openstackdataplaneservice": , // "openstackdataplanedeployment": , // "openstackdataplanenodeset": , // If none or more than one is found, return nil and error func GetAnsibleExecution(ctx context.Context, - helper *helper.Helper, obj client.Object, labelSelector map[string]string) (*ansibleeev1.OpenStackAnsibleEE, error) { + helper *helper.Helper, obj client.Object, labelSelector map[string]string) (*batchv1.Job, error) { var err error - ansibleEEs := &ansibleeev1.OpenStackAnsibleEEList{} + ansibleEEs := &batchv1.JobList{} listOpts := []client.ListOption{ client.InNamespace(obj.GetNamespace()), @@ -271,7 +276,7 @@ func GetAnsibleExecution(ctx context.Context, return nil, err } - var ansibleEE *ansibleeev1.OpenStackAnsibleEE + var ansibleEE *batchv1.Job if len(ansibleEEs.Items) == 0 { return nil, k8serrors.NewNotFound(appsv1.Resource("OpenStackAnsibleEE"), fmt.Sprintf("with label %s", labelSelector)) } else if len(ansibleEEs.Items) == 1 { diff --git a/pkg/dataplane/util/version.go b/pkg/dataplane/util/version.go index 93e53199b..6a883075b 100644 --- a/pkg/dataplane/util/version.go +++ b/pkg/dataplane/util/version.go @@ -39,7 +39,7 @@ func GetVersion(ctx context.Context, helper *helper.Helper, namespace string) (* return nil, err } if len(versions.Items) > 1 { - errorMsg := "Found multiple OpenStackVersions when at most 1 should exist" + errorMsg := "found multiple OpenStackVersions when at most 1 should exist" err := errors.New(errorMsg) log.Error(err, errorMsg) return nil, err @@ -55,7 +55,6 @@ func GetVersion(ctx context.Context, helper *helper.Helper, namespace string) (* // GetContainerImages - get the container image values considering either the // OpenStackVersion or the defaults func GetContainerImages(version *openstackv1.OpenStackVersion) openstackv1.ContainerImages { - var containerImages openstackv1.ContainerImages // Set the containerImages variable for the container images If there is an From 16b76b76a6d1790a80a8e90f9d659cf61e6a2d7d Mon Sep 17 00:00:00 2001 From: Brendan Shephard Date: Wed, 31 Jul 2024 11:19:47 +1000 Subject: [PATCH 2/4] Update functional tests for ansible lib This change updates the functional tests to work with the changes from Ansibleeev1 to the new lib-common AnsibleEE library. 
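
Both the controller change in the previous patch and these test updates read the
plain batchv1 Job status instead of the old OpenStackAnsibleEE JobStatus field.
The pattern they rely on is roughly the sketch below; jobOutcome is illustrative
only (not a helper added by this series) and assumes just the standard
k8s.io/api batch/v1 and core/v1 types:

    package sketch

    import (
        batchv1 "k8s.io/api/batch/v1"
        corev1 "k8s.io/api/core/v1"
    )

    // jobOutcome classifies an AnsibleEE Job from its status counters and, on
    // failure, returns a copy of the JobFailed condition whose reason/message
    // feed the error condition reported for the deployment.
    func jobOutcome(job *batchv1.Job) (string, *batchv1.JobCondition) {
        switch {
        case job.Status.Succeeded > 0:
            return "succeeded", nil
        case job.Status.Active > 0:
            return "running", nil
        case job.Status.Failed > 0:
            for i := range job.Status.Conditions {
                // Copy the condition out of the slice rather than keeping a
                // pointer into the loop.
                c := job.Status.Conditions[i]
                if c.Type == batchv1.JobFailed && c.Status == corev1.ConditionTrue {
                    return "failed", &c
                }
            }
            return "failed", nil
        default:
            return "pending", nil
        }
    }
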
Signed-off-by: Brendan Shephard --- tests/functional/dataplane/base_test.go | 15 ++++- ...tackdataplanedeployment_controller_test.go | 60 +++++++++---------- ...enstackdataplanenodeset_controller_test.go | 4 +- 3 files changed, 44 insertions(+), 35 deletions(-) diff --git a/tests/functional/dataplane/base_test.go b/tests/functional/dataplane/base_test.go index a3bfc2de7..010f5373e 100644 --- a/tests/functional/dataplane/base_test.go +++ b/tests/functional/dataplane/base_test.go @@ -5,6 +5,7 @@ import ( . "github.com/onsi/gomega" //revive:disable:dot-imports "gopkg.in/yaml.v3" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -13,7 +14,6 @@ import ( infrav1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" "github.com/openstack-k8s-operators/lib-common/modules/common/condition" - "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" ) @@ -590,8 +590,8 @@ func DataplaneDeploymentConditionGetter(name types.NamespacedName) condition.Con return instance.Status.Conditions } -func GetAnsibleee(name types.NamespacedName) *v1beta1.OpenStackAnsibleEE { - instance := &v1beta1.OpenStackAnsibleEE{} +func GetAnsibleee(name types.NamespacedName) *batchv1.Job { + instance := &batchv1.Job{} Eventually(func(g Gomega) { g.Expect(k8sClient.Get(ctx, name, instance)).Should(Succeed()) }, timeout, interval).Should(Succeed()) @@ -620,3 +620,12 @@ func getCtlPlaneIP(secret *corev1.Secret) string { } return inv.EdpmComputeNodeset.Hosts.Node.CtlPlaneIP } + +func findEnvVar(envVars []corev1.EnvVar) corev1.EnvVar { + for _, envVar := range envVars { + if envVar.Name == "RUNNER_EXTRA_VARS" { + return envVar + } + } + return corev1.EnvVar{} +} diff --git a/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go b/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go index d2938af45..bfec131e2 100644 --- a/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go +++ b/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go @@ -12,7 +12,6 @@ import ( //revive:disable-next-line:dot-imports . 
"github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -134,7 +133,8 @@ var _ = Describe("Dataplane Deployment Test", func() { CreateDataplaneService(dataplaneGlobalServiceName, true) // with EDPMServiceType set CreateDataPlaneServiceFromSpec(dataplaneUpdateServiceName, map[string]interface{}{ - "EDPMServiceType": "foo-service"}) + "edpmServiceType": "foo-update-service", + "openStackAnsibleEERunnerImage": "foo-image:latest"}) DeferCleanup(th.DeleteService, dataplaneServiceName) DeferCleanup(th.DeleteService, dataplaneGlobalServiceName) @@ -210,25 +210,22 @@ var _ = Describe("Dataplane Deployment Test", func() { Name: aeeName, Namespace: dataplaneDeploymentName.Namespace, } - ansibleEE := &ansibleeev1.OpenStackAnsibleEE{ - ObjectMeta: metav1.ObjectMeta{ - Name: ansibleeeName.Name, - Namespace: ansibleeeName.Namespace, - }} - g.Expect(th.K8sClient.Get(th.Ctx, ansibleeeName, ansibleEE)).To(Succeed()) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE := GetAnsibleee(ansibleeeName) + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) - g.Expect(ansibleEE.Spec.ExtraVars).To(HaveKey("edpm_override_hosts")) if service.Spec.EDPMServiceType != "" { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring("edpm_service_type")) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(service.Spec.EDPMServiceType)) } else { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", serviceName))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(serviceName)) } if service.Spec.DeployOnAllNodeSets { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_override_hosts"])).To(Equal("\"all\"")) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring("edpm_override_hosts")) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring("all")) } else { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_override_hosts"])).To(Equal(fmt.Sprintf("\"%s\"", dataplaneNodeSetName.Name))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring("edpm_override_hosts")) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(dataplaneNodeSetName.Name)) } }, th.Timeout, th.Interval).Should(Succeed()) } @@ -281,7 +278,8 @@ var _ = Describe("Dataplane Deployment Test", func() { CreateDataplaneService(dataplaneServiceName, false) CreateDataplaneService(dataplaneGlobalServiceName, true) CreateDataPlaneServiceFromSpec(dataplaneUpdateServiceName, map[string]interface{}{ - "EDPMServiceType": "foo-service"}) + "edpmServiceType": "foo-update-service", + "openStackAnsibleEERunnerImage": "foo-image:latest"}) DeferCleanup(th.DeleteService, dataplaneServiceName) DeferCleanup(th.DeleteService, dataplaneGlobalServiceName) @@ -430,19 +428,21 @@ var _ = Describe("Dataplane Deployment Test", func() { } ansibleEE := GetAnsibleee(ansibleeeName) if 
service.Spec.DeployOnAllNodeSets { - g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(4)) + g.Expect(ansibleEE.Spec.Template.Spec.Volumes).Should(HaveLen(4)) } else { - g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(2)) + g.Expect(ansibleEE.Spec.Template.Spec.Volumes).Should(HaveLen(2)) } - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) if service.Spec.EDPMServiceType != "" { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(service.Spec.EDPMServiceType)) } else { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", serviceName))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(serviceName)) } if service.Spec.DeployOnAllNodeSets { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_override_hosts"])).To(Equal("\"all\"")) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring("edpm_override_hosts")) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring("all")) + } }, th.Timeout, th.Interval).Should(Succeed()) } @@ -467,16 +467,16 @@ var _ = Describe("Dataplane Deployment Test", func() { } ansibleEE := GetAnsibleee(ansibleeeName) if service.Spec.DeployOnAllNodeSets { - g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(4)) + g.Expect(ansibleEE.Spec.Template.Spec.Volumes).Should(HaveLen(4)) } else { - g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(2)) + g.Expect(ansibleEE.Spec.Template.Spec.Volumes).Should(HaveLen(2)) } - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) if service.Spec.EDPMServiceType != "" { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(service.Spec.EDPMServiceType)) } else { - g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", serviceName))) + g.Expect(findEnvVar(ansibleEE.Spec.Template.Spec.Containers[0].Env).Value).To(ContainSubstring(serviceName)) } }, th.Timeout, th.Interval).Should(Succeed()) } @@ -694,7 +694,7 @@ var _ = Describe("Dataplane Deployment Test", func() { } Eventually(func(g Gomega) { ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) @@ -897,7 +897,7 @@ var _ = Describe("Dataplane Deployment Test", func() { Namespace: dataplaneMultiNodesetDeploymentName.Namespace, } ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) } @@ -922,7 +922,7 @@ var _ = Describe("Dataplane Deployment Test", func() { Namespace: dataplaneMultiNodesetDeploymentName.Namespace, } ansibleEE := GetAnsibleee(ansibleeeName) - 
ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) } @@ -1023,7 +1023,7 @@ var _ = Describe("Dataplane Deployment Test", func() { } Eventually(func(g Gomega) { ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) diff --git a/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go b/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go index 69583aa5b..8d5ad0457 100644 --- a/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go +++ b/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go @@ -23,7 +23,6 @@ import ( . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports . "github.com/onsi/gomega" //revive:disable:dot-imports "github.com/openstack-k8s-operators/lib-common/modules/common/condition" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" @@ -1301,6 +1300,7 @@ var _ = Describe("Dataplane NodeSet Test", func() { When("A DataPlaneNodeSet is created with NoNodes and a MinorUpdate OpenStackDataPlaneDeployment is created", func() { BeforeEach(func() { + dataplanev1.SetupDefaults() updateServiceSpec := map[string]interface{}{ "playbook": "osp.edpm.update", } @@ -1331,7 +1331,7 @@ var _ = Describe("Dataplane NodeSet Test", func() { Namespace: namespace, } ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) From b54fc0306a6821dcf8bec4c2714f2c66d5a554bb Mon Sep 17 00:00:00 2001 From: Brendan Shephard Date: Fri, 2 Aug 2024 09:15:34 +1000 Subject: [PATCH 3/4] Remove AnsibleEE Operator Signed-off-by: Brendan Shephard --- main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/main.go b/main.go index 0d082420e..fa9996e06 100644 --- a/main.go +++ b/main.go @@ -46,7 +46,6 @@ import ( neutronv1 "github.com/openstack-k8s-operators/neutron-operator/api/v1beta1" novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" octaviav1 "github.com/openstack-k8s-operators/octavia-operator/api/v1beta1" - ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" ovnv1 "github.com/openstack-k8s-operators/ovn-operator/api/v1beta1" placementv1 "github.com/openstack-k8s-operators/placement-operator/api/v1beta1" @@ -108,7 +107,6 @@ func init() { utilruntime.Must(neutronv1.AddToScheme(scheme)) utilruntime.Must(octaviav1.AddToScheme(scheme)) utilruntime.Must(designatev1.AddToScheme(scheme)) - utilruntime.Must(ansibleeev1.AddToScheme(scheme)) utilruntime.Must(rabbitmqv1.AddToScheme(scheme)) utilruntime.Must(manilav1.AddToScheme(scheme)) utilruntime.Must(horizonv1.AddToScheme(scheme)) From 0ac1288d2b7ed64dcb0aaf354fcf889130b273b3 Mon Sep 17 00:00:00 2001 From: Brendan Shephard Date: Tue, 13 Aug 2024 13:06:53 +1000 Subject: [PATCH 4/4] Allow for Jobs 
instead of AnsibleEE CR in kuttl Signed-off-by: Brendan Shephard --- ...tackdataplanedeployment_controller_test.go | 6 +- .../01-assert.yaml | 2163 +++++++++++------ .../02-assert.yaml | 256 +- .../02-assert.yaml | 302 ++- .../01-assert.yaml | 2032 ++++++++++------ .../02-assert.yaml | 148 +- .../04-assert.yaml | 153 +- .../06-assert.yaml | 263 +- .../dataplane-deploy-tls-test/02-assert.yaml | 321 ++- .../dataplane-deploy-tls-test/03-assert.yaml | 355 +-- .../dataplane-extramounts/00-assert.yaml | 131 +- .../dataplane-service-config/00-assert.yaml | 277 ++- .../00-assert.yaml | 131 +- .../dataplane-service-failure/00-assert.yaml | 170 +- 14 files changed, 4383 insertions(+), 2325 deletions(-) diff --git a/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go b/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go index bfec131e2..499c8ba97 100644 --- a/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go +++ b/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go @@ -1217,7 +1217,7 @@ var _ = Describe("Dataplane Deployment Test", func() { Namespace: dataplaneMultiNodesetDeploymentName.Namespace, } ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) } @@ -1424,7 +1424,7 @@ var _ = Describe("Dataplane Deployment Test", func() { Namespace: dataplaneMultiNodesetDeploymentName.Namespace, } ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) } @@ -1449,7 +1449,7 @@ var _ = Describe("Dataplane Deployment Test", func() { Namespace: dataplaneMultiNodesetDeploymentName.Namespace, } ansibleEE := GetAnsibleee(ansibleeeName) - ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + ansibleEE.Status.Succeeded = 1 g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) }, th.Timeout, th.Interval).Should(Succeed()) } diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml index 8c38bc513..7714ebbf3 100644 --- a/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml @@ -70,10 +70,18 @@ status: status: "True" type: SetupReady --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: custom-global-service-edpm-compute-global + openstackansibleee_cr: custom-global-service-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: custom-global-service + osaee: "true" name: custom-global-service-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -84,60 +92,107 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - env: - - name: ANSIBLE_FORCE_COLOR - value: "True" - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key/ssh_key_edpm-compute-global - name: ssh-key-edpm-compute-global - subPath: ssh_key_edpm-compute-global - - 
mountPath: /runner/inventory/inventory-0 - name: inventory-0 - subPath: inventory-0 - volumes: - - name: ssh-key-edpm-compute-global - secret: - items: - - key: ssh-privatekey - path: ssh_key_edpm-compute-global - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory-0 - secret: - items: - - key: inventory - path: inventory-0 - secretName: dataplanenodeset-edpm-compute-global - extraVars: - edpm_override_hosts: all - name: openstackansibleee - playbookContents: | - - hosts: localhost - gather_facts: no - name: global kuttl play - tasks: - - name: Sleep - command: sleep 1 - delegate_to: localhost - preserveJobs: true - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: custom-global-service-edpm-compute-global + job-name: custom-global-service-edpm-compute-global + openstackansibleee_cr: custom-global-service-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: custom-global-service + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - custom-global-service-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: global kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: all + edpm_service_type: custom-global-service + + + imagePullPolicy: Always + name: custom-global-service-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key/ssh_key_edpm-compute-global + name: ssh-key-edpm-compute-global + subPath: ssh_key_edpm-compute-global + - mountPath: /runner/inventory/inventory-0 + name: inventory-0 + subPath: inventory-0 + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key-edpm-compute-global + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key_edpm-compute-global + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory-0 + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory-0 + secretName: dataplanenodeset-edpm-compute-global status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + succeeded: 1 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: download-cache-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: download-cache-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: download-cache + osaee: "true" name: download-cache-edpm-compute-global-edpm-compute-global namespace: 
openstack-kuttl-tests ownerReferences: @@ -148,46 +203,103 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.download_cache - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: download-cache-edpm-compute-global-edpm-compute-global + job-name: download-cache-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: download-cache-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: download-cache + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.download_cache + - -i + - download-cache-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.download_cache + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: download-cache + + + imagePullPolicy: Always + name: download-cache-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: bootstrap-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: bootstrap-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: bootstrap + osaee: "true" name: bootstrap-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -198,47 +310,104 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - 
mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.bootstrap - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: bootstrap-edpm-compute-global-edpm-compute-global + job-name: bootstrap-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: bootstrap-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: bootstrap + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.bootstrap + - -i + - bootstrap-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.bootstrap + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: bootstrap + + + imagePullPolicy: Always + name: bootstrap-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: configure-network-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: configure-network-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: configure-network + osaee: "true" name: configure-network-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -249,46 +418,104 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: 
ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.configure_network - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: configure-network-edpm-compute-global-edpm-compute-global + job-name: configure-network-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: configure-network-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: configure-network + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.configure_network + - -i + - configure-network-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.configure_network + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: configure-network + + + imagePullPolicy: Always + name: configure-network-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} + --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: validate-network-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: validate-network-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: validate-network + osaee: "true" name: validate-network-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -299,97 +526,104 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: 
dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.validate_network - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady ---- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE -metadata: - name: install-os-edpm-compute-global-edpm-compute-global - namespace: openstack-kuttl-tests - ownerReferences: - - apiVersion: dataplane.openstack.org/v1beta1 - blockOwnerDeletion: true - controller: true - kind: OpenStackDataPlaneDeployment - name: edpm-compute-global -spec: - backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.install_os - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: validate-network-edpm-compute-global-edpm-compute-global + job-name: validate-network-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: validate-network-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: validate-network + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.validate_network + - -i + - validate-network-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.validate_network + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: validate-network + + + imagePullPolicy: Always + name: validate-network-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + 
succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} + --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: configure-os-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: configure-os-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: configure-os + osaee: "true" name: configure-os-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -400,96 +634,104 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.configure_os - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady ---- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE -metadata: - name: run-os-edpm-compute-global-edpm-compute-global - namespace: openstack-kuttl-tests - ownerReferences: - - apiVersion: dataplane.openstack.org/v1beta1 - blockOwnerDeletion: true - controller: true - kind: OpenStackDataPlaneDeployment - name: edpm-compute-global -spec: - backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.run_os - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: configure-os-edpm-compute-global-edpm-compute-global + job-name: configure-os-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: configure-os-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: configure-os + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.configure_os + - -i + - configure-os-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.configure_os + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: configure-os + + + imagePullPolicy: Always + name: configure-os-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: 
/dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} + --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: install-certs-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: install-certs-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: install-certs + osaee: "true" name: install-certs-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -500,47 +742,95 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.install_certs - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: install-certs-edpm-compute-global-edpm-compute-global + job-name: install-certs-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: install-certs-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: install-certs + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.install_certs + - -i + - install-certs-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.install_certs + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: install-certs + + + imagePullPolicy: Always + name: 
install-certs-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: ovn-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: ovn-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: ovn + osaee: "true" name: ovn-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -551,58 +841,105 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config - name: ovncontroller-config-0 - subPath: ovsdb-config - volumes: - - configMap: - items: - - key: ovsdb-config - path: ovsdb-config - name: ovncontroller-config - name: ovncontroller-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.ovn - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: ovn-edpm-compute-global-edpm-compute-global + job-name: ovn-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: ovn-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: ovn + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.ovn + - -i + - ovn-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.ovn + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: ovn + + + imagePullPolicy: Always + name: ovn-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + 
terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config + name: ovncontroller-config-0 + subPath: ovsdb-config + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + items: + - key: ovsdb-config + path: ovsdb-config + name: ovncontroller-config + name: ovncontroller-config-0 + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-metadata-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-metadata-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-metadata + osaee: "true" name: neutron-metadata-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -613,87 +950,135 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-metadata/10-neutron-metadata.conf - name: neutron-ovn-metadata-agent-neutron-config-0 - subPath: 10-neutron-metadata.conf - volumes: - - secret: - items: - - key: 10-neutron-metadata.conf - path: 10-neutron-metadata.conf - secretName: neutron-ovn-metadata-agent-neutron-config - name: neutron-ovn-metadata-agent-neutron-config-0 - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-metadata/05-nova-metadata.conf - name: nova-metadata-neutron-config-0 - subPath: 05-nova-metadata.conf - - mountPath: /var/lib/openstack/configs/neutron-metadata/httpd.conf - name: nova-metadata-neutron-config-1 - subPath: httpd.conf - - mountPath: /var/lib/openstack/configs/neutron-metadata/nova-metadata-config.json - name: nova-metadata-neutron-config-2 - subPath: nova-metadata-config.json - volumes: - - secret: - items: - - key: 05-nova-metadata.conf - path: 05-nova-metadata.conf - secretName: nova-metadata-neutron-config - name: nova-metadata-neutron-config-0 - - name: nova-metadata-neutron-config-1 - secret: - items: - - key: httpd.conf - path: httpd.conf - secretName: nova-metadata-neutron-config - - name: nova-metadata-neutron-config-2 - secret: - items: - - key: nova-metadata-config.json - path: nova-metadata-config.json - secretName: nova-metadata-neutron-config - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_metadata - uid: 1001 -status: - JobStatus: Succeeded - 
conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-metadata-edpm-compute-global-edpm-compute-global + job-name: neutron-metadata-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-metadata-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-metadata + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_metadata + - -i + - neutron-metadata-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_metadata + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: neutron-metadata + + + imagePullPolicy: Always + name: neutron-metadata-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-metadata/10-neutron-metadata.conf + name: neutron-ovn-metadata-agent-neutron-config-0 + subPath: 10-neutron-metadata.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/05-nova-metadata.conf + name: nova-metadata-neutron-config-0 + subPath: 05-nova-metadata.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/httpd.conf + name: nova-metadata-neutron-config-1 + subPath: httpd.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/nova-metadata-config.json + name: nova-metadata-neutron-config-2 + subPath: nova-metadata-config.json + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-ovn-metadata-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-metadata.conf + path: 10-neutron-metadata.conf + secretName: neutron-ovn-metadata-agent-neutron-config + - name: nova-metadata-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 05-nova-metadata.conf + path: 05-nova-metadata.conf + secretName: nova-metadata-neutron-config + - name: nova-metadata-neutron-config-1 + secret: + defaultMode: 420 + items: + - key: httpd.conf + path: httpd.conf + secretName: nova-metadata-neutron-config + - name: nova-metadata-neutron-config-2 + secret: + defaultMode: 420 + items: + - key: nova-metadata-config.json + path: nova-metadata-config.json + secretName: nova-metadata-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 
+kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-ovn-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-ovn-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-ovn + osaee: "true" name: neutron-ovn-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -704,58 +1089,105 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-ovn/10-neutron-ovn.conf - name: neutron-ovn-agent-neutron-config-0 - subPath: 10-neutron-ovn.conf - volumes: - - secret: - items: - - key: 10-neutron-ovn.conf - path: 10-neutron-ovn.conf - secretName: neutron-ovn-agent-neutron-config - name: neutron-ovn-agent-neutron-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_ovn - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-ovn-edpm-compute-global-edpm-compute-global + job-name: neutron-ovn-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-ovn-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-ovn + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_ovn + - -i + - neutron-ovn-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_ovn + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: neutron-ovn + + + imagePullPolicy: Always + name: neutron-ovn-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-ovn/10-neutron-ovn.conf + name: neutron-ovn-agent-neutron-config-0 + subPath: 10-neutron-ovn.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-ovn-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-ovn.conf + path: 10-neutron-ovn.conf + secretName: neutron-ovn-agent-neutron-config + 
- name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-sriov-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-sriov-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-sriov + osaee: "true" name: neutron-sriov-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -766,58 +1198,105 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-sriov/10-neutron-sriov.conf - name: neutron-sriov-agent-neutron-config-0 - subPath: 10-neutron-sriov.conf - volumes: - - secret: - items: - - key: 10-neutron-sriov.conf - path: 10-neutron-sriov.conf - secretName: neutron-sriov-agent-neutron-config - name: neutron-sriov-agent-neutron-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_sriov - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-sriov-edpm-compute-global-edpm-compute-global + job-name: neutron-sriov-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-sriov-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-sriov + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_sriov + - -i + - neutron-sriov-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_sriov + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: neutron-sriov + + + imagePullPolicy: Always + name: neutron-sriov-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-sriov/10-neutron-sriov.conf + name: neutron-sriov-agent-neutron-config-0 + subPath: 10-neutron-sriov.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: 
/runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-sriov-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-sriov.conf + path: 10-neutron-sriov.conf + secretName: neutron-sriov-agent-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-dhcp-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-dhcp-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-dhcp + osaee: "true" name: neutron-dhcp-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -828,57 +1307,105 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-dhcp/10-neutron-dhcp.conf - name: neutron-dhcp-agent-neutron-config-0 - subPath: 10-neutron-dhcp.conf - volumes: - - secret: - items: - - key: 10-neutron-dhcp.conf - path: 10-neutron-dhcp.conf - secretName: neutron-dhcp-agent-neutron-config - name: neutron-dhcp-agent-neutron-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_dhcp - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-dhcp-edpm-compute-global-edpm-compute-global + job-name: neutron-dhcp-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: neutron-dhcp-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: neutron-dhcp + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_dhcp + - -i + - neutron-dhcp-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_dhcp + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: neutron-dhcp + + + 
imagePullPolicy: Always + name: neutron-dhcp-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-dhcp/10-neutron-dhcp.conf + name: neutron-dhcp-agent-neutron-config-0 + subPath: 10-neutron-dhcp.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-dhcp-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-dhcp.conf + path: 10-neutron-dhcp.conf + secretName: neutron-dhcp-agent-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: libvirt-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: libvirt-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: libvirt + osaee: "true" name: libvirt-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests ownerReferences: @@ -889,138 +1416,228 @@ metadata: name: edpm-compute-global spec: backoffLimit: 6 - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/libvirt/LibvirtPassword - name: libvirt-secret-0 - subPath: LibvirtPassword - volumes: - - name: libvirt-secret-0 - secret: - items: - - key: LibvirtPassword - path: LibvirtPassword - secretName: libvirt-secret - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - preserveJobs: true - restartPolicy: Never - playbook: osp.edpm.libvirt - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: libvirt-edpm-compute-global-edpm-compute-global + job-name: libvirt-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: libvirt-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: libvirt + osaee: "true" + spec: + containers: + - args: + - 
ansible-runner + - run + - /runner + - -p + - osp.edpm.libvirt + - -i + - libvirt-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.libvirt + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: libvirt + + + imagePullPolicy: Always + name: libvirt-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/libvirt/LibvirtPassword + name: libvirt-secret-0 + subPath: LibvirtPassword + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: libvirt-secret-0 + secret: + defaultMode: 420 + items: + - key: LibvirtPassword + path: LibvirtPassword + secretName: libvirt-secret + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: nova-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: nova-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: nova + osaee: "true" name: nova-edpm-compute-global-edpm-compute-global namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global spec: backoffLimit: 6 - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/nova/01-nova.conf - name: nova-cell1-compute-config-0 - subPath: 01-nova.conf - - mountPath: /var/lib/openstack/configs/nova/nova-blank.conf - name: nova-cell1-compute-config-1 - subPath: nova-blank.conf - volumes: - - name: nova-cell1-compute-config-0 - secret: - items: - - key: 01-nova.conf - path: 01-nova.conf - secretName: nova-cell1-compute-config - - name: nova-cell1-compute-config-1 - secret: - items: - - key: nova-blank.conf - path: nova-blank.conf - secretName: nova-cell1-compute-config - - mounts: - - mountPath: /var/lib/openstack/configs/nova/ssh-privatekey - name: nova-migration-ssh-key-0 - subPath: ssh-privatekey - - mountPath: /var/lib/openstack/configs/nova/ssh-publickey - name: nova-migration-ssh-key-1 - subPath: ssh-publickey - volumes: - - name: nova-migration-ssh-key-0 - secret: - items: - - key: ssh-privatekey - path: ssh-privatekey - secretName: nova-migration-ssh-key - - name: nova-migration-ssh-key-1 - secret: - items: - - key: ssh-publickey - path: ssh-publickey - secretName: nova-migration-ssh-key - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - 
items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-global - name: openstackansibleee - preserveJobs: true - restartPolicy: Never - playbook: osp.edpm.nova - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: nova-edpm-compute-global-edpm-compute-global + job-name: nova-edpm-compute-global-edpm-compute-global + openstackansibleee_cr: nova-edpm-compute-global-edpm-compute-global + openstackdataplanedeployment: edpm-compute-global + openstackdataplanenodeset: edpm-compute-global + openstackdataplaneservice: nova + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.nova + - -i + - nova-edpm-compute-global-edpm-compute-global + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.nova + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-global + edpm_service_type: nova + + + imagePullPolicy: Always + name: nova-edpm-compute-global-edpm-compute-global + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/nova/01-nova.conf + name: nova-cell1-compute-config-0 + subPath: 01-nova.conf + - mountPath: /var/lib/openstack/configs/nova/nova-blank.conf + name: nova-cell1-compute-config-1 + subPath: nova-blank.conf + - mountPath: /var/lib/openstack/configs/nova/ssh-privatekey + name: nova-migration-ssh-key-0 + subPath: ssh-privatekey + - mountPath: /var/lib/openstack/configs/nova/ssh-publickey + name: nova-migration-ssh-key-1 + subPath: ssh-publickey + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-global + serviceAccountName: edpm-compute-global + terminationGracePeriodSeconds: 30 + volumes: + - name: nova-cell1-compute-config-0 + secret: + defaultMode: 420 + items: + - key: 01-nova.conf + path: 01-nova.conf + secretName: nova-cell1-compute-config + - name: nova-cell1-compute-config-1 + secret: + defaultMode: 420 + items: + - key: nova-blank.conf + path: nova-blank.conf + secretName: nova-cell1-compute-config + - name: nova-migration-ssh-key-0 + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh-privatekey + secretName: nova-migration-ssh-key + - name: nova-migration-ssh-key-1 + secret: + defaultMode: 420 + items: + - key: ssh-publickey + path: ssh-publickey + secretName: nova-migration-ssh-key + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global diff --git 
a/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml index 085adb780..90a7a8440 100644 --- a/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml @@ -73,9 +73,17 @@ spec: - edpm-compute-global - edpm-compute-beta-nodeset --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + labels: + app: openstackansibleee + job-name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: download-cache + osaee: "true" name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset namespace: openstack-kuttl-tests ownerReferences: @@ -86,46 +94,102 @@ metadata: name: edpm-multinodeset spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-beta-nodeset - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.download_cache - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + job-name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: download-cache + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.download_cache + - -i + - download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.download_cache + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-beta-nodeset + edpm_service_type: download-cache + + + imagePullPolicy: Always + name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-beta-nodeset + serviceAccountName: edpm-compute-beta-nodeset + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: 
dataplanenodeset-edpm-compute-beta-nodeset status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + labels: + app: openstackansibleee + job-name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: bootstrap + osaee: "true" name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset namespace: openstack-kuttl-tests ownerReferences: @@ -136,39 +200,87 @@ metadata: name: edpm-multinodeset spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-beta-nodeset - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.bootstrap - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + job-name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: bootstrap + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.bootstrap + - -i + - bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.bootstrap + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-beta-nodeset + edpm_service_type: bootstrap + + + imagePullPolicy: Always + name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-beta-nodeset + serviceAccountName: edpm-compute-beta-nodeset + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - 
type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml index 124bf2d5a..9c09dade7 100644 --- a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml @@ -103,9 +103,18 @@ metadata: name: openstack-edpm-tls type: Opaque --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: install-certs-ovr + osaee: "true" name: install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls namespace: openstack-kuttl-tests ownerReferences: @@ -116,105 +125,218 @@ metadata: name: openstack-edpm-tls spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/certs/generic-service1/default - name: openstack-edpm-tls-generic-service1-default-certs-0 - volumes: - - name: openstack-edpm-tls-generic-service1-default-certs-0 - projected: - sources: - - secret: - name: openstack-edpm-tls-generic-service1-default-certs-0 - - secret: - name: openstack-edpm-tls-generic-service1-default-certs-1 - - secret: - name: openstack-edpm-tls-generic-service1-default-certs-2 - - mounts: - - mountPath: /var/lib/openstack/cacerts/generic-service1 - name: generic-service1-combined-ca-bundle - volumes: - - name: generic-service1-combined-ca-bundle - secret: - secretName: combined-ca-bundle - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + labels: + app: openstackansibleee + job-name: install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: install-certs-ovr + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: openstack-edpm-tls + edpm_service_type: install-certs-ovr + + + imagePullPolicy: Always + name: 
install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/certs/generic-service1/default + name: openstack-edpm-tls-generic-service1-default-certs-0 + - mountPath: /var/lib/openstack/cacerts/generic-service1 + name: generic-service1-combined-ca-bundle + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: openstack-edpm-tls + serviceAccountName: openstack-edpm-tls + terminationGracePeriodSeconds: 30 + volumes: + - name: openstack-edpm-tls-generic-service1-default-certs-0 + projected: + defaultMode: 420 + sources: + - secret: + name: openstack-edpm-tls-generic-service1-default-certs-0 + - secret: + name: openstack-edpm-tls-generic-service1-default-certs-1 + - secret: + name: openstack-edpm-tls-generic-service1-default-certs-2 + - name: generic-service1-combined-ca-bundle + secret: + defaultMode: 420 + secretName: combined-ca-bundle + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + succeeded: 1 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: generic-service1-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: generic-service1-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: generic-service1 + osaee: "true" name: generic-service1-openstack-edpm-tls-openstack-edpm-tls namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true kind: OpenStackDataPlaneDeployment name: openstack-edpm-tls spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + labels: + app: openstackansibleee + job-name: generic-service1-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: generic-service1-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: generic-service1 + osaee: "true" + spec: + 
containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - generic-service1-openstack-edpm-tls-openstack-edpm-tls + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: openstack-edpm-tls + edpm_service_type: generic-service1 + + + imagePullPolicy: Always + name: generic-service1-openstack-edpm-tls-openstack-edpm-tls + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: openstack-edpm-tls + serviceAccountName: openstack-edpm-tls + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + succeeded: 1 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml index 1c79d6d8e..7633f7f79 100644 --- a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml @@ -66,9 +66,18 @@ status: status: "True" type: SetupReady --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: download-cache + osaee: "true" name: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -79,46 +88,104 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.download_cache - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: 
download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: download-cache + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.download_cache + - -i + - download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.download_cache + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: download-cache + foo: bar + + + imagePullPolicy: Always + name: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: bootstrap + osaee: "true" name: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -129,49 +196,105 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraVars: - foo: bar - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.bootstrap - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: 
bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: bootstrap + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.bootstrap + - -i + - bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.bootstrap + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: bootstrap + foo: bar + + + imagePullPolicy: Always + name: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: configure-network + osaee: "true" name: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -182,46 +305,105 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.configure_network - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: 
configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: configure-network + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.configure_network + - -i + - configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.configure_network + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: configure-network + foo: bar + + + imagePullPolicy: Always + name: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} + --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: validate-network + osaee: "true" name: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -232,97 +414,105 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.validate_network - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady ---- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE -metadata: - name: install-os-edpm-compute-no-nodes-edpm-compute-no-nodes - namespace: openstack-kuttl-tests - ownerReferences: - - apiVersion: dataplane.openstack.org/v1beta1 - 
blockOwnerDeletion: true - controller: true - kind: OpenStackDataPlaneDeployment - name: edpm-compute-no-nodes -spec: - backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.install_os - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: validate-network + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.validate_network + - -i + - validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.validate_network + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: validate-network + foo: bar + + + imagePullPolicy: Always + name: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} + --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: configure-os + osaee: "true" name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -333,96 
+523,105 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.configure_os - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady ---- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE -metadata: - name: run-os-edpm-compute-no-nodes-edpm-compute-no-nodes - namespace: openstack-kuttl-tests - ownerReferences: - - apiVersion: dataplane.openstack.org/v1beta1 - blockOwnerDeletion: true - controller: true - kind: OpenStackDataPlaneDeployment - name: edpm-compute-no-nodes -spec: - backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.run_os - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: configure-os + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.configure_os + - -i + - configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.configure_os + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: configure-os + foo: bar + + + imagePullPolicy: Always + name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + 
defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} + --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: install-certs + osaee: "true" name: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -433,47 +632,96 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.install_certs - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: install-certs + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.install_certs + - -i + - install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.install_certs + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: install-certs + foo: bar + + + imagePullPolicy: Always + name: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: 
ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: ovn + osaee: "true" name: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -484,58 +732,106 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config - name: ovncontroller-config-0 - subPath: ovsdb-config - volumes: - - configMap: - items: - - key: ovsdb-config - path: ovsdb-config - name: ovncontroller-config - name: ovncontroller-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.ovn - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: ovn + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.ovn + - -i + - ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.ovn + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: ovn + foo: bar + + + imagePullPolicy: Always + name: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config + name: ovncontroller-config-0 + subPath: ovsdb-config + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + 
defaultMode: 420 + items: + - key: ovsdb-config + path: ovsdb-config + name: ovncontroller-config + name: ovncontroller-config-0 + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-metadata + osaee: "true" name: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -546,87 +842,136 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-metadata/10-neutron-metadata.conf - name: neutron-ovn-metadata-agent-neutron-config-0 - subPath: 10-neutron-metadata.conf - volumes: - - secret: - items: - - key: 10-neutron-metadata.conf - path: 10-neutron-metadata.conf - secretName: neutron-ovn-metadata-agent-neutron-config - name: neutron-ovn-metadata-agent-neutron-config-0 - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-metadata/05-nova-metadata.conf - name: nova-metadata-neutron-config-0 - subPath: 05-nova-metadata.conf - - mountPath: /var/lib/openstack/configs/neutron-metadata/httpd.conf - name: nova-metadata-neutron-config-1 - subPath: httpd.conf - - mountPath: /var/lib/openstack/configs/neutron-metadata/nova-metadata-config.json - name: nova-metadata-neutron-config-2 - subPath: nova-metadata-config.json - volumes: - - secret: - items: - - key: 05-nova-metadata.conf - path: 05-nova-metadata.conf - secretName: nova-metadata-neutron-config - name: nova-metadata-neutron-config-0 - - name: nova-metadata-neutron-config-1 - secret: - items: - - key: httpd.conf - path: httpd.conf - secretName: nova-metadata-neutron-config - - name: nova-metadata-neutron-config-2 - secret: - items: - - key: nova-metadata-config.json - path: nova-metadata-config.json - secretName: nova-metadata-neutron-config - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_metadata - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: 
neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-metadata + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_metadata + - -i + - neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_metadata + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: neutron-metadata + foo: bar + + + imagePullPolicy: Always + name: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-metadata/10-neutron-metadata.conf + name: neutron-ovn-metadata-agent-neutron-config-0 + subPath: 10-neutron-metadata.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/05-nova-metadata.conf + name: nova-metadata-neutron-config-0 + subPath: 05-nova-metadata.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/httpd.conf + name: nova-metadata-neutron-config-1 + subPath: httpd.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/nova-metadata-config.json + name: nova-metadata-neutron-config-2 + subPath: nova-metadata-config.json + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-ovn-metadata-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-metadata.conf + path: 10-neutron-metadata.conf + secretName: neutron-ovn-metadata-agent-neutron-config + - name: nova-metadata-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 05-nova-metadata.conf + path: 05-nova-metadata.conf + secretName: nova-metadata-neutron-config + - name: nova-metadata-neutron-config-1 + secret: + defaultMode: 420 + items: + - key: httpd.conf + path: httpd.conf + secretName: nova-metadata-neutron-config + - name: nova-metadata-neutron-config-2 + secret: + defaultMode: 420 + items: + - key: nova-metadata-config.json + path: nova-metadata-config.json + secretName: nova-metadata-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-ovn + osaee: "true" name: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: 
openstack-kuttl-tests ownerReferences: @@ -637,58 +982,106 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-ovn/10-neutron-ovn.conf - name: neutron-ovn-agent-neutron-config-0 - subPath: 10-neutron-ovn.conf - volumes: - - secret: - items: - - key: 10-neutron-ovn.conf - path: 10-neutron-ovn.conf - secretName: neutron-ovn-agent-neutron-config - name: neutron-ovn-agent-neutron-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_ovn - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-ovn + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_ovn + - -i + - neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_ovn + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: neutron-ovn + foo: bar + + + imagePullPolicy: Always + name: neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-ovn/10-neutron-ovn.conf + name: neutron-ovn-agent-neutron-config-0 + subPath: 10-neutron-ovn.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-ovn-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-ovn.conf + path: 10-neutron-ovn.conf + secretName: neutron-ovn-agent-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 
+kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-sriov + osaee: "true" name: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -699,58 +1092,106 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-sriov/10-neutron-sriov.conf - name: neutron-sriov-agent-neutron-config-0 - subPath: 10-neutron-sriov.conf - volumes: - - secret: - items: - - key: 10-neutron-sriov.conf - path: 10-neutron-sriov.conf - secretName: neutron-sriov-agent-neutron-config - name: neutron-sriov-agent-neutron-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_sriov - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-sriov + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_sriov + - -i + - neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_sriov + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: neutron-sriov + foo: bar + + + imagePullPolicy: Always + name: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-sriov/10-neutron-sriov.conf + name: neutron-sriov-agent-neutron-config-0 + subPath: 10-neutron-sriov.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-sriov-agent-neutron-config-0 + secret: + defaultMode: 420 
+ items: + - key: 10-neutron-sriov.conf + path: 10-neutron-sriov.conf + secretName: neutron-sriov-agent-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 + labels: + app: openstackansibleee + job-name: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-dhcp + osaee: "true" name: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -761,57 +1202,106 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/neutron-dhcp/10-neutron-dhcp.conf - name: neutron-dhcp-agent-neutron-config-0 - subPath: 10-neutron-dhcp.conf - volumes: - - secret: - items: - - key: 10-neutron-dhcp.conf - path: 10-neutron-dhcp.conf - secretName: neutron-dhcp-agent-neutron-config - name: neutron-dhcp-agent-neutron-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.neutron_dhcp - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: neutron-dhcp + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.neutron_dhcp + - -i + - neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.neutron_dhcp + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: neutron-dhcp + foo: bar + + + imagePullPolicy: Always + name: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/neutron-dhcp/10-neutron-dhcp.conf + name: 
neutron-dhcp-agent-neutron-config-0 + subPath: 10-neutron-dhcp.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: neutron-dhcp-agent-neutron-config-0 + secret: + defaultMode: 420 + items: + - key: 10-neutron-dhcp.conf + path: 10-neutron-dhcp.conf + secretName: neutron-dhcp-agent-neutron-config + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: libvirt + osaee: "true" name: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -822,138 +1312,230 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/libvirt/LibvirtPassword - name: libvirt-secret-0 - subPath: LibvirtPassword - volumes: - - name: libvirt-secret-0 - secret: - items: - - key: LibvirtPassword - path: LibvirtPassword - secretName: libvirt-secret - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - preserveJobs: true - restartPolicy: Never - playbook: osp.edpm.libvirt - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: libvirt + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.libvirt + - -i + - libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.libvirt + + - name: 
RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: libvirt + foo: bar + + + imagePullPolicy: Always + name: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/libvirt/LibvirtPassword + name: libvirt-secret-0 + subPath: LibvirtPassword + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: libvirt-secret-0 + secret: + defaultMode: 420 + items: + - key: LibvirtPassword + path: LibvirtPassword + secretName: libvirt-secret + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: nova + osaee: "true" name: nova-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes spec: backoffLimit: 6 - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/nova/01-nova.conf - name: nova-cell1-compute-config-0 - subPath: 01-nova.conf - - mountPath: /var/lib/openstack/configs/nova/nova-blank.conf - name: nova-cell1-compute-config-1 - subPath: nova-blank.conf - volumes: - - name: nova-cell1-compute-config-0 - secret: - items: - - key: 01-nova.conf - path: 01-nova.conf - secretName: nova-cell1-compute-config - - name: nova-cell1-compute-config-1 - secret: - items: - - key: nova-blank.conf - path: nova-blank.conf - secretName: nova-cell1-compute-config - - mounts: - - mountPath: /var/lib/openstack/configs/nova/ssh-privatekey - name: nova-migration-ssh-key-0 - subPath: ssh-privatekey - - mountPath: /var/lib/openstack/configs/nova/ssh-publickey - name: nova-migration-ssh-key-1 - subPath: ssh-publickey - volumes: - - name: nova-migration-ssh-key-0 - secret: - items: - - key: ssh-privatekey - path: ssh-privatekey - secretName: nova-migration-ssh-key - - name: nova-migration-ssh-key-1 - secret: - items: - - key: ssh-publickey - path: ssh-publickey - secretName: nova-migration-ssh-key - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: 
dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - preserveJobs: true - restartPolicy: Never - playbook: osp.edpm.nova - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: nova + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.nova + - -i + - nova-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.nova + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: nova + foo: bar + + + imagePullPolicy: Always + name: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/nova/01-nova.conf + name: nova-cell1-compute-config-0 + subPath: 01-nova.conf + - mountPath: /var/lib/openstack/configs/nova/nova-blank.conf + name: nova-cell1-compute-config-1 + subPath: nova-blank.conf + - mountPath: /var/lib/openstack/configs/nova/ssh-privatekey + name: nova-migration-ssh-key-0 + subPath: ssh-privatekey + - mountPath: /var/lib/openstack/configs/nova/ssh-publickey + name: nova-migration-ssh-key-1 + subPath: ssh-publickey + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: nova-cell1-compute-config-0 + secret: + defaultMode: 420 + items: + - key: 01-nova.conf + path: 01-nova.conf + secretName: nova-cell1-compute-config + - name: nova-cell1-compute-config-1 + secret: + defaultMode: 420 + items: + - key: nova-blank.conf + path: nova-blank.conf + secretName: nova-cell1-compute-config + - name: nova-migration-ssh-key-0 + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh-privatekey + secretName: nova-migration-ssh-key + - name: nova-migration-ssh-key-1 + secret: + defaultMode: 420 + items: + - key: ssh-publickey + path: ssh-publickey + secretName: nova-migration-ssh-key + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml index dee63901d..005edd66a 100644 --- 
a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml @@ -6,65 +6,113 @@ collectors: command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack name: operator-logs --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: generation: 1 - name: custom-svc-edpm-compute-no-nodes-ovrd-edpm-compute-no-nodes + labels: + app: openstackansibleee + job-name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: configure-os + osaee: "true" + name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 blockOwnerDeletion: true controller: true kind: OpenStackDataPlaneDeployment - name: edpm-compute-no-nodes-ovrd + name: edpm-compute-no-nodes spec: backoffLimit: 6 - env: - - name: ANSIBLE_FORCE_COLOR - value: "True" - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - playbookContents: | - - hosts: localhost - gather_facts: no - name: kuttl play - tasks: - - name: Sleep - command: sleep 1 - delegate_to: localhost - preserveJobs: true - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: configure-os + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.configure_os + - -i + - configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.configure_os + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: configure-os + foo: bar + + + imagePullPolicy: Always + name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + 
serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml index 14f8ce1eb..7bced2e02 100644 --- a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml @@ -66,10 +66,17 @@ status: status: "True" type: SetupReady --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: - generation: 1 + labels: + app: openstackansibleee + job-name: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + openstackansibleee_cr: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes-updated-ovn-cm + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: ovn + osaee: "true" name: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -80,50 +87,100 @@ metadata: name: edpm-compute-no-nodes-updated-ovn-cm spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config - name: ovncontroller-config-0 - subPath: ovsdb-config - volumes: - - configMap: - items: - - key: ovsdb-config - path: ovsdb-config - name: ovncontroller-config - name: ovncontroller-config-0 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.ovn - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + openstackansibleee_cr: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes-updated-ovn-cm + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: ovn + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.ovn + - -i + - ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.ovn + + - name: RUNNER_EXTRA_VARS + value: |2+ + + 
edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: ovn + edpm_services_override: [ovn] + + + imagePullPolicy: Always + name: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config + name: ovncontroller-config-0 + subPath: ovsdb-config + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + items: + - key: ovsdb-config + path: ovsdb-config + name: ovncontroller-config + name: ovncontroller-config-0 + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml index 26f623ba2..e4d5b21c3 100644 --- a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml @@ -71,10 +71,18 @@ spec: - edpm-compute-no-nodes - edpm-compute-beta-nodeset --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: - name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + labels: + app: openstackansibleee + job-name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: bootstrap + osaee: "true" + name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 @@ -84,47 +92,103 @@ metadata: name: edpm-multinodeset spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-beta-nodeset - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.download_cache - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: 
bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + job-name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: bootstrap + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.bootstrap + - -i + - bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.bootstrap + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-beta-nodeset + edpm_service_type: bootstrap + foo: bar + + + imagePullPolicy: Always + name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-beta-nodeset + serviceAccountName: edpm-compute-beta-nodeset + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: - name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + labels: + app: openstackansibleee + job-name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: download-cache + osaee: "true" + name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 @@ -134,39 +198,90 @@ metadata: name: edpm-multinodeset spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-beta-nodeset - name: openstackansibleee - restartPolicy: Never - playbook: osp.edpm.bootstrap - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: 
openstackansibleee + batch.kubernetes.io/job-name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + job-name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackansibleee_cr: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + openstackdataplanedeployment: edpm-multinodeset + openstackdataplanenodeset: edpm-compute-beta-nodeset + openstackdataplaneservice: download-cache + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - osp.edpm.download_cache + - -i + - download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + osp.edpm.download_cache + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-beta-nodeset + edpm_service_type: download-cache + foo: bar + + + imagePullPolicy: Always + name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-beta-nodeset + serviceAccountName: edpm-compute-beta-nodeset + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml index 3b154fc9f..9c0553db8 100644 --- a/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml @@ -133,9 +133,17 @@ metadata: name: openstack-edpm-tls type: Opaque --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + labels: + app: openstackansibleee + job-name: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: install-certs-ovrd + osaee: "true" name: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls namespace: openstack-kuttl-tests ownerReferences: @@ -146,110 +154,233 @@ metadata: name: openstack-edpm-tls spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/certs/tls-dnsnames/default - name: openstack-edpm-tls-tls-dnsnames-default-certs-0 - volumes: - - name: openstack-edpm-tls-tls-dnsnames-default-certs-0 - projected: - sources: - - secret: - name: openstack-edpm-tls-tls-dnsnames-default-certs-0 - - mounts: - - mountPath: /var/lib/openstack/certs/tls-dnsnames/second - name: openstack-edpm-tls-tls-dnsnames-second-certs-0 - volumes: - - name: 
openstack-edpm-tls-tls-dnsnames-second-certs-0 - projected: - sources: - - secret: - name: openstack-edpm-tls-tls-dnsnames-second-certs-0 - - mounts: - - mountPath: /var/lib/openstack/cacerts/tls-dnsnames - name: tls-dnsnames-combined-ca-bundle - volumes: - - name: tls-dnsnames-combined-ca-bundle - secret: - secretName: combined-ca-bundle - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + job-name: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: install-certs-ovrd + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: openstack-edpm-tls + edpm_service_type: install-certs-ovrd + + + imagePullPolicy: Always + name: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/certs/tls-dnsnames/default + name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + - mountPath: /var/lib/openstack/certs/tls-dnsnames/second + name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + - mountPath: /var/lib/openstack/cacerts/tls-dnsnames + name: tls-dnsnames-combined-ca-bundle + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: openstack-edpm-tls + serviceAccountName: openstack-edpm-tls + terminationGracePeriodSeconds: 30 + volumes: + - name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + projected: + defaultMode: 420 + sources: + - secret: + name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + - name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + projected: + defaultMode: 420 + sources: + - secret: + name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + - name: tls-dnsnames-combined-ca-bundle + secret: + defaultMode: 420 + secretName: combined-ca-bundle + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + 
defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + labels: + app: openstackansibleee + job-name: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: tls-dnsnames + osaee: "true" name: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true kind: OpenStackDataPlaneDeployment name: openstack-edpm-tls spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + job-name: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + openstackansibleee_cr: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: tls-dnsnames + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: openstack-edpm-tls + edpm_service_type: tls-dnsnames + + + imagePullPolicy: Always + name: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: openstack-edpm-tls + serviceAccountName: openstack-edpm-tls + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + 
secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml index 1bff8a38b..386bc8a83 100644 --- a/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml @@ -139,10 +139,18 @@ metadata: name: openstack-edpm-tls type: Opaque --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: - name: install-certs-ovrd-openstack-edpm-tls-ovrd-openstack-edpm-tls + labels: + app: openstackansibleee + job-name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackansibleee_cr: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls-ovrd + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: tls-dns-ips + osaee: "true" + name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 @@ -152,164 +160,211 @@ metadata: name: openstack-edpm-tls-ovrd spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/certs/tls-dns-ips/default - name: openstack-edpm-tls-tls-dns-ips-default-certs-0 - volumes: - - name: openstack-edpm-tls-tls-dns-ips-default-certs-0 - projected: - sources: - - secret: - name: openstack-edpm-tls-tls-dns-ips-default-certs-0 - - mounts: - - mountPath: /var/lib/openstack/cacerts/tls-dns-ips - name: tls-dns-ips-combined-ca-bundle - volumes: - - name: tls-dns-ips-combined-ca-bundle - secret: - secretName: combined-ca-bundle - - mounts: - - mountPath: /var/lib/openstack/certs/custom-tls-dns/default - name: openstack-edpm-tls-custom-tls-dns-default-certs-0 - volumes: - - name: openstack-edpm-tls-custom-tls-dns-default-certs-0 - projected: - sources: - - secret: - name: openstack-edpm-tls-custom-tls-dns-default-certs-0 - - mounts: - - mountPath: /var/lib/openstack/cacerts/custom-tls-dns - name: custom-tls-dns-combined-ca-bundle - volumes: - - name: custom-tls-dns-combined-ca-bundle - secret: - secretName: combined-ca-bundle - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + job-name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackansibleee_cr: 
tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls-ovrd + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: tls-dns-ips + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: openstack-edpm-tls + edpm_service_type: tls-dns-ips + edpm_services_override: [install-certs-ovrd tls-dns-ips custom-tls-dns] + + + imagePullPolicy: Always + name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: openstack-edpm-tls + serviceAccountName: openstack-edpm-tls + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE -metadata: - name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls - namespace: openstack-kuttl-tests - ownerReferences: - - apiVersion: dataplane.openstack.org/v1beta1 - kind: OpenStackDataPlaneDeployment - name: openstack-edpm-tls-ovrd -spec: - backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 -status: - JobStatus: Succeeded - conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady ---- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackansibleee_cr: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls-ovrd + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: custom-tls-dns + osaee: "true" name: 
custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls namespace: openstack-kuttl-tests ownerReferences: - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true kind: OpenStackDataPlaneDeployment name: openstack-edpm-tls-ovrd spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-openstack-edpm-tls - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + job-name: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackansibleee_cr: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + openstackdataplanedeployment: openstack-edpm-tls-ovrd + openstackdataplanenodeset: openstack-edpm-tls + openstackdataplaneservice: custom-tls-dns + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: openstack-edpm-tls + edpm_service_type: custom-tls-dns + edpm_services_override: [install-certs-ovrd tls-dns-ips custom-tls-dns] + + + imagePullPolicy: Always + name: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: openstack-edpm-tls + serviceAccountName: openstack-edpm-tls + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls status: - JobStatus: Succeeded conditions: - - message: Job completed - reason: Ready - status: "True" - type: Ready - - message: Job completed - reason: Ready - status: "True" - type: JobReady + - status: "True" + type: Complete + ready: 0 + succeeded: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml b/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml index 21897befd..67cb57e81 100644 --- a/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml +++ b/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml @@ -27,9 +27,17 @@ spec: claimName: edpm-ansible readOnly: true --- -apiVersion: ansibleee.openstack.org/v1beta1 
-kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + labels: + app: openstackansibleee + job-name: test-service-edpm-extramounts-edpm-extramounts + openstackansibleee_cr: test-service-edpm-extramounts-edpm-extramounts + openstackdataplanedeployment: edpm-extramounts + openstackdataplanenodeset: edpm-extramounts + openstackdataplaneservice: test-service + osaee: "true" name: test-service-edpm-extramounts-edpm-extramounts namespace: openstack-kuttl-tests ownerReferences: @@ -39,33 +47,92 @@ metadata: kind: OpenStackDataPlaneDeployment name: edpm-extramounts spec: - extraMounts: - - extraVolType: edpm-ansible - mounts: - - mountPath: /usr/share/ansible/collections/ansible_collections/osp/edpm - name: edpm-ansible - volumes: - - name: edpm-ansible - persistentVolumeClaim: - claimName: edpm-ansible - readOnly: true - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-extramounts + backoffLimit: 6 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: test-service-edpm-extramounts-edpm-extramounts + job-name: test-service-edpm-extramounts-edpm-extramounts + openstackansibleee_cr: test-service-edpm-extramounts-edpm-extramounts + openstackdataplanedeployment: edpm-extramounts + openstackdataplanenodeset: edpm-extramounts + openstackdataplaneservice: test-service + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - test.yml + - -i + - test-service-edpm-extramounts-edpm-extramounts + env: + - name: RUNNER_PLAYBOOK + value: |2+ + + test.yml + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-extramounts + edpm_service_type: test-service + + + imagePullPolicy: Always + name: test-service-edpm-extramounts-edpm-extramounts + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /usr/share/ansible/collections/ansible_collections/osp/edpm + name: edpm-ansible + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-extramounts + serviceAccountName: edpm-extramounts + terminationGracePeriodSeconds: 30 + volumes: + - name: edpm-ansible + persistentVolumeClaim: + claimName: edpm-ansible + readOnly: true + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-extramounts +status: + active: 1 + ready: 0 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-service-config/00-assert.yaml b/tests/kuttl/tests/dataplane-service-config/00-assert.yaml index 2f7e527ab..5c8256cbf 
100644 --- a/tests/kuttl/tests/dataplane-service-config/00-assert.yaml +++ b/tests/kuttl/tests/dataplane-service-config/00-assert.yaml @@ -6,9 +6,18 @@ collectors: command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack name: operator-logs --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + generation: 1 + labels: + app: openstackansibleee + job-name: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: kuttl-service + osaee: "true" name: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -18,109 +27,165 @@ metadata: kind: OpenStackDataPlaneDeployment name: edpm-compute-no-nodes spec: - env: - - name: ANSIBLE_FORCE_COLOR - value: "True" backoffLimit: 6 - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /var/lib/openstack/configs/kuttl-service/00-ansibleVars - subPath: 00-ansibleVars - - mountPath: /var/lib/openstack/configs/kuttl-service/00-kuttl-service.conf - subPath: 00-kuttl-service.conf - - mountPath: /var/lib/openstack/configs/kuttl-service/01-kuttl-service.conf - subPath: 01-kuttl-service.conf - volumes: - - configMap: - items: - - key: 00-ansibleVars - path: 00-ansibleVars - name: kuttl-service-cm-0 - - configMap: - items: - - key: 00-kuttl-service.conf - path: 00-kuttl-service.conf - name: kuttl-service-cm-0 - - configMap: - items: - - key: 01-kuttl-service.conf - path: 01-kuttl-service.conf - name: kuttl-service-cm-0 - - mounts: - - mountPath: /var/lib/openstack/configs/kuttl-service/01-ansibleVars - subPath: 01-ansibleVars - - mountPath: /var/lib/openstack/configs/kuttl-service/10-kuttl-service.conf - subPath: 10-kuttl-service.conf - - mountPath: /var/lib/openstack/configs/kuttl-service/20-kuttl-service.conf - subPath: 20-kuttl-service.conf - volumes: - - configMap: - items: - - key: 01-ansibleVars - path: 01-ansibleVars - name: kuttl-service-cm-1 - - configMap: - items: - - key: 10-kuttl-service.conf - path: 10-kuttl-service.conf - name: kuttl-service-cm-1 - - configMap: - items: - - key: 20-kuttl-service.conf - path: 20-kuttl-service.conf - name: kuttl-service-cm-1 - - mounts: - - mountPath: /var/lib/openstack/configs/kuttl-service/30-kuttl-service.conf - subPath: 30-kuttl-service.conf - volumes: - - configMap: - items: - - key: 30-kuttl-service.conf - path: 30-kuttl-service.conf - name: kuttl-service-cm-2 - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - name: openstackansibleee - playbookContents: | - - hosts: localhost - gather_facts: no - name: kuttl play - tasks: - - name: Sleep - command: sleep infinity - delegate_to: localhost - preserveJobs: true - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + 
k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: kuttl-service + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep infinity + delegate_to: localhost + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: kuttl-service + + + imagePullPolicy: Always + name: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/openstack/configs/kuttl-service/00-ansibleVars + name: kuttl-service-cm-0-0 + subPath: 00-ansibleVars + - mountPath: /var/lib/openstack/configs/kuttl-service/00-kuttl-service.conf + name: kuttl-service-cm-0-1 + subPath: 00-kuttl-service.conf + - mountPath: /var/lib/openstack/configs/kuttl-service/01-kuttl-service.conf + name: kuttl-service-cm-0-2 + subPath: 01-kuttl-service.conf + - mountPath: /var/lib/openstack/configs/kuttl-service/01-ansibleVars + name: kuttl-service-cm-1-0 + subPath: 01-ansibleVars + - mountPath: /var/lib/openstack/configs/kuttl-service/10-kuttl-service.conf + name: kuttl-service-cm-1-1 + subPath: 10-kuttl-service.conf + - mountPath: /var/lib/openstack/configs/kuttl-service/20-kuttl-service.conf + name: kuttl-service-cm-1-2 + subPath: 20-kuttl-service.conf + - mountPath: /var/lib/openstack/configs/kuttl-service/30-kuttl-service.conf + name: kuttl-service-cm-2-0 + subPath: 30-kuttl-service.conf + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + items: + - key: 00-ansibleVars + path: 00-ansibleVars + name: kuttl-service-cm-0 + name: kuttl-service-cm-0-0 + - configMap: + defaultMode: 420 + items: + - key: 00-kuttl-service.conf + path: 00-kuttl-service.conf + name: kuttl-service-cm-0 + name: kuttl-service-cm-0-1 + - configMap: + defaultMode: 420 + items: + - key: 01-kuttl-service.conf + path: 01-kuttl-service.conf + name: kuttl-service-cm-0 + name: kuttl-service-cm-0-2 + - configMap: + defaultMode: 420 + items: + - key: 01-ansibleVars + path: 01-ansibleVars + name: kuttl-service-cm-1 + name: kuttl-service-cm-1-0 + - configMap: + defaultMode: 420 + items: + - key: 10-kuttl-service.conf + path: 10-kuttl-service.conf + name: kuttl-service-cm-1 + name: kuttl-service-cm-1-1 + - configMap: + defaultMode: 420 + items: + - key: 20-kuttl-service.conf + path: 20-kuttl-service.conf + name: kuttl-service-cm-1 + name: kuttl-service-cm-1-2 + - configMap: + defaultMode: 420 + items: + - 
key: 30-kuttl-service.conf + path: 30-kuttl-service.conf + name: kuttl-service-cm-2 + name: kuttl-service-cm-2-0 + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Running - conditions: - - message: Job in progress - reason: Requested - severity: Info - status: "False" - type: Ready - - message: Job in progress - reason: Requested - severity: Info - status: "False" - type: JobReady + active: 1 + ready: 1 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml b/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml index cb77b1109..9c765e896 100644 --- a/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml +++ b/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml @@ -54,9 +54,17 @@ status: status: "True" type: SetupReady --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: + labels: + app: openstackansibleee + job-name: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + openstackansibleee_cr: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-no-nodes-custom-svc + openstackdataplaneservice: custom-img-svc + osaee: "true" name: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc namespace: openstack-kuttl-tests ownerReferences: @@ -67,41 +75,86 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 6 - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-no-nodes-custom-svc - image: example.com/repo/runner-image:latest - name: openstackansibleee - restartPolicy: Never - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + job-name: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + openstackansibleee_cr: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-no-nodes-custom-svc + openstackdataplaneservice: custom-img-svc + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + env: + - name: RUNNER_PLAYBOOK + value: |2+ + + playbook.yaml + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-no-nodes-custom-svc + edpm_service_type: custom-img-svc + + + image: example.com/repo/runner-image:latest + imagePullPolicy: Always + name: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc 
+ resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-no-nodes-custom-svc + serviceAccountName: edpm-no-nodes-custom-svc + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-no-nodes-custom-svc status: - JobStatus: Running - conditions: - - message: Job in progress - reason: Requested - severity: Info - status: "False" - type: Ready - - message: Job in progress - reason: Requested - severity: Info - status: "False" - type: JobReady + active: 1 + ready: 0 + terminating: 0 + uncountedTerminatedPods: {} diff --git a/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml b/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml index b27179bda..a07dc6272 100644 --- a/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml +++ b/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml @@ -6,14 +6,17 @@ collectors: command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack name: operator-logs --- -apiVersion: ansibleee.openstack.org/v1beta1 -kind: OpenStackAnsibleEE +apiVersion: batch/v1 +kind: Job metadata: - generation: 1 labels: + app: openstackansibleee + job-name: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes openstackdataplanedeployment: edpm-compute-no-nodes openstackdataplanenodeset: edpm-compute-no-nodes openstackdataplaneservice: failed-service + osaee: "true" name: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes namespace: openstack-kuttl-tests ownerReferences: @@ -24,64 +27,95 @@ metadata: name: edpm-compute-no-nodes spec: backoffLimit: 3 - env: - - name: ANSIBLE_FORCE_COLOR - value: "True" - envConfigMapName: openstack-aee-default-env - extraMounts: - - mounts: - - mountPath: /runner/env/ssh_key - name: ssh-key - subPath: ssh_key - - mountPath: /runner/inventory/hosts - name: inventory - subPath: inventory - volumes: - - name: ssh-key - secret: - items: - - key: ssh-privatekey - path: ssh_key - secretName: dataplane-ansible-ssh-private-key-secret - - name: inventory - secret: - items: - - key: inventory - path: inventory - secretName: dataplanenodeset-edpm-compute-no-nodes - extraVars: - edpm_override_hosts: edpm-compute-no-nodes - edpm_service_type: failed-service - name: openstackansibleee - playbookContents: | - - hosts: localhost - gather_facts: no - name: kuttl play - tasks: - - name: Copy absent file - ansible.builtin.shell: | - set -euxo pipefail - cp absent failed_op - preserveJobs: true - restartPolicy: Never - serviceAccountName: edpm-compute-no-nodes - uid: 1001 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + suspend: false + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: '[]' + creationTimestamp: null + labels: + app: openstackansibleee + batch.kubernetes.io/job-name: 
failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + job-name: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackansibleee_cr: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: failed-service + osaee: "true" + spec: + containers: + - args: + - ansible-runner + - run + - /runner + - -p + - playbook.yaml + - -i + - failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + - name: RUNNER_PLAYBOOK + value: |2+ + + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Copy absent file + ansible.builtin.shell: | + set -euxo pipefail + cp absent failed_op + + + - name: RUNNER_EXTRA_VARS + value: |2+ + + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: failed-service + + + imagePullPolicy: Always + name: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + securityContext: {} + serviceAccount: edpm-compute-no-nodes + serviceAccountName: edpm-compute-no-nodes + terminationGracePeriodSeconds: 30 + volumes: + - name: ssh-key + secret: + defaultMode: 420 + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + defaultMode: 420 + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes status: - JobStatus: Failed - conditions: - - message: 'Job error occurred Internal error occurred: Job has reached the specified - backoff limit. Check job logs' - reason: BackoffLimitExceeded - severity: Error - status: "False" - type: Ready - - message: 'Job error occurred Internal error occurred: Job has reached the specified - backoff limit. 
Check job logs' - reason: BackoffLimitExceeded - severity: Error - status: "False" - type: JobReady - observedGeneration: 1 + failed: 1 --- apiVersion: dataplane.openstack.org/v1beta1 kind: OpenStackDataPlaneNodeSet @@ -102,14 +136,14 @@ status: conditions: - message: 'Deployment error occurred in failed-service service error backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: Error severity: Error status: "False" type: Ready - message: 'Deployment error occurred in failed-service service error backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: Error severity: Error status: "False" @@ -138,14 +172,14 @@ status: edpm-compute-no-nodes: - message: 'Deployment error occurred in failed-service service error backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: BackoffLimitExceeded severity: Error status: "False" type: NodeSetDeploymentReady - message: 'Deployment error occurred in failed-service service error backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: BackoffLimitExceeded severity: Error status: "False" @@ -167,14 +201,14 @@ status: conditions: - message: 'Deployment error occurred nodeSet: edpm-compute-no-nodes error: backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: BackoffLimitExceeded severity: Error status: "False" type: Ready - message: 'Deployment error occurred nodeSet: edpm-compute-no-nodes error: backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: BackoffLimitExceeded severity: Error status: "False" @@ -187,14 +221,14 @@ status: edpm-compute-no-nodes: - message: 'Deployment error occurred in failed-service service error backoff limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: BackoffLimitExceeded severity: Error status: "False" type: NodeSetDeploymentReady - message: 'Deployment error occurred in failed-service service error backoff 
limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes - execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + execution.namespace openstack-kuttl-tests execution.condition.message: Job has reached the specified backoff limit' reason: BackoffLimitExceeded severity: Error status: "False"