diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 000000000..fc880d757 --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,58 @@ +name: Build Docs +on: + workflow_dispatch: + push: + branches: + - main + pull_request: + branches: + - main + paths: + - .github/workflows/docs* + - apis/client/v1beta1/** + - apis/core/v1beta1/** + - apis/dataplane/v1beta1/** + - docs/** + - Gemfile +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v3 + with: + go-version: 1.20.x + - uses: actions/checkout@v4 + with: + # this fetches all branches. Needed because we need gh-pages branch for deploy to work + fetch-depth: 0 + - uses: ruby/setup-ruby@v1.160.0 + with: + ruby-version: '3.2' + + - name: Install Asciidoc + run: make docs-dependencies + - name: Build docs + run: | + make docs + + - name: Prepare gh-pages branch + run: | + git restore docs/assemblies/custom_resources.adoc + git config user.name github-actions + git config user.email github-actions@github.com + + git branch -D gh-pages &>/dev/null || true + git checkout -b gh-pages 4cd0193fc6c5bc7e76f3a0148d0447fb0d7fbe6a + + - name: Commit asciidoc docs + run: | + mkdir user dev + mv docs_build/ctlplane/index-upstream.html index.html + git add index.html + git commit -m "Rendered docs" + + - name: Push rendered docs to gh-pages + if: github.event_name == 'push' && github.ref == 'refs/heads/main' + run: | + git push --force origin gh-pages diff --git a/.github/workflows/kustom.yaml b/.github/workflows/kustom.yaml new file mode 100644 index 000000000..0c6e0d434 --- /dev/null +++ b/.github/workflows/kustom.yaml @@ -0,0 +1,36 @@ +name: Kustomize Build +on: + workflow_dispatch: + push: + branches: + - main + pull_request: + branches: + - main + paths: + - config/samples/** +jobs: + kustomize: + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v3 + with: + go-version: 1.20.x + - uses: actions/checkout@v4 + 
with: + # this fetches all branches. Needed because we need gh-pages branch for deploy to work + fetch-depth: 0 + - name: download kustomize + run: | + mkdir bin + LINK=https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh + curl -Ss $LINK | bash -s -- 5.0.1 ./bin + - name: kustomize build + run: | + cd config/samples/dataplane + + for d in */ ; do + echo "=============== $d ===============" + ../../../bin/kustomize build --load-restrictor LoadRestrictionsNone "$d" + done diff --git a/.gitignore b/.gitignore index c81d32a07..116da3b77 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,9 @@ CI_TOOLS_REPO # generated workspace file go.work go.work.sum + +# docs +.bundle/ +docs_build/ +Gemfile.lock +local/ diff --git a/Dockerfile b/Dockerfile index aae778daa..c9d6afc64 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,6 +27,8 @@ RUN if [ ! -f $CACHITO_ENV_FILE ]; then go mod download ; fi # Build manager RUN if [ -f $CACHITO_ENV_FILE ] ; then source $CACHITO_ENV_FILE ; fi ; env ${GO_BUILD_EXTRA_ENV_ARGS} go build ${GO_BUILD_EXTRA_ARGS} -a -o ${DEST_ROOT}/manager main.go +RUN cp -r config/services ${DEST_ROOT}/services + # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details FROM $OPERATOR_BASE_IMAGE @@ -56,13 +58,17 @@ LABEL com.redhat.component="${IMAGE_COMPONENT}" \ io.openshift.tags="${IMAGE_TAGS}" ### DO NOT EDIT LINES ABOVE -ENV USER_UID=$USER_ID +ENV USER_UID=$USER_ID \ + OPERATOR_SERVICES=/usr/share/openstack-operator/services/ WORKDIR / # Install operator binary to WORKDIR COPY --from=builder ${DEST_ROOT}/manager . 
+# Install services +COPY --from=builder ${DEST_ROOT}/services ${OPERATOR_SERVICES} + USER $USER_ID ENV PATH="/:${PATH}" diff --git a/Gemfile b/Gemfile new file mode 100644 index 000000000..002b2b82c --- /dev/null +++ b/Gemfile @@ -0,0 +1,9 @@ +source 'https://rubygems.org' + +gem 'asciidoctor', '~> 2.0', '>= 2.0.20' + +# Uncomment for ability to render pdf: +# gem 'asciidoctor-pdf', '~> 2.0', '>= 2.0.20' + +# Uncomment for ability to convert Markdown to AsciiDoc +gem 'kramdown-asciidoc' diff --git a/Makefile b/Makefile index 59d8f65b3..bb0193339 100644 --- a/Makefile +++ b/Makefile @@ -76,6 +76,36 @@ DOCKER_BUILD_ARGS ?= .PHONY: all all: build +##@ Docs + +.PHONY: docs-dependencies +docs-dependencies: .bundle + +.PHONY: .bundle +.bundle: + if ! type bundle; then \ + echo "Bundler not found. On Linux run 'sudo dnf install /usr/bin/bundle' to install it."; \ + exit 1; \ + fi + + bundle config set --local path 'local/bundle'; bundle install + +.PHONY: docs +docs: manifests docs-dependencies crd-to-markdown ## Build docs + CRD_MARKDOWN=$(CRD_MARKDOWN) MAKE=$(MAKE) ./docs/build_docs.sh + +.PHONY: docs-preview +docs-preview: docs + cd docs; $(MAKE) open-html + +.PHONY: docs-watch +docs-watch: docs-preview + cd docs; $(MAKE) watch-html + +.PHONY: docs-clean +docs-clean: + rm -r docs_build + ##@ General # The help target prints out all targets with their descriptions organized @@ -133,11 +163,21 @@ golangci-lint: $(LOCALBIN)/golangci-lint run --fix .PHONY: test -test: manifests generate gowork fmt vet envtest ginkgo ## Run tests. +test: manifests generate gowork fmt vet envtest ginkgo ginkgo-run ## Run ginkgo tests with dependencies. + +.PHONY: ginkgo-run +ginkgo-run: ## Run ginkgo. 
source hack/export_related_images.sh && \ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) -v debug --bin-dir $(LOCALBIN) use $(ENVTEST_K8S_VERSION) -p path)" \ OPERATOR_TEMPLATES="$(PWD)/templates" \ - $(GINKGO) --trace --cover --coverpkg=../../pkg/openstack,../../pkg/openstackclient,../../pkg/util,../../controllers,../../apis/client/v1beta1,../../apis/core/v1beta1 --coverprofile cover.out --covermode=atomic ${PROC_CMD} $(GINKGO_ARGS) ./tests/... ./apis/client/... + $(GINKGO) --trace --cover --coverpkg=../../pkg/openstack,../../pkg/openstackclient,../../pkg/util,../../controllers,../../apis/client/v1beta1,../../apis/core/v1beta1,../../apis/dataplane/v1beta1 --coverprofile cover.out --covermode=atomic ${PROC_CMD} $(GINKGO_ARGS) ./tests/... ./apis/client/... + +.PHONY: test-all +test-all: test golint golangci golangci-lint ## Run all tests. + +.PHONY: cover +cover: test ## Run tests and display functional test coverage + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go tool cover -html=cover.out ##@ Build @@ -176,7 +216,7 @@ docker-buildx: ## Build and push docker image for the manager for cross-platfor sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross - docker buildx create --name project-v3-builder docker buildx use project-v3-builder - - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . 
- docker buildx rm project-v3-builder rm Dockerfile.cross @@ -214,11 +254,15 @@ $(LOCALBIN): KUSTOMIZE ?= $(LOCALBIN)/kustomize CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ENVTEST ?= $(LOCALBIN)/setup-envtest +CRD_MARKDOWN ?= $(LOCALBIN)/crd-to-markdown GINKGO ?= $(LOCALBIN)/ginkgo +KUTTL ?= $(LOCALBIN)/kubectl-kuttl ## Tool Versions KUSTOMIZE_VERSION ?= v3.8.7 CONTROLLER_TOOLS_VERSION ?= v0.11.1 +CRD_MARKDOWN_VERSION ?= v0.0.3 +KUTTL_VERSION ?= 0.15.0 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize @@ -236,6 +280,11 @@ $(CONTROLLER_GEN): $(LOCALBIN) test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) +.PHONY: crd-to-markdown +crd-to-markdown: $(CRD_MARKDOWN) ## Download crd-to-markdown locally if necessary. +$(CRD_MARKDOWN): $(LOCALBIN) + test -s $(LOCALBIN)/crd-to-markdown || GOBIN=$(LOCALBIN) go install github.com/clamoriniere/crd-to-markdown@$(CRD_MARKDOWN_VERSION) + .PHONY: envtest envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. $(ENVTEST): $(LOCALBIN) @@ -246,6 +295,16 @@ ginkgo: $(GINKGO) ## Download ginkgo locally if necessary. $(GINKGO): $(LOCALBIN) test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo +.PHONY: kuttl-test +kuttl-test: ## Run kuttl tests + $(LOCALBIN)/kubectl-kuttl test --config kuttl-test.yaml tests/kuttl/tests $(KUTTL_ARGS) + +.PHONY: kuttl +kuttl: $(KUTTL) ## Download kubectl-kuttl locally if necessary. 
+$(KUTTL): $(LOCALBIN) + test -s $(LOCALBIN)/kubectl-kuttl || curl -L -o $(LOCALBIN)/kubectl-kuttl https://github.com/kudobuilder/kuttl/releases/download/v$(KUTTL_VERSION)/kubectl-kuttl_$(KUTTL_VERSION)_linux_x86_64 + chmod +x $(LOCALBIN)/kubectl-kuttl + .PHONY: operator-sdk OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk operator-sdk: ## Download operator-sdk locally if necessary. @@ -296,6 +355,12 @@ OPM = $(shell which opm) endif endif +.PHONY: yq +yq: ## Download and install yq in local env + test -s $(LOCALBIN)/yq || ( cd $(LOCALBIN) &&\ + wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64.tar.gz -O - |\ + tar xz && mv yq_linux_amd64 $(LOCALBIN)/yq ) + # Build make variables to export for shell MAKE_ENV := $(shell echo '$(.VARIABLES)' | awk -v RS=' ' '/^(IMAGE)|.*?(REGISTRY)$$/') SHELL_EXPORT = $(foreach v,$(MAKE_ENV),$(v)='$($(v))') @@ -394,6 +459,10 @@ run-with-webhook: manifests generate fmt vet ## Run a controller from your host. source hack/export_related_images.sh && \ go run ./main.go -metrics-bind-address ":$(METRICS_PORT)" -health-probe-bind-address ":$(HEALTH_PORT)" +.PHONY: webhook-cleanup +webhook-cleanup: + /bin/bash hack/clean_local_webhook.sh + # refresh the bundle extra data based on go.mod entries # bundle extra data includes: # - extracted ENV vars from all operators (required for webhooks) diff --git a/OWNERS b/OWNERS index c23a45061..564982606 100644 --- a/OWNERS +++ b/OWNERS @@ -2,7 +2,9 @@ approvers: - ci-approvers - openstack-approvers + - dataplane-approvers reviewers: - ci-approvers - openstack-approvers + - dataplane-approvers diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index b70792ba3..b417b23c4 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -5,8 +5,18 @@ aliases: - lewisdenny - frenzyfriday - viroel + docs-approvers: + - igallagh-redhat openstack-approvers: - abays - dprince - olliewalsh - stuggi + dataplane-approvers: + - fao89 + - fultonj + - rebtoor + - slagle + - bshephar + - rabi + - jpodivin diff --git 
a/PROJECT b/PROJECT index 27e77b65b..6112f528e 100644 --- a/PROJECT +++ b/PROJECT @@ -46,4 +46,35 @@ resources: defaulting: true validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: dataplane + kind: OpenStackDataPlaneNodeSet + path: github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1 + version: v1beta1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: dataplane + kind: OpenStackDataPlaneService + path: github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1 + version: v1beta1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: dataplane + kind: OpenStackDataPlaneDeployment + path: github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1 + version: v1beta1 version: "3" diff --git a/apis/bases/dataplane.openstack.org_openstackdataplanedeployments.yaml b/apis/bases/dataplane.openstack.org_openstackdataplanedeployments.yaml new file mode 100644 index 000000000..e1cea05f8 --- /dev/null +++ b/apis/bases/dataplane.openstack.org_openstackdataplanedeployments.yaml @@ -0,0 +1,155 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: openstackdataplanedeployments.dataplane.openstack.org +spec: + group: dataplane.openstack.org + names: + kind: OpenStackDataPlaneDeployment + listKind: OpenStackDataPlaneDeploymentList + plural: openstackdataplanedeployments + shortNames: + - osdpd + - osdpdeployment + - osdpdeployments + singular: openstackdataplanedeployment + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: NodeSets + jsonPath: .spec.nodeSets + name: NodeSets + type: string + - description: Status + jsonPath: 
.status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ansibleExtraVars: + x-kubernetes-preserve-unknown-fields: true + ansibleLimit: + type: string + ansibleSkipTags: + type: string + ansibleTags: + type: string + backoffLimit: + default: 6 + format: int32 + type: integer + deploymentRequeueTime: + default: 15 + minimum: 1 + type: integer + nodeSets: + items: + type: string + minItems: 1 + type: array + servicesOverride: + items: + type: string + type: array + required: + - deploymentRequeueTime + - nodeSets + type: object + x-kubernetes-validations: + - message: OpenStackDataPlaneDeployment Spec is immutable + rule: self == oldSelf + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + configMapHashes: + additionalProperties: + type: string + type: object + containerImages: + additionalProperties: + type: string + type: object + deployed: + type: boolean + deployedVersion: + type: string + nodeSetConditions: + additionalProperties: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + nodeSetHashes: + additionalProperties: + type: string + type: object + observedGeneration: + format: int64 + type: integer + secretHashes: + additionalProperties: + type: string + type: object + type: 
object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/apis/bases/dataplane.openstack.org_openstackdataplanenodesets.yaml b/apis/bases/dataplane.openstack.org_openstackdataplanenodesets.yaml new file mode 100644 index 000000000..a2cfc7d69 --- /dev/null +++ b/apis/bases/dataplane.openstack.org_openstackdataplanenodesets.yaml @@ -0,0 +1,2045 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: openstackdataplanenodesets.dataplane.openstack.org +spec: + group: dataplane.openstack.org + names: + kind: OpenStackDataPlaneNodeSet + listKind: OpenStackDataPlaneNodeSetList + plural: openstackdataplanenodesets + shortNames: + - osdpns + - osdpnodeset + - osdpnodesets + singular: openstackdataplanenodeset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + baremetalSetTemplate: + properties: + agentImageUrl: + type: string + apacheImageUrl: + type: string + automatedCleaningMode: + default: metadata + enum: + - metadata + - disabled + type: string + baremetalHosts: + additionalProperties: + properties: + ctlPlaneIP: + type: string + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + preprovisioningNetworkDataName: + type: string + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + bmhLabelSelector: + additionalProperties: + type: string + type: object + bmhNamespace: + 
default: openshift-machine-api + type: string + bootstrapDns: + items: + type: string + type: array + cloudUserName: + default: cloud-admin + type: string + ctlplaneGateway: + type: string + ctlplaneInterface: + type: string + ctlplaneNetmask: + default: 255.255.255.0 + type: string + deploymentSSHSecret: + type: string + dnsSearchDomains: + items: + type: string + type: array + domainName: + type: string + hardwareReqs: + properties: + cpuReqs: + properties: + arch: + enum: + - x86_64 + - ppc64le + type: string + countReq: + properties: + count: + minimum: 1 + type: integer + exactMatch: + type: boolean + type: object + mhzReq: + properties: + exactMatch: + type: boolean + mhz: + minimum: 1 + type: integer + type: object + type: object + diskReqs: + properties: + gbReq: + properties: + exactMatch: + type: boolean + gb: + minimum: 1 + type: integer + type: object + ssdReq: + properties: + exactMatch: + type: boolean + ssd: + type: boolean + type: object + type: object + memReqs: + properties: + gbReq: + properties: + exactMatch: + type: boolean + gb: + minimum: 1 + type: integer + type: object + type: object + type: object + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + osContainerImageUrl: + type: string + osImage: + type: string + passwordSecret: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + provisionServerName: + type: string + provisioningInterface: + type: string + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - cloudUserName + - ctlplaneInterface + - deploymentSSHSecret + type: object + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object 
+ x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + networkAttachments: + items: + type: string + type: array + nodeTemplate: + properties: + ansible: + properties: + ansibleHost: + type: string + ansiblePort: + type: integer + ansibleUser: + type: string + ansibleVars: + x-kubernetes-preserve-unknown-fields: true + ansibleVarsFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + type: object + ansibleSSHPrivateKeySecret: + type: string + extraMounts: + items: + properties: + extraVolType: + type: string + mounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + propagation: + items: + type: string + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + 
type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic 
+ mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + 
x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + 
iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - 
path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: 
boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - mounts + - volumes + type: object + type: array + managementNetwork: + default: ctlplane + type: string + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + networks: + items: + properties: + defaultRoute: + type: boolean + fixedIP: + type: string + name: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + subnetName: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + required: + - name + - subnetName + type: object + type: array + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ansibleSSHPrivateKeySecret + type: object + nodes: + additionalProperties: + properties: + ansible: + properties: + ansibleHost: + type: string + ansiblePort: + type: integer + ansibleUser: + type: string + ansibleVars: + x-kubernetes-preserve-unknown-fields: true + ansibleVarsFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + type: object + extraMounts: + items: + properties: + extraVolType: + type: string + mounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string 
+ subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + propagation: + items: + type: string + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + 
type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + 
x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: 
string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + 
mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - mounts + - volumes + type: object + type: array + hostName: + type: string + managementNetwork: + type: string + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + networks: + items: + properties: + defaultRoute: + type: boolean + fixedIP: + type: string + name: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + subnetName: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + required: + - name + - subnetName + type: object + type: array + preprovisioningNetworkDataName: + type: string + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + preProvisioned: + type: boolean + secretMaxSize: + default: 1048576 + type: integer + services: + default: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + - telemetry + items: + type: string + type: array + tags: + items: + type: string + type: array + tlsEnabled: + default: true + type: boolean + required: + - nodeTemplate + - nodes + type: object + status: 
+ properties: + allHostnames: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + allIPs: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + configHash: + type: string + configMapHashes: + additionalProperties: + type: string + type: object + containerImages: + additionalProperties: + type: string + type: object + ctlplaneSearchDomain: + type: string + deployedConfigHash: + type: string + deployedVersion: + type: string + deploymentStatuses: + additionalProperties: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + dnsClusterAddresses: + items: + type: string + type: array + observedGeneration: + format: int64 + type: integer + secretHashes: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/apis/bases/dataplane.openstack.org_openstackdataplaneservices.yaml b/apis/bases/dataplane.openstack.org_openstackdataplaneservices.yaml new file mode 100644 index 000000000..940c4d7f2 --- /dev/null +++ b/apis/bases/dataplane.openstack.org_openstackdataplaneservices.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: openstackdataplaneservices.dataplane.openstack.org +spec: + group: 
dataplane.openstack.org + names: + kind: OpenStackDataPlaneService + listKind: OpenStackDataPlaneServiceList + plural: openstackdataplaneservices + shortNames: + - osdps + - osdpservice + - osdpservices + singular: openstackdataplaneservice + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + addCertMounts: + default: false + type: boolean + caCerts: + type: string + certsFrom: + type: string + configMaps: + items: + type: string + type: array + containerImageFields: + items: + type: string + type: array + dataSources: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + deployOnAllNodeSets: + type: boolean + edpmServiceType: + type: string + openStackAnsibleEERunnerImage: + type: string + playbook: + type: string + playbookContents: + type: string + secrets: + items: + type: string + type: array + tlsCerts: + additionalProperties: + properties: + contents: + items: + type: string + minItems: 1 + type: array + edpmRoleServiceName: + type: string + issuer: + type: string + keyUsages: + items: + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + networks: + items: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + type: array + required: + - contents + type: object 
+ type: object + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/apis/client/OWNERS b/apis/client/OWNERS new file mode 100644 index 000000000..b90dd082a --- /dev/null +++ b/apis/client/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners +approvers: + - openstack-approvers + +reviewers: + - openstack-approvers diff --git a/apis/dataplane/v1beta1/common.go b/apis/dataplane/v1beta1/common.go new file mode 100644 index 000000000..21dc57d5c --- /dev/null +++ b/apis/dataplane/v1beta1/common.go @@ -0,0 +1,199 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "encoding/json" + + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/storage" + corev1 "k8s.io/api/core/v1" +) + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +// +structType=atomic +type LocalObjectReference struct { + // Name of the referent. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" yaml:",inline"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty" yaml:"optional,omitempty"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference `json:",inline" yaml:",inline"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty" yaml:"optional,omitempty"` +} + +// DataSource represents the source of a set of ConfigMaps/Secrets +type DataSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" yaml:"configMapRef,omitempty"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty" yaml:"secretRef,omitempty"` +} + +// AnsibleOpts defines a logical grouping of Ansible related configuration options. 
+type AnsibleOpts struct { + // AnsibleUser SSH user for Ansible connection + // +kubebuilder:validation:Optional + AnsibleUser string `json:"ansibleUser"` + + // AnsibleHost SSH host for Ansible connection + // +kubebuilder:validation:Optional + AnsibleHost string `json:"ansibleHost,omitempty"` + + // AnsibleVars for configuring ansible + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + AnsibleVars map[string]json.RawMessage `json:"ansibleVars,omitempty"` + + // AnsibleVarsFrom is a list of sources to populate ansible variables from. + // Values defined by an AnsibleVars with a duplicate key take precedence. + // +kubebuilder:validation:Optional + AnsibleVarsFrom []DataSource `json:"ansibleVarsFrom,omitempty"` + + // AnsiblePort SSH port for Ansible connection + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"} + AnsiblePort int `json:"ansiblePort,omitempty"` +} + +// NodeSection defines the top level attributes inherited by nodes in the CR. +type NodeSection struct { + // ExtraMounts containing files which can be mounted into an Ansible Execution Pod + // +kubebuilder:validation:Optional + ExtraMounts []storage.VolMounts `json:"extraMounts,omitempty"` + + // Networks - Instance networks + // +kubebuilder:validation:Optional + Networks []infranetworkv1.IPSetNetwork `json:"networks,omitempty"` + + // UserData node specific user-data + // +kubebuilder:validation:Optional + UserData *corev1.SecretReference `json:"userData,omitempty"` + + // NetworkData node specific network-data + // +kubebuilder:validation:Optional + NetworkData *corev1.SecretReference `json:"networkData,omitempty"` + + // Ansible is the group of Ansible related configuration options. 
+ // +kubebuilder:validation:Optional + Ansible AnsibleOpts `json:"ansible,omitempty"` + + // HostName - node name + // +kubebuilder:validation:Optional + HostName string `json:"hostName,omitempty"` + + // ManagementNetwork - Name of network to use for management (SSH/Ansible) + // +kubebuilder:validation:Optional + ManagementNetwork string `json:"managementNetwork,omitempty"` + + // +kubebuilder:validation:Optional + // PreprovisioningNetworkDataName - NetworkData secret name in the local namespace for pre-provisioing + PreprovisioningNetworkDataName string `json:"preprovisioningNetworkDataName,omitempty"` +} + +// NodeTemplate is a specification of the node attributes that override top level attributes. +type NodeTemplate struct { + // ExtraMounts containing files which can be mounted into an Ansible Execution Pod + // +kubebuilder:validation:Optional + ExtraMounts []storage.VolMounts `json:"extraMounts,omitempty"` + + // Networks - Instance networks + // +kubebuilder:validation:Optional + Networks []infranetworkv1.IPSetNetwork `json:"networks,omitempty"` + + // UserData node specific user-data + // +kubebuilder:validation:Optional + UserData *corev1.SecretReference `json:"userData,omitempty"` + + // NetworkData node specific network-data + // +kubebuilder:validation:Optional + NetworkData *corev1.SecretReference `json:"networkData,omitempty"` + + // AnsibleSSHPrivateKeySecret Name of a private SSH key secret containing + // private SSH key for connecting to node. 
+ // The named secret must be of the form: + // Secret.data.ssh-privatekey: + // + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:io.kubernetes:Secret"} + AnsibleSSHPrivateKeySecret string `json:"ansibleSSHPrivateKeySecret"` + // ManagementNetwork - Name of network to use for management (SSH/Ansible) + // +kubebuilder:validation:Optional + // +kubebuilder:default=ctlplane + ManagementNetwork string `json:"managementNetwork"` + + // Ansible is the group of Ansible related configuration options. + // +kubebuilder:validation:Optional + Ansible AnsibleOpts `json:"ansible,omitempty"` +} + +// AnsibleEESpec is a specification of the ansible EE attributes +type AnsibleEESpec struct { + // ExtraMounts containing files which can be mounted into an Ansible Execution Pod + ExtraMounts []storage.VolMounts `json:"extraMounts,omitempty"` + // Env is a list containing the environment variables to pass to the pod + Env []corev1.EnvVar `json:"env,omitempty"` + // ExtraVars for ansible execution + ExtraVars map[string]json.RawMessage `json:"extraVars,omitempty"` + // DNSConfig for setting dnsservers + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` + // NetworkAttachments is a list of NetworkAttachment resource names to pass to the ansibleee resource + // which allows to connect the ansibleee runner to the given network + NetworkAttachments []string `json:"networkAttachments"` + // OpenStackAnsibleEERunnerImage image to use as the ansibleEE runner image + OpenStackAnsibleEERunnerImage string `json:"openStackAnsibleEERunnerImage,omitempty"` + // AnsibleTags for ansible execution + AnsibleTags string `json:"ansibleTags,omitempty"` + // AnsibleLimit for ansible execution + AnsibleLimit string `json:"ansibleLimit,omitempty"` + // AnsibleSkipTags for ansible execution + AnsibleSkipTags string `json:"ansibleSkipTags,omitempty"` + // ServiceAccountName allows to specify what ServiceAccountName do 
we want + // the ansible execution run with. Without specifying, it will run with + // default serviceaccount + ServiceAccountName string +} diff --git a/apis/dataplane/v1beta1/conditions.go b/apis/dataplane/v1beta1/conditions.go new file mode 100644 index 000000000..89c0ffbad --- /dev/null +++ b/apis/dataplane/v1beta1/conditions.go @@ -0,0 +1,112 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" +) + +const ( + // DataPlaneNodeSetErrorMessage error + DataPlaneNodeSetErrorMessage = "DataPlaneNodeSet error occurred %s" + + // ServiceReadyCondition Status=True condition indicates if the + // service is finished and successful. + ServiceReadyCondition condition.Type = "ServiceReady" + + // ServiceReadyMessage ready + ServiceReadyMessage = "%s service ready" + + // ServiceReadyWaitingMessage not yet ready + ServiceReadyWaitingMessage = "%s service not yet ready" + + // ServiceErrorMessage error + ServiceErrorMessage = "Service error occurred %s" + + // SetupReadyCondition - Overall setup condition + SetupReadyCondition condition.Type = "SetupReady" + + // NodeSetReadyMessage - NodeSet Ready + NodeSetReadyMessage = "NodeSet Ready" + + // NodeSetBareMetalProvisionReadyCondition Status=True condition indicates + // all baremetal nodes provisioned for the NodeSet. 
+ NodeSetBareMetalProvisionReadyCondition condition.Type = "NodeSetBaremetalProvisionReady" + + // NodeSetBaremetalProvisionReadyMessage ready + NodeSetBaremetalProvisionReadyMessage = "NodeSetBaremetalProvisionReady ready" + + // NodeSetBaremetalProvisionReadyWaitingMessage not yet ready + NodeSetBaremetalProvisionReadyWaitingMessage = "NodeSetBaremetalProvisionReady not yet ready" + + // NodeSetBaremetalProvisionErrorMessage error + NodeSetBaremetalProvisionErrorMessage = "NodeSetBaremetalProvisionReady error occurred" + + // NodeSetIPReservationReadyCondition Status=True condition indicates + // IPSets reserved for all nodes in a NodeSet. + NodeSetIPReservationReadyCondition condition.Type = "NodeSetIPReservationReady" + + // NodeSetIPReservationReadyMessage ready + NodeSetIPReservationReadyMessage = "NodeSetIPReservationReady ready" + + // NodeSetIPReservationReadyWaitingMessage not yet ready + NodeSetIPReservationReadyWaitingMessage = "NodeSetIPReservationReady not yet ready" + + // NodeSetIPReservationReadyErrorMessage error + NodeSetIPReservationReadyErrorMessage = "NodeSetIPReservationReady error occurred" + + // NodeSetDNSDataReadyCondition Status=True condition indicates + // DNSData created for the NodeSet. + NodeSetDNSDataReadyCondition condition.Type = "NodeSetDNSDataReady" + + // NodeSetDNSDataReadyMessage ready + NodeSetDNSDataReadyMessage = "NodeSetDNSDataReady ready" + + // NodeSetDNSDataReadyWaitingMessage not yet ready + NodeSetDNSDataReadyWaitingMessage = "NodeSetDNSDataReady not yet ready" + + // NodeSetDNSDataReadyErrorMessage error + NodeSetDNSDataReadyErrorMessage = "NodeSetDNSDataReady error occurred" + + // NodeSetDNSDataMultipleDNSMasqErrorMessage error + NodeSetDNSDataMultipleDNSMasqErrorMessage = "NodeSet DNSData error occurred. Multiple DNSMasq resources exist." 
+ + // InputReadyWaitingMessage not yet ready + InputReadyWaitingMessage = "Waiting for input %s, not yet ready" + + // NodeSetDeploymentReadyCondition Status=True condition indicates if the + // NodeSet Deployment is finished and successful. + NodeSetDeploymentReadyCondition condition.Type = "NodeSetDeploymentReady" + + // NodeSetDeploymentReadyMessage ready + NodeSetDeploymentReadyMessage = "Deployment ready for NodeSet" + + // NodeSetDeploymentReadyWaitingMessage not yet ready + NodeSetDeploymentReadyWaitingMessage = "Deployment not yet ready for NodeSet" + + // NodeSetDeploymentErrorMessage error + NodeSetDeploymentErrorMessage = "Deployment error occurred %s for NodeSet" + + // NodeSetServiceDeploymentReadyMessage ready + NodeSetServiceDeploymentReadyMessage = "Deployment ready for %s service" + + // NodeSetServiceDeploymentReadyWaitingMessage not yet ready + NodeSetServiceDeploymentReadyWaitingMessage = "Deployment not yet ready for %s service" + + // NodeSetServiceDeploymentErrorMessage error + NodeSetServiceDeploymentErrorMessage = "Deployment error occurred in %s service" +) diff --git a/apis/dataplane/v1beta1/groupversion_info.go b/apis/dataplane/v1beta1/groupversion_info.go new file mode 100644 index 000000000..8d652e550 --- /dev/null +++ b/apis/dataplane/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1beta1 contains API Schema definitions for the dataplane v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=dataplane.openstack.org +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "dataplane.openstack.org", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dataplane/v1beta1/openstackdataplanedeployment_types.go b/apis/dataplane/v1beta1/openstackdataplanedeployment_types.go new file mode 100644 index 000000000..d5b318399 --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplanedeployment_types.go @@ -0,0 +1,156 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "encoding/json" + + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OpenStackDataPlaneDeploymentSpec defines the desired state of OpenStackDataPlaneDeployment +type OpenStackDataPlaneDeploymentSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems:=1 + // NodeSets is the list of NodeSets deployed + NodeSets []string `json:"nodeSets"` + + // BackoffLimit allows to define the maximum number of retried executions (defaults to 6). + // +kubebuilder:default:=6 + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"} + BackoffLimit *int32 `json:"backoffLimit,omitempty"` + + // AnsibleTags for ansible execution + // +kubebuilder:validation:Optional + AnsibleTags string `json:"ansibleTags,omitempty"` + + // AnsibleLimit for ansible execution + // +kubebuilder:validation:Optional + AnsibleLimit string `json:"ansibleLimit,omitempty"` + + // AnsibleSkipTags for ansible execution + // +kubebuilder:validation:Optional + AnsibleSkipTags string `json:"ansibleSkipTags,omitempty"` + + // +kubebuilder:validation:Optional + // AnsibleExtraVars for ansible execution + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + AnsibleExtraVars map[string]json.RawMessage `json:"ansibleExtraVars,omitempty"` + + // +kubebuilder:validation:Optional + // ServicesOverride list + ServicesOverride []string `json:"servicesOverride,omitempty"` + + // Time before the deployment is requeued in seconds + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:default:=15 + DeploymentRequeueTime int `json:"deploymentRequeueTime"` +} + +// OpenStackDataPlaneDeploymentStatus defines the observed state of OpenStackDataPlaneDeployment +type OpenStackDataPlaneDeploymentStatus struct { + // NodeSetConditions + NodeSetConditions map[string]condition.Conditions 
`json:"nodeSetConditions,omitempty" optional:"true"` + + // ConfigMapHashes + ConfigMapHashes map[string]string `json:"configMapHashes,omitempty" optional:"true"` + + // SecretHashes + SecretHashes map[string]string `json:"secretHashes,omitempty" optional:"true"` + + // NodeSetHashes + NodeSetHashes map[string]string `json:"nodeSetHashes,omitempty" optional:"true"` + + // ContainerImages + ContainerImages map[string]string `json:"containerImages,omitempty"` + + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + //ObservedGeneration - the most recent generation observed for this Deployment. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // DeployedVersion + DeployedVersion string `json:"deployedVersion,omitempty"` + + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} + // Deployed + Deployed bool `json:"deployed,omitempty" optional:"true"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+operator-sdk:csv:customresourcedefinitions:displayName="OpenStack Data Plane Deployments" +//+kubebuilder:resource:shortName=osdpd;osdpdeployment;osdpdeployments +//+kubebuilder:printcolumn:name="NodeSets",type="string",JSONPath=".spec.nodeSets",description="NodeSets" +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[0].status",description="Status" +//+kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[0].message",description="Message" + +// OpenStackDataPlaneDeployment is the Schema for the openstackdataplanedeployments API +// OpenStackDataPlaneDeployment name must be a valid RFC1123 as it is used in labels 
+type OpenStackDataPlaneDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="OpenStackDataPlaneDeployment Spec is immutable" + Spec OpenStackDataPlaneDeploymentSpec `json:"spec,omitempty"` + Status OpenStackDataPlaneDeploymentStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OpenStackDataPlaneDeploymentList contains a list of OpenStackDataPlaneDeployment +type OpenStackDataPlaneDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackDataPlaneDeployment `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OpenStackDataPlaneDeployment{}, &OpenStackDataPlaneDeploymentList{}) +} + +// IsReady - returns true if the OpenStackDataPlaneDeployment is ready +func (instance OpenStackDataPlaneDeployment) IsReady() bool { + return instance.Status.Conditions.IsTrue(condition.ReadyCondition) +} + +// InitConditions - Initializes Status Conditions +func (instance *OpenStackDataPlaneDeployment) InitConditions() { + instance.Status.Conditions = condition.Conditions{} + + cl := condition.CreateList( + condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), + condition.UnknownCondition(condition.InputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + ) + instance.Status.Conditions.Init(&cl) + instance.Status.NodeSetConditions = make(map[string]condition.Conditions) + if instance.Spec.NodeSets != nil { + for _, nodeSet := range instance.Spec.NodeSets { + nsConds := condition.Conditions{} + nsConds.Set(condition.UnknownCondition( + NodeSetDeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage)) + instance.Status.NodeSetConditions[nodeSet] = nsConds + + } + } + + instance.Status.Deployed = false +} diff --git 
a/apis/dataplane/v1beta1/openstackdataplanedeployment_webhook.go b/apis/dataplane/v1beta1/openstackdataplanedeployment_webhook.go new file mode 100644 index 000000000..d82983546 --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplanedeployment_webhook.go @@ -0,0 +1,126 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var openstackdataplanedeploymentlog = logf.Log.WithName("openstackdataplanedeployment-resource") + +// SetupWebhookWithManager sets up the webhook with the Manager +func (r *OpenStackDataPlaneDeployment) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(r).Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +// +kubebuilder:webhook:path=/mutate-dataplane-openstack-org-v1beta1-openstackdataplanedeployment,mutating=true,failurePolicy=fail,sideEffects=None,groups=dataplane.openstack.org,resources=openstackdataplanedeployments,verbs=create;update,versions=v1beta1,name=mopenstackdataplanedeployment.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &OpenStackDataPlaneDeployment{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *OpenStackDataPlaneDeployment) Default() { + + openstackdataplanedeploymentlog.Info("default", "name", r.Name) + r.Spec.Default() +} + +// Default - set defaults for this OpenStackDataPlaneDeployment +func (spec *OpenStackDataPlaneDeploymentSpec) Default() { + +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +// +kubebuilder:webhook:path=/validate-dataplane-openstack-org-v1beta1-openstackdataplanedeployment,mutating=false,failurePolicy=fail,sideEffects=None,groups=dataplane.openstack.org,resources=openstackdataplanedeployments,verbs=create;update,versions=v1beta1,name=vopenstackdataplanedeployment.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &OpenStackDataPlaneDeployment{} + +func (r *OpenStackDataPlaneDeployment) ValidateCreate() (admission.Warnings, error) { + + openstackdataplanedeploymentlog.Info("validate create", "name", r.Name) + + errors := r.Spec.ValidateCreate() + if len(errors) != 0 { + openstackdataplanedeploymentlog.Info("validation failed", "name", r.Name) + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "dataplane.openstack.org", Kind: "OpenStackDataPlaneDeployment"}, + r.Name, + errors) + } + + return nil, nil +} + +func (r *OpenStackDataPlaneDeploymentSpec) ValidateCreate() field.ErrorList { + // TODO(user): fill in your validation logic upon object creation. 
+ + return field.ErrorList{} +} + +func (r *OpenStackDataPlaneDeployment) ValidateUpdate(original runtime.Object) (admission.Warnings, error) { + openstackdataplanedeploymentlog.Info("validate update", "name", r.Name) + + errors := r.Spec.ValidateUpdate() + + if len(errors) != 0 { + openstackdataplanedeploymentlog.Info("validation failed", "name", r.Name) + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "dataplane.openstack.org", Kind: "OpenStackDataPlaneDeployment"}, + r.Name, + errors) + } + + return nil, nil +} + +func (r *OpenStackDataPlaneDeploymentSpec) ValidateUpdate() field.ErrorList { + // TODO(user): fill in your validation logic upon object update. + + return field.ErrorList{} +} + +func (r *OpenStackDataPlaneDeployment) ValidateDelete() (admission.Warnings, error) { + openstackdataplanedeploymentlog.Info("validate delete", "name", r.Name) + + errors := r.Spec.ValidateDelete() + + if len(errors) != 0 { + openstackdataplanedeploymentlog.Info("validation failed", "name", r.Name) + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "dataplane.openstack.org", Kind: "OpenStackDataPlaneDeployment"}, + r.Name, + errors) + } + return nil, nil +} + +func (r *OpenStackDataPlaneDeploymentSpec) ValidateDelete() field.ErrorList { + // TODO(user): fill in your validation logic upon object creation. + + return field.ErrorList{} +} diff --git a/apis/dataplane/v1beta1/openstackdataplanenodeset.go b/apis/dataplane/v1beta1/openstackdataplanenodeset.go new file mode 100644 index 000000000..317f8c641 --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplanenodeset.go @@ -0,0 +1,31 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "regexp" +) + +// NodeHostNameIsFQDN Helper to check if a hostname is fqdn +func NodeHostNameIsFQDN(hostname string) bool { + // Regular expression to match a valid FQDN + // This regex assumes that the hostname and domain name segments only contain letters, digits, hyphens, and periods. + regex := `^([a-zA-Z0-9-]+\.)*[a-zA-Z0-9-]+\.[a-zA-Z]{2,}$` + + match, _ := regexp.MatchString(regex, hostname) + return match +} diff --git a/apis/dataplane/v1beta1/openstackdataplanenodeset_types.go b/apis/dataplane/v1beta1/openstackdataplanenodeset_types.go new file mode 100644 index 000000000..83e3a9bb5 --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplanenodeset_types.go @@ -0,0 +1,292 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + + "golang.org/x/exp/slices" + + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// OpenStackDataPlaneNodeSetSpec defines the desired state of OpenStackDataPlaneNodeSet +type OpenStackDataPlaneNodeSetSpec struct { + // +kubebuilder:validation:Optional + // BaremetalSetTemplate Template for BaremetalSet for the NodeSet + BaremetalSetTemplate baremetalv1.OpenStackBaremetalSetSpec `json:"baremetalSetTemplate,omitempty"` + + // +kubebuilder:validation:Required + // NodeTemplate - node attributes specific to nodes defined by this resource. These + // attributes can be overridden at the individual node level, else take their defaults + // from values in this section. + NodeTemplate NodeTemplate `json:"nodeTemplate"` + + // Nodes - Map of Node Names and node specific data. Values here override defaults in the + // upper level section. + // +kubebuilder:validation:Required + Nodes map[string]NodeSection `json:"nodes"` + + // Env is a list containing the environment variables to pass to the pod + // Variables modifying behavior of AnsibleEE can be specified here. 
+ // +kubebuilder:validation:Optional + Env []corev1.EnvVar `json:"env,omitempty"` + + // +kubebuilder:validation:Optional + // NetworkAttachments is a list of NetworkAttachment resource names to pass to the ansibleee resource + // which allows to connect the ansibleee runner to the given network + NetworkAttachments []string `json:"networkAttachments,omitempty"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default={download-cache,bootstrap,configure-network,validate-network,install-os,configure-os,ssh-known-hosts,run-os,reboot-os,install-certs,ovn,neutron-metadata,libvirt,nova,telemetry} + // Services list + Services []string `json:"services"` + + // Tags - Additional tags for NodeSet + // +kubebuilder:validation:Optional + Tags []string `json:"tags,omitempty"` + + // SecretMaxSize - Maximum size in bytes of a Kubernetes secret. This size is currently situated around + // 1 MiB (nearly 1 MB). + // +kubebuilder:validation:Optional + // +kubebuilder:default=1048576 + SecretMaxSize int `json:"secretMaxSize" yaml:"secretMaxSize"` + + // +kubebuilder:validation:Optional + // + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} + // PreProvisioned - Set to true if the nodes have been Pre Provisioned. + PreProvisioned bool `json:"preProvisioned,omitempty"` + + // TLSEnabled - Whether the node set has TLS enabled. 
+ // +kubebuilder:validation:Optional + // +kubebuilder:default=true + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} + TLSEnabled bool `json:"tlsEnabled" yaml:"tlsEnabled"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+operator-sdk:csv:customresourcedefinitions:displayName="OpenStack Data Plane NodeSet" +//+kubebuilder:resource:shortName=osdpns;osdpnodeset;osdpnodesets +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[0].status",description="Status" +//+kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[0].message",description="Message" + +// OpenStackDataPlaneNodeSet is the Schema for the openstackdataplanenodesets API +// OpenStackDataPlaneNodeSet name must be a valid RFC1123 as it is used in labels +type OpenStackDataPlaneNodeSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OpenStackDataPlaneNodeSetSpec `json:"spec,omitempty"` + Status OpenStackDataPlaneNodeSetStatus `json:"status,omitempty"` +} + +// OpenStackDataPlaneNodeSetStatus defines the observed state of OpenStackDataPlaneNodeSet +type OpenStackDataPlaneNodeSetStatus struct { + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // DeploymentStatuses + DeploymentStatuses map[string]condition.Conditions `json:"deploymentStatuses,omitempty" optional:"true"` + + // AllHostnames + AllHostnames map[string]map[infranetworkv1.NetNameStr]string `json:"allHostnames,omitempty" optional:"true"` + + // AllIPs + AllIPs map[string]map[infranetworkv1.NetNameStr]string `json:"allIPs,omitempty" optional:"true"` + + // ConfigMapHashes + ConfigMapHashes map[string]string `json:"configMapHashes,omitempty" optional:"true"` + + // 
SecretHashes + SecretHashes map[string]string `json:"secretHashes,omitempty" optional:"true"` + + // DNSClusterAddresses + DNSClusterAddresses []string `json:"dnsClusterAddresses,omitempty" optional:"true"` + + // ContainerImages + ContainerImages map[string]string `json:"containerImages,omitempty" optional:"true"` + + // CtlplaneSearchDomain + CtlplaneSearchDomain string `json:"ctlplaneSearchDomain,omitempty" optional:"true"` + + // ConfigHash - holds the current hash of the NodeTemplate and Node sections of the struct. + // This hash is used to determine when new Ansible executions are required to roll + // out config changes. + ConfigHash string `json:"configHash,omitempty"` + + // DeployedConfigHash - holds the hash of the NodeTemplate and Node sections of the struct + // that was last deployed. + // This hash is used to determine when new Ansible executions are required to roll + // out config changes. + DeployedConfigHash string `json:"deployedConfigHash,omitempty"` + + //ObservedGeneration - the most recent generation observed for this NodeSet. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. 
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // DeployedVersion + DeployedVersion string `json:"deployedVersion,omitempty"` +} + +//+kubebuilder:object:root=true + +// OpenStackDataPlaneNodeSetList contains a list of OpenStackDataPlaneNodeSets +type OpenStackDataPlaneNodeSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackDataPlaneNodeSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OpenStackDataPlaneNodeSet{}, &OpenStackDataPlaneNodeSetList{}) +} + +// IsReady - returns true if the DataPlane is ready +func (instance OpenStackDataPlaneNodeSet) IsReady() bool { + return instance.Status.Conditions.IsTrue(condition.ReadyCondition) +} + +// InitConditions - Initializes Status Conditions +func (instance *OpenStackDataPlaneNodeSet) InitConditions() { + instance.Status.Conditions = condition.Conditions{} + instance.Status.DeploymentStatuses = make(map[string]condition.Conditions) + + cl := condition.CreateList( + condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), + condition.UnknownCondition(condition.InputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + condition.UnknownCondition(SetupReadyCondition, condition.InitReason, condition.InitReason), + condition.UnknownCondition(NodeSetIPReservationReadyCondition, condition.InitReason, condition.InitReason), + condition.UnknownCondition(NodeSetDNSDataReadyCondition, condition.InitReason, condition.InitReason), + condition.UnknownCondition(condition.ServiceAccountReadyCondition, condition.InitReason, condition.ServiceAccountReadyInitMessage), + ) + + // Only set Baremetal related conditions if we have baremetal hosts included in the + // baremetalSetTemplate. 
+ if len(instance.Spec.BaremetalSetTemplate.BaremetalHosts) > 0 { + cl = append(cl, *condition.UnknownCondition(NodeSetBareMetalProvisionReadyCondition, condition.InitReason, condition.InitReason)) + } + + instance.Status.Conditions.Init(&cl) +} + +// GetAnsibleEESpec - get the fields that will be passed to AEE +func (instance OpenStackDataPlaneNodeSet) GetAnsibleEESpec() AnsibleEESpec { + return AnsibleEESpec{ + NetworkAttachments: instance.Spec.NetworkAttachments, + ExtraMounts: instance.Spec.NodeTemplate.ExtraMounts, + Env: instance.Spec.Env, + ServiceAccountName: instance.Name, + } +} + +// ContainerImageDefaults - the hardcoded defaults which are the last fallback +// if no values are set elsewhere. +var ContainerImageDefaults = openstackv1.ContainerImages{ + ContainerTemplate: openstackv1.ContainerTemplate{ + AgentImage: getStrPtr("quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:current-podified"), + AnsibleeeImage: getStrPtr("quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest"), + ApacheImage: getStrPtr("registry.redhat.io/ubi9/httpd-24:latest"), + EdpmFrrImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-frr:current-podified"), + EdpmIscsidImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-iscsid:current-podified"), + EdpmLogrotateCrondImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-cron:current-podified"), + EdpmNeutronDhcpAgentImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified"), + EdpmNeutronMetadataAgentImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified"), + EdpmNeutronOvnAgentImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified"), + EdpmNeutronSriovAgentImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified"), + EdpmMultipathdImage: 
getStrPtr("quay.io/podified-antelope-centos9/openstack-multipathd:current-podified"), + NovaComputeImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified"), + OvnControllerImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified"), + EdpmOvnBgpAgentImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified"), + CeilometerComputeImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-telemetry-ceilometer-compute:current-podified"), + CeilometerIpmiImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-telemetry-ceilometer-ipmi:current-podified"), + EdpmNodeExporterImage: getStrPtr("quay.io/podified-antelope-centos9/openstack-telemetry-node-exporter:current-podified"), + OsContainerImage: getStrPtr("quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified"), + }} + +// ContainerImages - the values if no OpenStackVersion is used +var ContainerImages openstackv1.ContainerImages + +// SetupDefaults - initializes any CRD field defaults based on environment variables +// called from main.go +func SetupDefaults() { + // Acquire environmental defaults and initialize dataplane defaults with them + ContainerImages = openstackv1.ContainerImages{ + ContainerTemplate: openstackv1.ContainerTemplate{ + AgentImage: getImageDefault("RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.AgentImage), + AnsibleeeImage: getImageDefault("RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT", ContainerImageDefaults.AnsibleeeImage), + ApacheImage: getImageDefault("RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT", ContainerImageDefaults.ApacheImage), + EdpmFrrImage: getImageDefault("RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmFrrImage), + EdpmIscsidImage: getImageDefault("RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmIscsidImage), + EdpmLogrotateCrondImage: 
getImageDefault("RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmLogrotateCrondImage), + EdpmMultipathdImage: getImageDefault("RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmMultipathdImage), + EdpmNeutronDhcpAgentImage: getImageDefault("RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmNeutronDhcpAgentImage), + EdpmNeutronMetadataAgentImage: getImageDefault("RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmNeutronMetadataAgentImage), + EdpmNeutronOvnAgentImage: getImageDefault("RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmNeutronOvnAgentImage), + EdpmNeutronSriovAgentImage: getImageDefault("RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmNeutronSriovAgentImage), + EdpmNodeExporterImage: getImageDefault("RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmNodeExporterImage), + EdpmOvnBgpAgentImage: getImageDefault("RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.EdpmOvnBgpAgentImage), + CeilometerComputeImage: getImageDefault("RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT", ContainerImageDefaults.CeilometerComputeImage), + CeilometerIpmiImage: getImageDefault("RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT", ContainerImageDefaults.CeilometerIpmiImage), + NovaComputeImage: getImageDefault("RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT", ContainerImageDefaults.NovaComputeImage), + OvnControllerImage: getImageDefault("RELATED_IMAGE_OVN_CONTROLLER_AGENT_IMAGE_URL_DEFAULT", ContainerImageDefaults.OvnControllerImage), + OsContainerImage: getImageDefault("RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT", ContainerImageDefaults.OsContainerImage), + }, + } +} + +func getImageDefault(envVar string, defaultImage *string) *string { + d := util.GetEnvVar(envVar, *defaultImage) + return &d +} + +func 
getStrPtr(in string) *string { + return &in +} + +// duplicateNodeCheck checks the NodeSetList for pre-existing nodes. If the user is trying to redefine an +// existing node, we will return an error and block resource creation. +func (r *OpenStackDataPlaneNodeSetSpec) duplicateNodeCheck(nodeSetList *OpenStackDataPlaneNodeSetList) (errors field.ErrorList) { + existingNodeNames := make([]string, 0) + for _, existingNode := range nodeSetList.Items { + for _, node := range existingNode.Spec.Nodes { + existingNodeNames = append(existingNodeNames, node.HostName) + if node.Ansible.AnsibleHost != "" { + existingNodeNames = append(existingNodeNames, node.Ansible.AnsibleHost) + } + } + } + + for _, newNodeName := range r.Nodes { + if slices.Contains(existingNodeNames, newNodeName.HostName) || slices.Contains(existingNodeNames, newNodeName.Ansible.AnsibleHost) { + errors = append(errors, field.Invalid( + field.NewPath("spec").Child("nodes"), + newNodeName, + fmt.Sprintf("node %s already exists in another cluster", newNodeName.HostName))) + } + } + + return +} diff --git a/apis/dataplane/v1beta1/openstackdataplanenodeset_webhook.go b/apis/dataplane/v1beta1/openstackdataplanenodeset_webhook.go new file mode 100644 index 000000000..a58f1b3fb --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplanenodeset_webhook.go @@ -0,0 +1,241 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/go-playground/validator/v10" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// Client needed for API calls (manager's client, set by first SetupWebhookWithManager() call +// to any particular webhook) +var webhookClient client.Client + +// log is for logging in this package. +var openstackdataplanenodesetlog = logf.Log.WithName("openstackdataplanenodeset-resource") + +// SetupWebhookWithManager sets up the webhook with the Manager +func (r *OpenStackDataPlaneNodeSet) SetupWebhookWithManager(mgr ctrl.Manager) error { + if webhookClient == nil { + webhookClient = mgr.GetClient() + } + + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +//+kubebuilder:webhook:path=/mutate-dataplane-openstack-org-v1beta1-openstackdataplanenodeset,mutating=true,failurePolicy=fail,sideEffects=None,groups=dataplane.openstack.org,resources=openstackdataplanenodesets,verbs=create;update,versions=v1beta1,name=mopenstackdataplanenodeset.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &OpenStackDataPlaneNodeSet{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *OpenStackDataPlaneNodeSet) Default() { + openstackdataplanenodesetlog.Info("default", "name", r.Name) + r.Spec.Default() +} + +// Default - set defaults for this OpenStackDataPlaneNodeSet Spec +func (spec *OpenStackDataPlaneNodeSetSpec) Default() { + domain := spec.BaremetalSetTemplate.DomainName + for nodeName, node := range spec.Nodes { + if node.HostName == "" { + node.HostName = nodeName + } + if !spec.PreProvisioned { + if !NodeHostNameIsFQDN(node.HostName) && domain != "" { + node.HostName = strings.Join([]string{nodeName, domain}, ".") + } + } + spec.Nodes[nodeName] = *node.DeepCopy() + } + + if !spec.PreProvisioned { + spec.NodeTemplate.Ansible.AnsibleUser = spec.BaremetalSetTemplate.CloudUserName + if spec.BaremetalSetTemplate.DeploymentSSHSecret == "" { + spec.BaremetalSetTemplate.DeploymentSSHSecret = spec.NodeTemplate.AnsibleSSHPrivateKeySecret + } + nodeSetHostMap := make(map[string]baremetalv1.InstanceSpec) + for _, node := range spec.Nodes { + instanceSpec := baremetalv1.InstanceSpec{} + instanceSpec.UserData = node.UserData + instanceSpec.NetworkData = node.NetworkData + instanceSpec.PreprovisioningNetworkDataName = node.PreprovisioningNetworkDataName + nodeSetHostMap[node.HostName] = instanceSpec + } + spec.BaremetalSetTemplate.BaremetalHosts = nodeSetHostMap + } else if spec.NodeTemplate.Ansible.AnsibleUser == "" { + spec.NodeTemplate.Ansible.AnsibleUser = "cloud-admin" + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:path=/validate-dataplane-openstack-org-v1beta1-openstackdataplanenodeset,mutating=false,failurePolicy=fail,sideEffects=None,groups=dataplane.openstack.org,resources=openstackdataplanenodesets,verbs=create;update,versions=v1beta1,name=vopenstackdataplanenodeset.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &OpenStackDataPlaneNodeSet{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *OpenStackDataPlaneNodeSet) ValidateCreate() (admission.Warnings, error) { + openstackdataplanenodesetlog.Info("validate create", "name", r.Name) + + var errors field.ErrorList + + nodeSetList := &OpenStackDataPlaneNodeSetList{} + opts := &client.ListOptions{ + Namespace: r.ObjectMeta.Namespace, + } + + err := webhookClient.List(context.TODO(), nodeSetList, opts) + if err != nil { + return nil, err + } + // Check if OpenStackDataPlaneNodeSet name matches RFC1123 for use in labels + validate := validator.New() + if err = validate.Var(r.Name, "hostname_rfc1123"); err != nil { + openstackdataplanenodesetlog.Error(err, "Error validating OpenStackDataPlaneNodeSet name, name must follow RFC1123") + errors = append(errors, field.Invalid( + field.NewPath("Name"), + r.Name, + fmt.Sprintf("Error validating OpenStackDataPlaneNodeSet name %s, name must follow RFC1123", r.Name))) + } + + errors = append(errors, r.Spec.ValidateCreate(nodeSetList)...) + + if len(errors) > 0 { + openstackdataplanenodesetlog.Info("validation failed", "name", r.Name) + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "dataplane.openstack.org", Kind: "OpenStackDataPlaneNodeSet"}, + r.Name, + errors) + } + return nil, nil +} + +func (r *OpenStackDataPlaneNodeSetSpec) ValidateCreate(nodeSetList *OpenStackDataPlaneNodeSetList) field.ErrorList { + var errors field.ErrorList + // Currently, this check is only valid for PreProvisioned nodes. 
+	// If this is the first NodeSet being created, then there can be no duplicates,
+	// so we can exit early here.
baremetalSetTemplate after the initial deployment would necessitate + // a redeploy of the node. Thus we should block these changes and require the user to + // delete and redeploy should they wish to make such changes after the initial deploy. + // If the BaremetalSetTemplate is changed, we will offload the parsing of these details + // to the openstack-baremetal-operator webhook to avoid duplicating logic. + if !reflect.DeepEqual(r.BaremetalSetTemplate, oldSpec.BaremetalSetTemplate) { + + // Call openstack-baremetal-operator webhook Validate() to parse changes + err := r.BaremetalSetTemplate.Validate(oldSpec.BaremetalSetTemplate) + if err != nil { + errors = append(errors, field.Forbidden( + field.NewPath("spec.baremetalSetTemplate"), + fmt.Sprintf("%s", err))) + } + } + + return errors +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *OpenStackDataPlaneNodeSet) ValidateDelete() (admission.Warnings, error) { + openstackdataplanenodesetlog.Info("validate delete", "name", r.Name) + errors := r.Spec.ValidateDelete() + + if len(errors) != 0 { + openstackdataplanenodesetlog.Info("validation failed", "name", r.Name) + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "dataplane.openstack.org", Kind: "OpenStackDataPlaneNodeSet"}, + r.Name, + errors, + ) + } + return nil, nil +} + +func (r *OpenStackDataPlaneNodeSetSpec) ValidateDelete() field.ErrorList { + // TODO(user): fill in your validation logic upon object deletion. + + return field.ErrorList{} + +} diff --git a/apis/dataplane/v1beta1/openstackdataplaneservice_types.go b/apis/dataplane/v1beta1/openstackdataplaneservice_types.go new file mode 100644 index 000000000..d68554616 --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplaneservice_types.go @@ -0,0 +1,170 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + certmgrv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" +) + +// OpenstackDataPlaneServiceCert defines the property of a TLS cert issued for +// a dataplane service +type OpenstackDataPlaneServiceCert struct { + // Contents of the certificate + // This is a list of strings for properties that are needed in the cert + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems:=1 + Contents []string `json:"contents"` + + // Networks to include in SNI for the cert + // +kubebuilder:validation:Optional + Networks []infranetworkv1.NetNameStr `json:"networks,omitempty"` + + // Issuer is the label for the issuer to issue the cert + // Only one issuer should have this label + // +kubebuilder:validation:Optional + Issuer string `json:"issuer,omitempty"` + + // KeyUsages to be added to the issued cert + // +kubebuilder:validation:Optional + KeyUsages []certmgrv1.KeyUsage `json:"keyUsages,omitempty" yaml:"keyUsages,omitempty"` + + // EDPMRoleServiceName is the value of the _service_name variable from + // the edpm-ansible role where this certificate is used. For example if the + // certificate is for edpm_ovn from edpm-ansible, EDPMRoleServiceName must be + // ovn, which matches the edpm_ovn_service_name variable from the role. If + // not set, OpenStackDataPlaneService.Spec.EDPMServiceType is used. 
+	// PlaybookContents is the inline playbook content that ansible will run on execution.
+	// DeployOnAllNodeSets - whether the service should be deployed across all NodeSets.
+// InitConditions - Initializes Status Conditions
condition.CreateList(condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.InitReason)) + // initialize conditions used later as Status=Unknown + instance.Status.Conditions.Init(&cl) +} diff --git a/apis/dataplane/v1beta1/openstackdataplaneservice_webhook.go b/apis/dataplane/v1beta1/openstackdataplaneservice_webhook.go new file mode 100644 index 000000000..db65ccc03 --- /dev/null +++ b/apis/dataplane/v1beta1/openstackdataplaneservice_webhook.go @@ -0,0 +1,127 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var openstackdataplaneservicelog = logf.Log.WithName("openstackdataplaneservice-resource") + +// SetupWebhookWithManager sets up the webhook with the Manager +func (r *OpenStackDataPlaneService) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(r).Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +// +kubebuilder:webhook:path=/mutate-dataplane-openstack-org-v1beta1-openstackdataplaneservice,mutating=true,failurePolicy=fail,sideEffects=None,groups=dataplane.openstack.org,resources=openstackdataplaneservices,verbs=create;update,versions=v1beta1,name=mopenstackdataplaneservice.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &OpenStackDataPlaneService{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *OpenStackDataPlaneService) Default() { + + openstackdataplaneservicelog.Info("default", "name", r.Name) + r.Spec.Default(r.Name) +} + +// Default - set defaults for this OpenStackDataPlaneService +func (spec *OpenStackDataPlaneServiceSpec) Default(name string) { + if spec.EDPMServiceType == "" { + spec.EDPMServiceType = name + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +// +kubebuilder:webhook:path=/validate-dataplane-openstack-org-v1beta1-openstackdataplaneservice,mutating=false,failurePolicy=fail,sideEffects=None,groups=dataplane.openstack.org,resources=openstackdataplaneservices,verbs=create;update,versions=v1beta1,name=vopenstackdataplaneservice.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &OpenStackDataPlaneService{} + +func (r *OpenStackDataPlaneService) ValidateCreate() (admission.Warnings, error) { + + openstackdataplaneservicelog.Info("validate create", "name", r.Name) + + errors := r.Spec.ValidateCreate() + + if len(errors) != 0 { + openstackdataplaneservicelog.Info("validation failed", "name", r.Name) + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "dataplane.openstack.org", Kind: "OpenStackDataPlaneService"}, + r.Name, + errors, + ) + } + + return nil, nil +} + +func (r *OpenStackDataPlaneServiceSpec) ValidateCreate() field.ErrorList { + // TODO(user): fill in your validation logic upon object creation. 
+func (r *OpenStackDataPlaneServiceSpec) ValidateUpdate() field.ErrorList {
+	// TODO(user): fill in your validation logic upon object update.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "encoding/json" + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + networkv1beta1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/storage" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleEESpec) DeepCopyInto(out *AnsibleEESpec) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]storage.VolMounts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtraVars != nil { + in, out := &in.ExtraVars, &out.ExtraVars + *out = make(map[string]json.RawMessage, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(v1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = 
make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleEESpec. +func (in *AnsibleEESpec) DeepCopy() *AnsibleEESpec { + if in == nil { + return nil + } + out := new(AnsibleEESpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleOpts) DeepCopyInto(out *AnsibleOpts) { + *out = *in + if in.AnsibleVars != nil { + in, out := &in.AnsibleVars, &out.AnsibleVars + *out = make(map[string]json.RawMessage, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AnsibleVarsFrom != nil { + in, out := &in.AnsibleVarsFrom, &out.AnsibleVarsFrom + *out = make([]DataSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleOpts. +func (in *AnsibleOpts) DeepCopy() *AnsibleOpts { + if in == nil { + return nil + } + out := new(AnsibleOpts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. 
+func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSource) DeepCopyInto(out *DataSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. +func (in *DataSource) DeepCopy() *DataSource { + if in == nil { + return nil + } + out := new(DataSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeSection) DeepCopyInto(out *NodeSection) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]storage.VolMounts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]networkv1beta1.IPSetNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(v1.SecretReference) + **out = **in + } + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(v1.SecretReference) + **out = **in + } + in.Ansible.DeepCopyInto(&out.Ansible) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSection. +func (in *NodeSection) DeepCopy() *NodeSection { + if in == nil { + return nil + } + out := new(NodeSection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeTemplate) DeepCopyInto(out *NodeTemplate) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]storage.VolMounts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]networkv1beta1.IPSetNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(v1.SecretReference) + **out = **in + } + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(v1.SecretReference) + **out = **in + } + in.Ansible.DeepCopyInto(&out.Ansible) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTemplate. 
+func (in *NodeTemplate) DeepCopy() *NodeTemplate { + if in == nil { + return nil + } + out := new(NodeTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackDataPlaneDeployment) DeepCopyInto(out *OpenStackDataPlaneDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneDeployment. +func (in *OpenStackDataPlaneDeployment) DeepCopy() *OpenStackDataPlaneDeployment { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackDataPlaneDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackDataPlaneDeploymentList) DeepCopyInto(out *OpenStackDataPlaneDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackDataPlaneDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneDeploymentList. 
+func (in *OpenStackDataPlaneDeploymentList) DeepCopy() *OpenStackDataPlaneDeploymentList { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackDataPlaneDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackDataPlaneDeploymentSpec) DeepCopyInto(out *OpenStackDataPlaneDeploymentSpec) { + *out = *in + if in.NodeSets != nil { + in, out := &in.NodeSets, &out.NodeSets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BackoffLimit != nil { + in, out := &in.BackoffLimit, &out.BackoffLimit + *out = new(int32) + **out = **in + } + if in.AnsibleExtraVars != nil { + in, out := &in.AnsibleExtraVars, &out.AnsibleExtraVars + *out = make(map[string]json.RawMessage, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.ServicesOverride != nil { + in, out := &in.ServicesOverride, &out.ServicesOverride + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneDeploymentSpec. +func (in *OpenStackDataPlaneDeploymentSpec) DeepCopy() *OpenStackDataPlaneDeploymentSpec { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackDataPlaneDeploymentStatus) DeepCopyInto(out *OpenStackDataPlaneDeploymentStatus) { + *out = *in + if in.NodeSetConditions != nil { + in, out := &in.NodeSetConditions, &out.NodeSetConditions + *out = make(map[string]condition.Conditions, len(*in)) + for key, val := range *in { + var outVal []condition.Condition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + if in.ConfigMapHashes != nil { + in, out := &in.ConfigMapHashes, &out.ConfigMapHashes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SecretHashes != nil { + in, out := &in.SecretHashes, &out.SecretHashes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSetHashes != nil { + in, out := &in.NodeSetHashes, &out.NodeSetHashes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ContainerImages != nil { + in, out := &in.ContainerImages, &out.ContainerImages + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneDeploymentStatus. +func (in *OpenStackDataPlaneDeploymentStatus) DeepCopy() *OpenStackDataPlaneDeploymentStatus { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackDataPlaneNodeSet) DeepCopyInto(out *OpenStackDataPlaneNodeSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneNodeSet. +func (in *OpenStackDataPlaneNodeSet) DeepCopy() *OpenStackDataPlaneNodeSet { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneNodeSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackDataPlaneNodeSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackDataPlaneNodeSetList) DeepCopyInto(out *OpenStackDataPlaneNodeSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackDataPlaneNodeSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneNodeSetList. +func (in *OpenStackDataPlaneNodeSetList) DeepCopy() *OpenStackDataPlaneNodeSetList { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneNodeSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackDataPlaneNodeSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackDataPlaneNodeSetSpec) DeepCopyInto(out *OpenStackDataPlaneNodeSetSpec) { + *out = *in + in.BaremetalSetTemplate.DeepCopyInto(&out.BaremetalSetTemplate) + in.NodeTemplate.DeepCopyInto(&out.NodeTemplate) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make(map[string]NodeSection, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneNodeSetSpec. +func (in *OpenStackDataPlaneNodeSetSpec) DeepCopy() *OpenStackDataPlaneNodeSetSpec { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneNodeSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackDataPlaneNodeSetStatus) DeepCopyInto(out *OpenStackDataPlaneNodeSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeploymentStatuses != nil { + in, out := &in.DeploymentStatuses, &out.DeploymentStatuses + *out = make(map[string]condition.Conditions, len(*in)) + for key, val := range *in { + var outVal []condition.Condition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + if in.AllHostnames != nil { + in, out := &in.AllHostnames, &out.AllHostnames + *out = make(map[string]map[networkv1beta1.NetNameStr]string, len(*in)) + for key, val := range *in { + var outVal map[networkv1beta1.NetNameStr]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(map[networkv1beta1.NetNameStr]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.AllIPs != nil { + in, out := &in.AllIPs, &out.AllIPs + *out = make(map[string]map[networkv1beta1.NetNameStr]string, len(*in)) + for key, val := range *in { + var outVal map[networkv1beta1.NetNameStr]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(map[networkv1beta1.NetNameStr]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.ConfigMapHashes != nil { + in, out := &in.ConfigMapHashes, &out.ConfigMapHashes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SecretHashes != nil { + in, out := &in.SecretHashes, &out.SecretHashes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } 
+ } + if in.DNSClusterAddresses != nil { + in, out := &in.DNSClusterAddresses, &out.DNSClusterAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerImages != nil { + in, out := &in.ContainerImages, &out.ContainerImages + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneNodeSetStatus. +func (in *OpenStackDataPlaneNodeSetStatus) DeepCopy() *OpenStackDataPlaneNodeSetStatus { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneNodeSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackDataPlaneService) DeepCopyInto(out *OpenStackDataPlaneService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneService. +func (in *OpenStackDataPlaneService) DeepCopy() *OpenStackDataPlaneService { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackDataPlaneService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackDataPlaneServiceList) DeepCopyInto(out *OpenStackDataPlaneServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackDataPlaneService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneServiceList. +func (in *OpenStackDataPlaneServiceList) DeepCopy() *OpenStackDataPlaneServiceList { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackDataPlaneServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackDataPlaneServiceSpec) DeepCopyInto(out *OpenStackDataPlaneServiceSpec) { + *out = *in + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = make([]DataSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSCerts != nil { + in, out := &in.TLSCerts, &out.TLSCerts + *out = make(map[string]OpenstackDataPlaneServiceCert, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.ContainerImageFields != nil { + in, out := &in.ContainerImageFields, &out.ContainerImageFields + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneServiceSpec. +func (in *OpenStackDataPlaneServiceSpec) DeepCopy() *OpenStackDataPlaneServiceSpec { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackDataPlaneServiceStatus) DeepCopyInto(out *OpenStackDataPlaneServiceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackDataPlaneServiceStatus. 
+func (in *OpenStackDataPlaneServiceStatus) DeepCopy() *OpenStackDataPlaneServiceStatus { + if in == nil { + return nil + } + out := new(OpenStackDataPlaneServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenstackDataPlaneServiceCert) DeepCopyInto(out *OpenstackDataPlaneServiceCert) { + *out = *in + if in.Contents != nil { + in, out := &in.Contents, &out.Contents + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]networkv1beta1.NetNameStr, len(*in)) + copy(*out, *in) + } + if in.KeyUsages != nil { + in, out := &in.KeyUsages, &out.KeyUsages + *out = make([]certmanagerv1.KeyUsage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackDataPlaneServiceCert. +func (in *OpenstackDataPlaneServiceCert) DeepCopy() *OpenstackDataPlaneServiceCert { + if in == nil { + return nil + } + out := new(OpenstackDataPlaneServiceCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. 
+func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} diff --git a/apis/go.mod b/apis/go.mod index ca6403d7d..de5b413c2 100644 --- a/apis/go.mod +++ b/apis/go.mod @@ -3,6 +3,8 @@ module github.com/openstack-k8s-operators/openstack-operator/apis go 1.20 require ( + github.com/cert-manager/cert-manager v1.13.6 + github.com/go-playground/validator/v10 v10.21.0 github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.33.1 github.com/openstack-k8s-operators/barbican-operator/api v0.0.0-20240603141403-1ad477d065a2 @@ -21,6 +23,7 @@ require ( github.com/openstack-k8s-operators/neutron-operator/api v0.3.1-0.20240610185003-a4d7a93c0982 github.com/openstack-k8s-operators/nova-operator/api v0.3.1-0.20240605161214-e3146f7cd92c github.com/openstack-k8s-operators/octavia-operator/api v0.3.1-0.20240610164852-6fd75398e8ea + github.com/openstack-k8s-operators/openstack-baremetal-operator/api v0.3.1-0.20240604070904-cdec81ca1825 github.com/openstack-k8s-operators/ovn-operator/api v0.3.1-0.20240610180318-22bca1cb3fd4 github.com/openstack-k8s-operators/placement-operator/api v0.3.1-0.20240606155430-0863f223076f github.com/openstack-k8s-operators/swift-operator/api v0.3.1-0.20240604073634-259c9bde9cd1 @@ -30,7 +33,7 @@ require ( github.com/rhobs/observability-operator v0.0.28 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 // indirect + golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 golang.org/x/tools v0.21.0 // indirect k8s.io/api v0.28.10 k8s.io/apimachinery v0.28.10 @@ -47,11 +50,14 @@ require ( github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/zapr v1.3.0 // 
indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -65,7 +71,10 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/metal3-io/baremetal-operator/apis v0.5.1 // indirect + github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -78,6 +87,7 @@ require ( github.com/prometheus/procfs v0.12.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sys v0.20.0 // indirect @@ -94,6 +104,7 @@ require ( k8s.io/component-base v0.28.10 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/gateway-api v0.8.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/apis/go.sum b/apis/go.sum index 1e3853950..73e0d96d3 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -1,5 +1,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks 
v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cert-manager/cert-manager v1.13.6 h1:yngKM4ZQoyFQ3LGHTx95fWqyiNJP11UM5PWq7pAr3T0= +github.com/cert-manager/cert-manager v1.13.6/go.mod h1:iWFePja8XKEl+Dv1kZtwPshT8D0SmC4Hyu5Qc5KS0tM= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -13,6 +15,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -23,6 +27,13 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= 
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.21.0 h1:4fZA11ovvtkdgaeev9RGWPgc1uj3H8W+rNYyH/ySBb0= +github.com/go-playground/validator/v10 v10.21.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -58,8 +69,14 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/metal3-io/baremetal-operator/apis v0.5.1 h1:l6VCuM2nSYMsdir3mocXvF80F7HnTXVZ7NNIoMEYbio= +github.com/metal3-io/baremetal-operator/apis v0.5.1/go.mod h1:Q3MHes59mRabjHM6ARoHfgd2uXUjJIytl3/uflzhyew= +github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.4.0 h1:AnA8XLLp3RKYjjlB4KI0fyPSDN/d5gb3ZtM2cVyxwOc= +github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.4.0/go.mod h1:399nvdaqoU9rTI25UdFw2EWcVjmJPpeZPIhfDAIx/XU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -107,6 +124,8 @@ github.com/openstack-k8s-operators/nova-operator/api v0.3.1-0.20240605161214-e31 github.com/openstack-k8s-operators/nova-operator/api v0.3.1-0.20240605161214-e3146f7cd92c/go.mod h1:z9EogVqyRE6uwlyIQ0xYlM0CA8XokFITRspebZEsD98= github.com/openstack-k8s-operators/octavia-operator/api v0.3.1-0.20240610164852-6fd75398e8ea h1:n9WEA32gAI6M7mIjOm93M4eVq4eW/K8B4V1RK8YSDDk= github.com/openstack-k8s-operators/octavia-operator/api v0.3.1-0.20240610164852-6fd75398e8ea/go.mod h1:h03g3HXawiEFYu4r9MD79RXrjc+UGivcHo/Sq2ByUC0= +github.com/openstack-k8s-operators/openstack-baremetal-operator/api v0.3.1-0.20240604070904-cdec81ca1825 h1:kr9NflFeXZfNBmx2ilSbcsXn8uI9Qx+RzOQXNiCQnX8= +github.com/openstack-k8s-operators/openstack-baremetal-operator/api v0.3.1-0.20240604070904-cdec81ca1825/go.mod h1:h6Nak5Gev8lUM/5aKpjgDXy9Rj3/iICEySV+NPYr7Nw= github.com/openstack-k8s-operators/ovn-operator/api v0.3.1-0.20240610180318-22bca1cb3fd4 h1:7riSCcK0gEuUGN/z6FHEVwjGvUZS4VEcrPNMmYwGfRc= github.com/openstack-k8s-operators/ovn-operator/api v0.3.1-0.20240610180318-22bca1cb3fd4/go.mod h1:A9xAbyixV8iqr34GV+Rqz1JUVIAmOtgJ02XTwxA0xx8= github.com/openstack-k8s-operators/placement-operator/api v0.3.1-0.20240606155430-0863f223076f h1:VQUN77K8zKXFSIeDbfPJD5PZ9iEB2oTIgTa9h75Hf00= @@ -154,6 +173,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -243,6 +264,8 @@ k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCI k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.16.6 h1:FiXwTuFF5ZJKmozfP2Z0j7dh6kmxP4Ou1KLfxgKKC3I= sigs.k8s.io/controller-runtime v0.16.6/go.mod h1:+dQzkZxnylD0u49e0a+7AR+vlibEBaThmPca7lTyUsI= +sigs.k8s.io/gateway-api v0.8.0 h1:isQQ3Jx2qFP7vaA3ls0846F0Amp9Eq14P08xbSwVbQg= +sigs.k8s.io/gateway-api v0.8.0/go.mod h1:okOnjPNBFbIS/Rw9kAhuIUaIkLhTKEu+ARIuXk2dgaM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/cmd/csv-merger/csv-merger.go b/cmd/csv-merger/csv-merger.go index 2b9d22346..e5d9efb5f 100644 --- a/cmd/csv-merger/csv-merger.go +++ b/cmd/csv-merger/csv-merger.go @@ -79,7 +79,6 @@ var ( rabbitmqCsv = flag.String("rabbitmq-csv", "", "RabbitMQ CSV filename") infraCsv = flag.String("infra-csv", "", "Infra CSV filename") ansibleEECsv = flag.String("ansibleee-csv", "", "Ansible EE CSV filename") - dataplaneCsv = flag.String("dataplane-csv", "", "Data plane CSV filename") novaCsv = flag.String("nova-csv", "", "Nova CSV filename") heatCsv = flag.String("heat-csv", "", "Heat CSV filename") neutronCsv = flag.String("neutron-csv", "", "Neutron CSV filename") @@ -127,7 +126,6 @@ func main() { *rabbitmqCsv, *infraCsv, *ansibleEECsv, - *dataplaneCsv, *novaCsv, *neutronCsv, *manilaCsv, diff --git 
a/config/crd/bases/dataplane.openstack.org_openstackdataplanedeployments.yaml b/config/crd/bases/dataplane.openstack.org_openstackdataplanedeployments.yaml new file mode 100644 index 000000000..e1cea05f8 --- /dev/null +++ b/config/crd/bases/dataplane.openstack.org_openstackdataplanedeployments.yaml @@ -0,0 +1,155 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: openstackdataplanedeployments.dataplane.openstack.org +spec: + group: dataplane.openstack.org + names: + kind: OpenStackDataPlaneDeployment + listKind: OpenStackDataPlaneDeploymentList + plural: openstackdataplanedeployments + shortNames: + - osdpd + - osdpdeployment + - osdpdeployments + singular: openstackdataplanedeployment + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: NodeSets + jsonPath: .spec.nodeSets + name: NodeSets + type: string + - description: Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ansibleExtraVars: + x-kubernetes-preserve-unknown-fields: true + ansibleLimit: + type: string + ansibleSkipTags: + type: string + ansibleTags: + type: string + backoffLimit: + default: 6 + format: int32 + type: integer + deploymentRequeueTime: + default: 15 + minimum: 1 + type: integer + nodeSets: + items: + type: string + minItems: 1 + type: array + servicesOverride: + items: + type: string + type: array + required: + - deploymentRequeueTime + - nodeSets + type: object + x-kubernetes-validations: + - message: OpenStackDataPlaneDeployment Spec is immutable + rule: self == oldSelf + status: + properties: + conditions: + items: + properties: + 
lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + configMapHashes: + additionalProperties: + type: string + type: object + containerImages: + additionalProperties: + type: string + type: object + deployed: + type: boolean + deployedVersion: + type: string + nodeSetConditions: + additionalProperties: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + nodeSetHashes: + additionalProperties: + type: string + type: object + observedGeneration: + format: int64 + type: integer + secretHashes: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/dataplane.openstack.org_openstackdataplanenodesets.yaml b/config/crd/bases/dataplane.openstack.org_openstackdataplanenodesets.yaml new file mode 100644 index 000000000..a2cfc7d69 --- /dev/null +++ b/config/crd/bases/dataplane.openstack.org_openstackdataplanenodesets.yaml @@ -0,0 +1,2045 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: openstackdataplanenodesets.dataplane.openstack.org +spec: + group: dataplane.openstack.org + names: + kind: OpenStackDataPlaneNodeSet + listKind: OpenStackDataPlaneNodeSetList + plural: openstackdataplanenodesets + shortNames: + - osdpns + - osdpnodeset + - osdpnodesets + singular: openstackdataplanenodeset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: 
Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + baremetalSetTemplate: + properties: + agentImageUrl: + type: string + apacheImageUrl: + type: string + automatedCleaningMode: + default: metadata + enum: + - metadata + - disabled + type: string + baremetalHosts: + additionalProperties: + properties: + ctlPlaneIP: + type: string + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + preprovisioningNetworkDataName: + type: string + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + bmhLabelSelector: + additionalProperties: + type: string + type: object + bmhNamespace: + default: openshift-machine-api + type: string + bootstrapDns: + items: + type: string + type: array + cloudUserName: + default: cloud-admin + type: string + ctlplaneGateway: + type: string + ctlplaneInterface: + type: string + ctlplaneNetmask: + default: 255.255.255.0 + type: string + deploymentSSHSecret: + type: string + dnsSearchDomains: + items: + type: string + type: array + domainName: + type: string + hardwareReqs: + properties: + cpuReqs: + properties: + arch: + enum: + - x86_64 + - ppc64le + type: string + countReq: + properties: + count: + minimum: 1 + type: integer + exactMatch: + type: boolean + type: object + mhzReq: + properties: + exactMatch: + type: boolean + mhz: + minimum: 1 + type: integer + type: object + type: object + diskReqs: + properties: + gbReq: + properties: + exactMatch: + type: boolean + gb: + minimum: 1 + type: integer + type: object + ssdReq: + properties: + exactMatch: + type: boolean + ssd: + type: boolean + 
type: object + type: object + memReqs: + properties: + gbReq: + properties: + exactMatch: + type: boolean + gb: + minimum: 1 + type: integer + type: object + type: object + type: object + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + osContainerImageUrl: + type: string + osImage: + type: string + passwordSecret: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + provisionServerName: + type: string + provisioningInterface: + type: string + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - cloudUserName + - ctlplaneInterface + - deploymentSSHSecret + type: object + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + networkAttachments: + items: + type: string + type: array + nodeTemplate: + properties: + ansible: + properties: + ansibleHost: + type: string + 
ansiblePort: + type: integer + ansibleUser: + type: string + ansibleVars: + x-kubernetes-preserve-unknown-fields: true + ansibleVarsFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + type: object + ansibleSSHPrivateKeySecret: + type: string + extraMounts: + items: + properties: + extraVolType: + type: string + mounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + propagation: + items: + type: string + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: 
string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + 
type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + 
secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: 
integer + sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + 
type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - mounts + - volumes + type: object + type: array + managementNetwork: + default: ctlplane + type: string + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + networks: + items: + properties: + defaultRoute: + type: boolean + fixedIP: + type: string + name: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + subnetName: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + required: + - name + - subnetName + type: object + 
type: array + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ansibleSSHPrivateKeySecret + type: object + nodes: + additionalProperties: + properties: + ansible: + properties: + ansibleHost: + type: string + ansiblePort: + type: integer + ansibleUser: + type: string + ansibleVars: + x-kubernetes-preserve-unknown-fields: true + ansibleVarsFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + type: object + extraMounts: + items: + properties: + extraVolType: + type: string + mounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + propagation: + items: + type: string + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + 
name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + 
readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + 
properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: 
string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - mounts + - volumes + type: object + type: array + hostName: + type: string + managementNetwork: + type: string + networkData: + properties: + name: + type: string + namespace: + type: string + type: object + 
x-kubernetes-map-type: atomic + networks: + items: + properties: + defaultRoute: + type: boolean + fixedIP: + type: string + name: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + subnetName: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + required: + - name + - subnetName + type: object + type: array + preprovisioningNetworkDataName: + type: string + userData: + properties: + name: + type: string + namespace: + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + preProvisioned: + type: boolean + secretMaxSize: + default: 1048576 + type: integer + services: + default: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + - telemetry + items: + type: string + type: array + tags: + items: + type: string + type: array + tlsEnabled: + default: true + type: boolean + required: + - nodeTemplate + - nodes + type: object + status: + properties: + allHostnames: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + allIPs: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + configHash: + type: string + configMapHashes: + additionalProperties: + type: string + type: object + containerImages: + additionalProperties: + type: string + type: object + ctlplaneSearchDomain: + type: string + deployedConfigHash: + type: string + deployedVersion: + type: string + deploymentStatuses: + additionalProperties: + items: + properties: + lastTransitionTime: + format: 
date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + dnsClusterAddresses: + items: + type: string + type: array + observedGeneration: + format: int64 + type: integer + secretHashes: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/dataplane.openstack.org_openstackdataplaneservices.yaml b/config/crd/bases/dataplane.openstack.org_openstackdataplaneservices.yaml new file mode 100644 index 000000000..940c4d7f2 --- /dev/null +++ b/config/crd/bases/dataplane.openstack.org_openstackdataplaneservices.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: openstackdataplaneservices.dataplane.openstack.org +spec: + group: dataplane.openstack.org + names: + kind: OpenStackDataPlaneService + listKind: OpenStackDataPlaneServiceList + plural: openstackdataplaneservices + shortNames: + - osdps + - osdpservice + - osdpservices + singular: openstackdataplaneservice + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + addCertMounts: + default: false + type: boolean + caCerts: + type: string + certsFrom: + type: string + configMaps: + items: + type: string + type: array + containerImageFields: + items: + type: string + type: array + dataSources: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + type: string + 
optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + deployOnAllNodeSets: + type: boolean + edpmServiceType: + type: string + openStackAnsibleEERunnerImage: + type: string + playbook: + type: string + playbookContents: + type: string + secrets: + items: + type: string + type: array + tlsCerts: + additionalProperties: + properties: + contents: + items: + type: string + minItems: 1 + type: array + edpmRoleServiceName: + type: string + issuer: + type: string + keyUsages: + items: + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + networks: + items: + pattern: ^[a-zA-Z0-9][a-zA-Z0-9\-_]*[a-zA-Z0-9]$ + type: string + type: array + required: + - contents + type: object + type: object + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index cface3851..d4049952f 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,9 @@ resources: - bases/core.openstack.org_openstackcontrolplanes.yaml - bases/core.openstack.org_openstackversions.yaml - bases/client.openstack.org_openstackclients.yaml +- bases/dataplane.openstack.org_openstackdataplanenodesets.yaml +- 
bases/dataplane.openstack.org_openstackdataplaneservices.yaml +- bases/dataplane.openstack.org_openstackdataplanedeployments.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/config/default/manager_default_images.yaml b/config/default/manager_default_images.yaml index 6d4b8c21c..b56ab8f02 100644 --- a/config/default/manager_default_images.yaml +++ b/config/default/manager_default_images.yaml @@ -15,3 +15,25 @@ spec: value: quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified - name: RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT value: quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified + - name: RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-frr:current-podified + - name: RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-iscsid:current-podified + - name: RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-cron:current-podified + - name: RELATED_IMAGE_EDPM_OVN_CONTROLLER_AGENT_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified + - name: RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified + - name: RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified + - name: RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified + - name: RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified + - name: RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified + 
- name: RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT + value: quay.io/prometheus/node-exporter:v1.5.0 + - name: RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-multipathd:current-podified diff --git a/config/manifests/bases/openstack-operator.clusterserviceversion.yaml b/config/manifests/bases/openstack-operator.clusterserviceversion.yaml index af31d6307..44148d95c 100644 --- a/config/manifests/bases/openstack-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/openstack-operator.clusterserviceversion.yaml @@ -495,6 +495,92 @@ spec: displayName: TLS path: tls version: v1beta1 + - description: OpenStackDataPlaneDeployment is the Schema for the openstackdataplanedeployments + API OpenStackDataPlaneDeployment name must be a valid RFC1123 as it is used + in labels + displayName: OpenStack Data Plane Deployments + kind: OpenStackDataPlaneDeployment + name: openstackdataplanedeployments.dataplane.openstack.org + specDescriptors: + - description: BackoffLimit allows to define the maximum number of retried executions + (defaults to 6). 
+ displayName: Backoff Limit + path: backoffLimit + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + statusDescriptors: + - description: Conditions + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + - description: Deployed + displayName: Deployed + path: deployed + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + version: v1beta1 + - description: OpenStackDataPlaneNodeSet is the Schema for the openstackdataplanenodesets + API OpenStackDataPlaneNodeSet name must be a valid RFC1123 as it is used in + labels + displayName: OpenStack Data Plane NodeSet + kind: OpenStackDataPlaneNodeSet + name: openstackdataplanenodesets.dataplane.openstack.org + specDescriptors: + - description: AnsiblePort SSH port for Ansible connection + displayName: Ansible Port + path: nodeTemplate.ansible.ansiblePort + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: 'AnsibleSSHPrivateKeySecret Name of a private SSH key secret + containing private SSH key for connecting to node. The named secret must + be of the form: Secret.data.ssh-privatekey: ' + displayName: Ansible SSHPrivate Key Secret + path: nodeTemplate.ansibleSSHPrivateKeySecret + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: AnsiblePort SSH port for Ansible connection + displayName: Ansible Port + path: nodes.ansible.ansiblePort + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: PreProvisioned - Set to true if the nodes have been Pre Provisioned. + displayName: Pre Provisioned + path: preProvisioned + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: TLSEnabled - Whether the node set has TLS enabled. 
+ displayName: TLSEnabled + path: tlsEnabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + statusDescriptors: + - description: Conditions + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + - description: OpenStackDataPlaneService is the Schema for the openstackdataplaneservices + API OpenStackDataPlaneService name must be a valid RFC1123 as it is used in + labels + displayName: OpenStack Data Plane Service + kind: OpenStackDataPlaneService + name: openstackdataplaneservices.dataplane.openstack.org + specDescriptors: + - description: AddCertMounts - Whether to add cert mounts + displayName: Add Cert Mounts + path: addCertMounts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + statusDescriptors: + - description: Conditions + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 - description: OpenStackVersion is the Schema for the openstackversionupdates API displayName: OpenStack Version diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 324676894..fb4904f40 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -5,6 +5,54 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - "" + resources: + - imagestreamimages + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - imagestreammappings + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - imagestreams + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - imagestreams/layers + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - imagestreamtags + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - imagetags + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -17,6 +65,12 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - projects + 
verbs: + - get - apiGroups: - "" resources: @@ -27,6 +81,18 @@ rules: - list - update - watch +- apiGroups: + - ansibleee.openstack.org + resources: + - openstackansibleees + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - barbican.openstack.org resources: @@ -39,6 +105,42 @@ rules: - patch - update - watch +- apiGroups: + - baremetal.openstack.org + resources: + - openstackbaremetalsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - baremetal.openstack.org + resources: + - openstackbaremetalsets/finalizers + verbs: + - update +- apiGroups: + - baremetal.openstack.org + resources: + - openstackbaremetalsets/status + verbs: + - get +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - cert-manager.io resources: @@ -106,8 +208,12 @@ rules: resources: - configmaps verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - "" @@ -126,8 +232,13 @@ rules: resources: - services verbs: + - create + - delete - get - list + - patch + - update + - watch - apiGroups: - core.openstack.org resources: @@ -180,14 +291,73 @@ rules: - get - patch - update +- apiGroups: + - dataplane.openstack.org + resources: + - openstackdataplanedeployments + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - dataplane.openstack.org + resources: + - openstackdataplanedeployments/finalizers + verbs: + - update +- apiGroups: + - dataplane.openstack.org + resources: + - openstackdataplanedeployments/status + verbs: + - get + - patch + - update - apiGroups: - dataplane.openstack.org resources: - openstackdataplanenodesets verbs: + - create + - delete - get - list + - patch + - update - watch +- apiGroups: + - dataplane.openstack.org + resources: + - openstackdataplanenodesets/finalizers + verbs: + - update +- apiGroups: + - dataplane.openstack.org + resources: + - 
openstackdataplanenodesets/status + verbs: + - get + - patch + - update +- apiGroups: + - dataplane.openstack.org + resources: + - openstackdataplaneservices + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - dataplane.openstack.org + resources: + - openstackdataplaneservices/finalizers + verbs: + - update - apiGroups: - designate.openstack.org resources: @@ -200,6 +370,18 @@ rules: - patch - update - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - glance.openstack.org resources: @@ -236,6 +418,52 @@ rules: - patch - update - watch +- apiGroups: + - image.openshift.io + resources: + - imagestreamimages + verbs: + - get + - list + - watch +- apiGroups: + - image.openshift.io + resources: + - imagestreammappings + verbs: + - get + - list + - watch +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch +- apiGroups: + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get +- apiGroups: + - image.openshift.io + resources: + - imagestreamtags + verbs: + - get + - list + - watch +- apiGroups: + - image.openshift.io + resources: + - imagetags + verbs: + - get + - list + - watch - apiGroups: - ironic.openstack.org resources: @@ -248,6 +476,14 @@ rules: - patch - update - watch +- apiGroups: + - k8s.cni.cncf.io + resources: + - network-attachment-definitions + verbs: + - get + - list + - watch - apiGroups: - keystone.openstack.org resources: @@ -296,6 +532,30 @@ rules: - patch - update - watch +- apiGroups: + - network.openstack.org + resources: + - dnsdata + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - network.openstack.org + resources: + - dnsdata/finalizers + verbs: + - update +- apiGroups: + - network.openstack.org + resources: + - dnsdata/status + verbs: + - get - apiGroups: - network.openstack.org 
resources: @@ -308,6 +568,44 @@ rules: - patch - update - watch +- apiGroups: + - network.openstack.org + resources: + - dnsmasqs/status + verbs: + - get +- apiGroups: + - network.openstack.org + resources: + - ipsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - network.openstack.org + resources: + - ipsets/finalizers + verbs: + - update +- apiGroups: + - network.openstack.org + resources: + - ipsets/status + verbs: + - get +- apiGroups: + - network.openstack.org + resources: + - netconfigs + verbs: + - get + - list + - watch - apiGroups: - neutron.openstack.org resources: @@ -392,6 +690,12 @@ rules: - patch - update - watch +- apiGroups: + - project.openshift.io + resources: + - projects + verbs: + - get - apiGroups: - rabbitmq.com resources: @@ -412,6 +716,7 @@ rules: - create - get - list + - patch - update - watch - apiGroups: diff --git a/config/samples/dataplane/README.md b/config/samples/dataplane/README.md new file mode 100644 index 000000000..f44f7f276 --- /dev/null +++ b/config/samples/dataplane/README.md @@ -0,0 +1,6 @@ +# Kustomize examples + +Requires [OpenShift CLI](https://docs.openshift.com/container-platform/4.14/cli_reference/openshift_cli/getting-started-cli.html#installing-openshift-cli) (oc) 4.14 or higher +``` +oc kustomize --load-restrictor LoadRestrictionsNone examples/ +``` diff --git a/config/samples/dataplane/baremetal/kustomization.yaml b/config/samples/dataplane/baremetal/kustomization.yaml new file mode 100644 index 000000000..19ed67a1b --- /dev/null +++ b/config/samples/dataplane/baremetal/kustomization.yaml @@ -0,0 +1,68 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -ipam + +components: +- ../base + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-baremetal-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.edpm_sshd_allowed_ranges 
+ targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.edpm_sshd_allowed_ranges + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-baremetal-values + fieldPath: data.nodeset.baremetalsettemplate + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.baremetalSetTemplate + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-baremetal-values + fieldPath: data.preProvisioned + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.preProvisioned + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-baremetal-values + fieldPath: data.nodeset.nodetemplate.networks + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.networks + options: + create: true + +patches: +- target: + kind: OpenStackDataPlaneNodeSet + name: .* + patch: |- + - op: remove + path: /spec/nodes/edpm-compute-0/ansible + - op: remove + path: /spec/nodes/edpm-compute-0/networks diff --git a/config/samples/dataplane/baremetal/values.yaml b/config/samples/dataplane/baremetal/values.yaml new file mode 100644 index 000000000..18b5f9c35 --- /dev/null +++ b/config/samples/dataplane/baremetal/values.yaml @@ -0,0 +1,33 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-baremetal-values + annotations: + config.kubernetes.io/local-config: "true" +data: + preProvisioned: false + nodeset: + baremetalsettemplate: + bmhLabelSelector: + app: openstack + ctlplaneInterface: enp1s0 + cloudUserName: cloud-admin + nodetemplate: + ansible: + vars: + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_allowed_ranges: ['192.168.111.0/24'] + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + 
nodes: + edpm-compute-0: + hostName: edpm-compute-0 diff --git a/config/samples/dataplane/base/config/nodesetname.yaml b/config/samples/dataplane/base/config/nodesetname.yaml new file mode 100644 index 000000000..317abd17c --- /dev/null +++ b/config/samples/dataplane/base/config/nodesetname.yaml @@ -0,0 +1,10 @@ +# This file is for teaching kustomize how to substitute OpenStackDataPlaneNodeSet name reference in OpenStackDataPlaneDeployment +nameReference: +- kind: OpenStackDataPlaneNodeSet + version: v1beta1 + group: dataplane.openstack.org + fieldSpecs: + - kind: OpenStackDataPlaneDeployment + version: v1beta1 + group: dataplane.openstack.org + path: spec/nodeSets diff --git a/config/samples/dataplane/base/config/varsfromname.yaml b/config/samples/dataplane/base/config/varsfromname.yaml new file mode 100644 index 000000000..35be02c95 --- /dev/null +++ b/config/samples/dataplane/base/config/varsfromname.yaml @@ -0,0 +1,13 @@ +# This file is for teaching kustomize how to substitute ansibleVarsFrom name reference in OpenStackDataPlaneNodeSet +nameReference: +- kind: ConfigMap + version: v1 + fieldSpecs: + - kind: OpenStackDataPlaneNodeSet + version: v1beta1 + group: dataplane.openstack.org + path: spec/nodeTemplate/ansible/ansibleVarsFrom/configMapRef/name + - kind: OpenStackDataPlaneNodeSet + version: v1beta1 + group: dataplane.openstack.org + path: spec/nodeTemplate/ansible/ansibleVarsFrom/secretRef/name diff --git a/config/samples/dataplane/base/files/nic-config.j2 b/config/samples/dataplane/base/files/nic-config.j2 new file mode 100644 index 000000000..9fb986c1e --- /dev/null +++ b/config/samples/dataplane/base/files/nic-config.j2 @@ -0,0 +1,31 @@ +--- +{% set mtu_list = [ctlplane_mtu] %} +{% for network in nodeset_networks %} +{{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} +{%- endfor %} +{% set min_viable_mtu = mtu_list | max %} +network_config: +- type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + 
use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true +{% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} +{% endfor %} diff --git a/config/samples/dataplane/base/kustomization.yaml b/config/samples/dataplane/base/kustomization.yaml new file mode 100644 index 000000000..2b66b3e30 --- /dev/null +++ b/config/samples/dataplane/base/kustomization.yaml @@ -0,0 +1,25 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +resources: +- ../../dataplane_v1beta1_openstackdataplanenodeset.yaml +- ../../dataplane_v1beta1_openstackdataplanedeployment.yaml + +namespace: openstack + +configMapGenerator: +- name: network-config-template + files: + - network_config_template=files/nic-config.j2 + options: + disableNameSuffixHash: true +- name: neutron-edpm + literals: + - physical_bridge_name=br-ex + - public_interface_name=eth0 + options: + disableNameSuffixHash: true + +configurations: +- config/nodesetname.yaml +- config/varsfromname.yaml diff --git a/config/samples/dataplane/bgp/kustomization.yaml b/config/samples/dataplane/bgp/kustomization.yaml new file mode 100644 index 000000000..bbb833661 --- /dev/null +++ b/config/samples/dataplane/bgp/kustomization.yaml @@ -0,0 +1,53 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +components: +- ../preprovisioned # for baremetal nodes, replace with 
baremetal
+
+resources:
+  - values.yaml
+
+replacements:
+# OpenStackDataPlaneNodeSet customizations
+- source:
+    kind: DataPlaneConfig
+    name: edpm-bgp-values
+    fieldPath: data.nodeset.nodetemplate.ansible.vars
+  targets:
+  - select:
+      kind: OpenStackDataPlaneNodeSet
+    fieldPaths:
+    - spec.nodeTemplate.ansible.ansibleVars
+    options:
+      create: true
+- source:
+    kind: DataPlaneConfig
+    name: edpm-bgp-values
+    fieldPath: data.nodeset.services
+  targets:
+  - select:
+      kind: OpenStackDataPlaneNodeSet
+    fieldPaths:
+    - spec.services
+    options:
+      create: true
+
+patches:
+- target:
+    kind: OpenStackDataPlaneNodeSet
+    name: .*
+  patch: |-
+    - op: copy
+      from: /spec/nodes/edpm-compute-0
+      path: /spec/nodes/edpm-compute-1
+    - op: replace
+      path: /spec/nodes/edpm-compute-1/ansible/ansibleHost
+      value: 192.168.122.101
+    - op: replace
+      path: /spec/nodes/edpm-compute-1/hostName
+      value: edpm-compute-1
+    - op: replace
+      path: /spec/nodes/edpm-compute-1/networks/0/fixedIP
+      value: 192.168.122.101
diff --git a/config/samples/dataplane/bgp/values.yaml b/config/samples/dataplane/bgp/values.yaml
new file mode 100644
index 000000000..571373634
--- /dev/null
+++ b/config/samples/dataplane/bgp/values.yaml
@@ -0,0 +1,104 @@
+# local-config: referenced, but not emitted by kustomize
+apiVersion: v1
+kind: DataPlaneConfig
+metadata:
+  name: edpm-bgp-values
+  annotations:
+    config.kubernetes.io/local-config: "true"
+data:
+  nodeset:
+    nodetemplate:
+      ansible:
+        vars:
+          # CHANGEME -- see https://access.redhat.com/solutions/253273
+          # edpm_bootstrap_command: |
+          #   subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }}
+          #   podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io
+          timesync_ntp_servers:
+          - hostname: pool.ntp.org
+          # edpm_network_config
+          # Default nic config template for an EDPM compute node
+          # These vars are edpm_network_config role vars
+          edpm_network_config_hide_sensitive_logs:
false + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: interface + name: nic1 + mtu: {{ ctlplane_mtu }} + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + use_dhcp: false + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + {% for network in nodeset_networks %} + {% if lookup('vars', networks_lower[network] ~ '_vlan_id', default='') %} + - type: vlan + device: nic1 + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endif %} + {%- endfor %} + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + use_dhcp: false + - type: interface + name: nic2 + use_dhcp: false + addresses: + - ip_netmask: {{ lookup('vars', 'bgpnet1_ip') }}/30 + - type: interface + name: nic3 + use_dhcp: false + addresses: + - ip_netmask: {{ lookup('vars', 'bgpnet2_ip') }}/30 + - type: interface + name: lo + addresses: + - ip_netmask: {{ lookup('vars', 'bgpmainnet_ip') }}/32 + - ip_netmask: {{ lookup('vars', 'bgpmainnet6_ip') }}/128 + + # These vars are for the network config templates themselves and are + # considered EDPM network defaults. 
+ neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + # edpm_nodes_validation + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + ctlplane_dns_nameservers: + - 192.168.122.1 + dns_search_domains: [] + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + edpm_frr_bgp_uplinks: ['nic2', 'nic3'] + edpm_frr_bgp_neighbor_password: f00barZ + edpm_frr_bgp_ipv4_src_network: bgpmainnet + edpm_frr_bgp_ipv6_src_network: bgpmainnet6 + edpm_ovn_bgp_agent_expose_tenant_networks: true + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - frr + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - ovn-bgp-agent + - libvirt + - nova + - telemetry diff --git a/config/samples/dataplane/bgp_ovn_cluster/kustomization.yaml b/config/samples/dataplane/bgp_ovn_cluster/kustomization.yaml new file mode 100644 index 000000000..249d23f65 --- /dev/null +++ b/config/samples/dataplane/bgp_ovn_cluster/kustomization.yaml @@ -0,0 +1,53 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-bgp-ovn-cluster-values + fieldPath: data.nodeset.nodetemplate.ansible.vars + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-bgp-ovn-cluster-values + fieldPath: data.nodeset.services + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.services + options: + create: true + +patches: +- target: + kind: OpenStackDataPlaneNodeSet + name: .* + patch: |- + - op: copy + 
from: /spec/nodes/edpm-compute-0
+      path: /spec/nodes/edpm-compute-1
+    - op: replace
+      path: /spec/nodes/edpm-compute-1/ansible/ansibleHost
+      value: 192.168.122.101
+    - op: replace
+      path: /spec/nodes/edpm-compute-1/hostName
+      value: edpm-compute-1
+    - op: replace
+      path: /spec/nodes/edpm-compute-1/networks/0/fixedIP
+      value: 192.168.122.101
diff --git a/config/samples/dataplane/bgp_ovn_cluster/values.yaml b/config/samples/dataplane/bgp_ovn_cluster/values.yaml
new file mode 100644
index 000000000..a8c6b7eb3
--- /dev/null
+++ b/config/samples/dataplane/bgp_ovn_cluster/values.yaml
@@ -0,0 +1,128 @@
+# local-config: referenced, but not emitted by kustomize
+apiVersion: v1
+kind: DataPlaneConfig
+metadata:
+  name: edpm-bgp-ovn-cluster-values
+  annotations:
+    config.kubernetes.io/local-config: "true"
+data:
+  nodeset:
+    nodetemplate:
+      ansible:
+        vars:
+          # CHANGEME -- see https://access.redhat.com/solutions/253273
+          # edpm_bootstrap_command: |
+          #   subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }}
+          #   podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io
+          timesync_ntp_servers:
+          - hostname: pool.ntp.org
+          # edpm_network_config
+          # Default nic config template for an EDPM compute node
+          # These vars are edpm_network_config role vars
+          edpm_network_config_hide_sensitive_logs: false
+          edpm_network_config_template: |
+            ---
+            {% set mtu_list = [ctlplane_mtu] %}
+            {% for network in nodeset_networks %}
+            {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }}
+            {%- endfor %}
+            {% set min_viable_mtu = mtu_list | max %}
+            network_config:
+            - type: interface
+              name: nic1
+              mtu: {{ ctlplane_mtu }}
+              dns_servers: {{ ctlplane_dns_nameservers }}
+              domain: {{ dns_search_domains }}
+              use_dhcp: false
+              addresses:
+              - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }}
+            {% for network in nodeset_networks %}
+            {% if lookup('vars', networks_lower[network] ~ '_vlan_id',
default='') %} + - type: vlan + device: nic1 + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endif %} + {%- endfor %} + - type: ovs_bridge + name: br-provider + use_dhcp: false + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + addresses: + - ip_netmask: {{ lookup('vars', 'bgpnet1_ip') }}/30 + members: + - type: interface + name: nic2 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }}-2 + mtu: {{ min_viable_mtu }} + use_dhcp: false + addresses: + - ip_netmask: {{ lookup('vars', 'bgpnet2_ip') }}/30 + members: + - type: interface + name: nic3 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + - type: interface + name: lo + addresses: + - ip_netmask: {{ lookup('vars', 'bgpmainnet_ip') }}/32 + - ip_netmask: {{ lookup('vars', 'bgpmainnet6_ip') }}/128 + + # These vars are for the network config templates themselves and are + # considered EDPM network defaults. 
+ neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + # edpm_nodes_validation + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + ctlplane_dns_nameservers: + - 192.168.122.1 + dns_search_domains: [] + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + edpm_frr_bgp_uplinks: ['nic2', 'nic3'] + edpm_frr_bgp_neighbor_password: f00barZ + edpm_frr_bgp_ipv4_src_network: bgpmainnet + edpm_frr_bgp_ipv6_src_network: bgpmainnet6 + edpm_frr_bgp_peers: ['100.64.1.5', '100.65.1.5'] + edpm_ovn_bgp_agent_expose_tenant_networks: true + edpm_ovn_bgp_agent_local_ovn_routing: true + edpm_ovn_bridge_mappings: ['bgp:br-provider'] + edpm_ovn_bgp_agent_local_ovn_external_nics: ['eth1', 'eth2'] + edpm_ovn_bgp_agent_local_ovn_peer_ips: ['100.64.1.5', '100.65.1.5'] + edpm_ovn_bgp_agent_exposing_method: ovn + edpm_ovn_bgp_agent_provider_networks_pool_prefixes: '172.16.0.0/16' + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - frr + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - ovn-bgp-agent + - libvirt + - nova + - telemetry diff --git a/config/samples/dataplane/ceph/kustomization.yaml b/config/samples/dataplane/ceph/kustomization.yaml new file mode 100644 index 000000000..24d140900 --- /dev/null +++ b/config/samples/dataplane/ceph/kustomization.yaml @@ -0,0 +1,66 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -ceph + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodeset.services + targets: + - select: + 
kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.services + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodetemplate.extramounts + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.extraMounts + options: + create: true + +patches: +- target: + kind: OpenStackDataPlaneNodeSet + name: .* + patch: |- + - op: copy + from: /spec/nodes/edpm-compute-0 + path: /spec/nodes/edpm-compute-1 + - op: replace + path: /spec/nodes/edpm-compute-1/ansible/ansibleHost + value: 192.168.122.101 + - op: replace + path: /spec/nodes/edpm-compute-1/hostName + value: edpm-compute-1 + - op: replace + path: /spec/nodes/edpm-compute-1/networks/0/fixedIP + value: 192.168.122.101 + - op: copy + from: /spec/nodes/edpm-compute-0 + path: /spec/nodes/edpm-compute-2 + - op: replace + path: /spec/nodes/edpm-compute-2/ansible/ansibleHost + value: 192.168.122.102 + - op: replace + path: /spec/nodes/edpm-compute-2/hostName + value: edpm-compute-2 + - op: replace + path: /spec/nodes/edpm-compute-2/networks/0/fixedIP + value: 192.168.122.102 diff --git a/config/samples/dataplane/ceph/values.yaml b/config/samples/dataplane/ceph/values.yaml new file mode 100644 index 000000000..57553fc46 --- /dev/null +++ b/config/samples/dataplane/ceph/values.yaml @@ -0,0 +1,51 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-ceph-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + # Create a nova-custom-ceph service which uses a ConfigMap + # containing libvirt overrides for Ceph RBD. 
+ services: + - bootstrap + - download-cache + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - ceph-client + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova-custom-ceph + nodetemplate: + # Create a secret called ceph-conf-files with the cephx key and + # ceph.conf file and mount it so the ceph-client service can copy + # those files to the EDPM nodes. + extramounts: + - extraVolType: Logs + mounts: + - mountPath: /runner/artifacts + name: ansible-logs + volumes: + - name: ansible-logs + persistentVolumeClaim: + claimName: ansible-ee-logs + - extraVolType: Ceph + mounts: + - mountPath: /etc/ceph + name: ceph + readOnly: true + volumes: + - name: ceph + projected: + sources: + - secret: + name: ceph-conf-files diff --git a/config/samples/dataplane/customnetworks/kustomization.yaml b/config/samples/dataplane/customnetworks/kustomization.yaml new file mode 100644 index 000000000..00ba307c5 --- /dev/null +++ b/config/samples/dataplane/customnetworks/kustomization.yaml @@ -0,0 +1,69 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -custom-network + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-custom-networks-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.neutron_public_interface_name + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.neutron_public_interface_name + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-custom-networks-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.ctlplane_dns_nameservers + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.ctlplane_dns_nameservers + options: + 
create: true +- source: + kind: DataPlaneConfig + name: edpm-custom-networks-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.edpm_ovn_dbs + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.edpm_ovn_dbs + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-custom-networks-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.edpm_sshd_allowed_ranges + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.edpm_sshd_allowed_ranges + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-custom-networks-values + fieldPath: data.nodeset.nodes + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodes + options: + create: true diff --git a/config/samples/dataplane/customnetworks/values.yaml b/config/samples/dataplane/customnetworks/values.yaml new file mode 100644 index 000000000..81c95e7c3 --- /dev/null +++ b/config/samples/dataplane/customnetworks/values.yaml @@ -0,0 +1,39 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-custom-networks-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + nodetemplate: + ansible: + vars: + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + + neutron_public_interface_name: enp7s0 + ctlplane_dns_nameservers: + - 192.168.1.254 + edpm_ovn_dbs: + - 192.168.24.1 + edpm_sshd_allowed_ranges: ['192.168.0.0/24', '172.20.0.0/16'] + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + ansible: + ansibleHost: 192.168.1.5 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: 
true + fixedIP: 192.168.1.5 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 diff --git a/config/samples/dataplane/networker/kustomization.yaml b/config/samples/dataplane/networker/kustomization.yaml new file mode 100644 index 000000000..14d01ddba --- /dev/null +++ b/config/samples/dataplane/networker/kustomization.yaml @@ -0,0 +1,47 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -networker + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-networker-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.edpm_enable_chassis_gw + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.edpm_enable_chassis_gw + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-networker-values + fieldPath: data.nodeset.services + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.services + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-networker-values + fieldPath: data.nodeset.nodes + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodes + options: + create: true diff --git a/config/samples/dataplane/networker/values.yaml b/config/samples/dataplane/networker/values.yaml new file mode 100644 index 000000000..73ee399c8 --- /dev/null +++ b/config/samples/dataplane/networker/values.yaml @@ -0,0 +1,60 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-networker-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + nodetemplate: + ansible: + vars: + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # 
edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + edpm_enable_chassis_gw: true + nodes: + edpm-networker-0: + hostName: edpm-networker-0 + ansible: + ansibleHost: 192.168.122.100 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-networker-1: + hostName: edpm-networker-1 + ansible: + ansibleHost: 192.168.122.101 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.101 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn diff --git a/config/samples/dataplane/nmstate/kustomization.yaml b/config/samples/dataplane/nmstate/kustomization.yaml new file mode 100644 index 000000000..adc002370 --- /dev/null +++ b/config/samples/dataplane/nmstate/kustomization.yaml @@ -0,0 +1,42 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-nmstate-values + fieldPath: data.nodeset.nodetemplate.ansible.vars + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars + options: + create: true + +patches: +- target: + kind: OpenStackDataPlaneNodeSet + name: .* + patch: |- + - op: copy + from: 
/spec/nodes/edpm-compute-0 + path: /spec/nodes/edpm-compute-1 + - op: replace + path: /spec/nodes/edpm-compute-1/ansible/ansibleHost + value: 192.168.122.101 + - op: replace + path: /spec/nodes/edpm-compute-1/hostName + value: edpm-compute-1 + - op: replace + path: /spec/nodes/edpm-compute-1/networks/0/fixedIP + value: 192.168.122.101 diff --git a/config/samples/dataplane/nmstate/values.yaml b/config/samples/dataplane/nmstate/values.yaml new file mode 100644 index 000000000..a54703e70 --- /dev/null +++ b/config/samples/dataplane/nmstate/values.yaml @@ -0,0 +1,90 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-nmstate-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + nodetemplate: + ansible: + vars: + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + dns-resolver: + config: + search: {{ dns_search_domains }} + server: {{ ctlplane_dns_nameservers }} + interfaces: + - name: {{ neutron_public_interface_name }} + type: ethernet + state: up + - name: {{ neutron_physical_bridge_name }} + type: ovs-interface + state: up + mtu: {{ min_viable_mtu }} + ipv4: + enabled: true + address: + - ip: {{ ctlplane_ip }} + prefix-length: {{ ctlplane_cidr }} + {% for network in nodeset_networks %} + - name: {{ "vlan" ~ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + type: ovs-interface + state: up + mtu: {{ 
lookup('vars', networks_lower[network] ~ '_mtu') }} + ipv4: + enabled: true + address: + - ip: {{ lookup('vars', networks_lower[network] ~ '_ip') }} + prefix-length: {{ lookup('vars', networks_lower[network] ~ '_cidr') }} + {% endfor %} + - name: {{ neutron_physical_bridge_name }} + type: ovs-bridge + bridge: + options: + fail-mode: standalone + port: + - name: {{ neutron_public_interface_name }} + - name: {{ neutron_physical_bridge_name }} + {% for network in nodeset_networks %} + - name: {{ "vlan" ~ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + vlan: + mode: access + tag: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + {% endfor %} + routes: + config: + - destination: {{ ctlplane_host_routes.ip_netmask }} + next-hop-address: {{ ctlplane_host_routes.next_hop }} + next-hop-interface: {{ neutron_physical_bridge_name }} + # edpm_network_config - nmstate + edpm_network_config_tool: 'nmstate' + # These vars are for the network config templates themselves and are + # considered EDPM network defaults. 
+ neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + # edpm_nodes_validation + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + ctlplane_dns_nameservers: + - 192.168.122.1 + dns_search_domains: [] + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing diff --git a/config/samples/dataplane/no_vars_from/kustomization.yaml b/config/samples/dataplane/no_vars_from/kustomization.yaml new file mode 100644 index 000000000..89f8450ee --- /dev/null +++ b/config/samples/dataplane/no_vars_from/kustomization.yaml @@ -0,0 +1,53 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: ConfigMap + name: neutron-edpm + fieldPath: data.physical_bridge_name + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.neutron_physical_bridge_name + options: + create: true +- source: + kind: ConfigMap + name: neutron-edpm + fieldPath: data.public_interface_name + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.neutron_public_interface_name + options: + create: true +- source: + kind: ConfigMap + name: network-config-template + fieldPath: data.network_config_template + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.edpm_network_config_template + options: + create: true + + +patches: +- target: + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm + patch: |- + - op: remove + path: /spec/nodeTemplate/ansible/ansibleVarsFrom + - op: add + path: /spec/nodeTemplate/ansible/ansibleUser 
+ value: cloud-admin diff --git a/config/samples/dataplane/nova_cell_custom/kustomization.yaml b/config/samples/dataplane/nova_cell_custom/kustomization.yaml new file mode 100644 index 000000000..d0ed5968f --- /dev/null +++ b/config/samples/dataplane/nova_cell_custom/kustomization.yaml @@ -0,0 +1,33 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: +- ../../../services/dataplane_v1beta1_openstackdataplaneservice_nova.yaml + +configMapGenerator: +- name: nova-extra-config + files: + - 25-nova-extra.conf=nova-extra.conf + options: + disableNameSuffixHash: true + +patches: +- target: + kind: OpenStackDataPlaneService + name: nova + patch: |- + - op: replace + path: /metadata/name + value: nova-cell-custom + - op: add + path: /spec/configMaps + value: + - nova-extra-config + +configurations: +- servicename.yaml diff --git a/config/samples/dataplane/nova_cell_custom/nova-extra.conf b/config/samples/dataplane/nova_cell_custom/nova-extra.conf new file mode 100644 index 000000000..e9de0ef51 --- /dev/null +++ b/config/samples/dataplane/nova_cell_custom/nova-extra.conf @@ -0,0 +1,3 @@ +[compute] +cpu_shared_set = 2,6 +cpu_dedicated_set = 1,3,5,7 diff --git a/config/samples/dataplane/nova_cell_custom/servicename.yaml b/config/samples/dataplane/nova_cell_custom/servicename.yaml new file mode 100644 index 000000000..7d43116bd --- /dev/null +++ b/config/samples/dataplane/nova_cell_custom/servicename.yaml @@ -0,0 +1,10 @@ +# This file is for teaching kustomize how to substitute OpenStackDataPlaneService name reference in OpenStackDataPlaneNodeSet +nameReference: +- kind: OpenStackDataPlaneService + version: v1beta1 + group: dataplane.openstack.org + fieldSpecs: + - kind: OpenStackDataPlaneNodeSet + version: v1beta1 + group: dataplane.openstack.org + path: spec/services diff --git a/config/samples/dataplane/ovs_dpdk/kustomization.yaml 
b/config/samples/dataplane/ovs_dpdk/kustomization.yaml new file mode 100644 index 000000000..b93691528 --- /dev/null +++ b/config/samples/dataplane/ovs_dpdk/kustomization.yaml @@ -0,0 +1,80 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -ovs-dpdk + +components: +- ../baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-ovs-dpdk-values + fieldPath: data.nodeset.nodetemplate.ansible.vars + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ovs-dpdk-values + fieldPath: data.nodeset.services + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.services + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ovs-dpdk-values + fieldPath: data.nodeset.nodes + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodes + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ovs-dpdk-values + fieldPath: data.nodeset.baremetalsettemplate + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.baremetalSetTemplate + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ovs-dpdk-values + fieldPath: data.preProvisioned + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.preProvisioned + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ovs-dpdk-values + fieldPath: data.nodeset.nodetemplate.networks + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.networks + options: + create: true diff --git a/config/samples/dataplane/ovs_dpdk/values.yaml b/config/samples/dataplane/ovs_dpdk/values.yaml new file mode 100644 index 000000000..3c92edbde --- /dev/null +++ 
b/config/samples/dataplane/ovs_dpdk/values.yaml @@ -0,0 +1,130 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-ovs-dpdk-values + annotations: + config.kubernetes.io/local-config: "true" +data: + preProvisioned: false + nodeset: + baremetalsettemplate: + bmhLabelSelector: + app: openstack + ctlplaneInterface: enp1s0 + cloudUserName: cloud-admin + nodetemplate: + ansible: + vars: + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks if network not in ["external", "tenant"] %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} + - type: 
ovs_user_bridge + name: br-link1 + use_dhcp: false + ovs_extra: "set port br-link1 tag={{ lookup('vars', networks_lower['tenant'] ~ '_vlan_id') }}" + addresses: + - ip_netmask: {{ lookup('vars', networks_lower['tenant'] ~ '_ip') }}/{{ lookup('vars', networks_lower['tenant'] ~ '_cidr') }} + + mtu: {{ lookup('vars', networks_lower['tenant'] ~ '_mtu') }} + members: + - type: ovs_dpdk_port + name: dpdk1 + members: + - type: interface + name: nic3 + - type: ovs_user_bridge + name: br-link2 + use_dhcp: false + mtu: 9000 + members: + - type: ovs_dpdk_port + name: dpdk2 + members: + - type: interface + name: nic4 + + neutron_physical_bridge_name: br-ex + # edpm_nodes_validation + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + # edpm nfv ovs dpdk config + edpm_kernel_args: "default_hugepagesz=1GB hugepagesz=1G hugepages=64 iommu=pt intel_iommu=on tsx=off isolcpus=2-11,14-23" + edpm_tuned_profile: "cpu-partitioning" + edpm_nova_libvirt_qemu_group: "hugetlbfs" + edpm_tuned_isolated_cores: "2-11,14-23" + edpm_ovs_dpdk_pmd_core_list: "1,13,2,14,3,15" + edpm_ovs_dpdk_socket_memory: "4096" + edpm_ovs_dpdk_memory_channels: "4" + edpm_ovs_dpdk_vhost_postcopy_support: "true" + edpm_ovn_bridge_mappings: ['dpdk2:br-link2','dpdk1:br-link1'] + gather_facts: false + enable_debug: false + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + edpm-compute-1: + hostName: edpm-compute-1 + services: + - bootstrap + - download-cache + - reboot-os + - configure-ovs-dpdk + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - install-certs + - ovn + - neutron-ovn + - neutron-metadata + - libvirt + - nova + - telemetry diff --git 
a/config/samples/dataplane/post_ceph_hci/kustomization.yaml b/config/samples/dataplane/post_ceph_hci/kustomization.yaml new file mode 100644 index 000000000..bdd34af22 --- /dev/null +++ b/config/samples/dataplane/post_ceph_hci/kustomization.yaml @@ -0,0 +1,71 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +nameSuffix: -ceph + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodeset.nodetemplate.ansible.vars + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodeset.nodes + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodes + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodeset.nodetemplate.extramounts + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.extraMounts + options: + create: true +# OpenStackDataPlaneDeployment customizations +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.deployment.services + targets: + - select: + kind: OpenStackDataPlaneDeployment + fieldPaths: + - spec.servicesOverride + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.deployment.name + targets: + - select: + kind: OpenStackDataPlaneDeployment + fieldPaths: + - metadata.name + options: + create: true diff --git a/config/samples/dataplane/post_ceph_hci/values.yaml b/config/samples/dataplane/post_ceph_hci/values.yaml new file mode 100644 index 000000000..2d3a4e905 --- /dev/null +++ b/config/samples/dataplane/post_ceph_hci/values.yaml @@ -0,0 +1,164 @@ +# local-config: 
referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-ceph-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + nodetemplate: + ansible: + vars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + edpm_iscsid_image: '{{ registry_url }}/openstack-iscsid:{{ image_tag }}' + edpm_logrotate_crond_image: '{{ registry_url }}/openstack-cron:{{ image_tag }}' + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_os_net_config_mappings: + edpm-compute: + nic2: enp7s0 + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic2 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% 
endfor %} + edpm_neutron_metadata_agent_image: '{{ registry_url }}/openstack-neutron-metadata-agent-ovn:{{ image_tag }}' + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_nova_compute_container_image: '{{ registry_url }}/openstack-nova-compute:{{ image_tag }}' + edpm_nova_libvirt_container_image: '{{ registry_url }}/openstack-nova-libvirt:{{ image_tag }}' + edpm_ovn_controller_agent_image: '{{ registry_url }}/openstack-ovn-controller:{{ image_tag }}' + edpm_selinux_mode: enforcing + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + edpm_sshd_configure_firewall: true + enable_debug: false + gather_facts: false + image_tag: current-podified + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + registry_url: quay.io/podified-antelope-centos9 + edpm_ceph_hci_pre_enabled_services: + - ceph_mon + - ceph_mgr + - ceph_osd + - ceph_rgw + - ceph_nfs + - ceph_rgw_frontend + - ceph_nfs_frontend + storage_mtu: 9000 + storage_mgmt_mtu: 9000 + storage_mgmt_vlan_id: 23 + storage_mgmt_cidr: "24" + storage_mgmt_host_routes: [] + extramounts: + - extraVolType: Logs + mounts: + - mountPath: /runner/artifacts + name: ansible-logs + volumes: + - name: ansible-logs + persistentVolumeClaim: + claimName: ansible-ee-logs + - extraVolType: Ceph + mounts: + - mountPath: /etc/ceph + name: ceph + readOnly: true + volumes: + - name: ceph + projected: + sources: + - secret: + name: ceph-conf-files + nodes: + edpm-compute-0: + ansible: + ansibleHost: 192.168.122.100 + hostName: edpm-compute-0 + networks: + - defaultRoute: true + fixedIP: 192.168.122.100 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-compute-1: + ansible: + ansibleHost: 192.168.122.101 + hostName: edpm-compute-1 + networks: + - defaultRoute: true + fixedIP: 192.168.122.101 + name: 
ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-compute-2: + ansible: + ansibleHost: 192.168.122.102 + hostName: edpm-compute-2 + networks: + - defaultRoute: true + fixedIP: 192.168.122.102 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + deployment: + name: edpm-deployment-ipam-post-ceph + services: + - ceph-client + - ovn + - libvirt + - nova-custom-ceph diff --git a/config/samples/dataplane/pre_ceph_hci/kustomization.yaml b/config/samples/dataplane/pre_ceph_hci/kustomization.yaml new file mode 100644 index 000000000..fba6b45ed --- /dev/null +++ b/config/samples/dataplane/pre_ceph_hci/kustomization.yaml @@ -0,0 +1,60 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +nameSuffix: -ceph + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodeset.nodetemplate.ansible.vars + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.nodeset.nodes + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodes + options: + create: true +# OpenStackDataPlaneDeployment customizations +- source: + kind: DataPlaneConfig + name: edpm-ceph-values + fieldPath: data.deployment.services + targets: + - select: + kind: OpenStackDataPlaneDeployment + fieldPaths: + - spec.servicesOverride + options: + create: true +- source: + kind: DataPlaneConfig + name: 
edpm-ceph-values + fieldPath: data.deployment.name + targets: + - select: + kind: OpenStackDataPlaneDeployment + fieldPaths: + - metadata.name + options: + create: true diff --git a/config/samples/dataplane/pre_ceph_hci/values.yaml b/config/samples/dataplane/pre_ceph_hci/values.yaml new file mode 100644 index 000000000..b78e33ab3 --- /dev/null +++ b/config/samples/dataplane/pre_ceph_hci/values.yaml @@ -0,0 +1,148 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-ceph-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + nodetemplate: + ansible: + vars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + edpm_iscsid_image: '{{ registry_url }}/openstack-iscsid:{{ image_tag }}' + edpm_logrotate_crond_image: '{{ registry_url }}/openstack-cron:{{ image_tag }}' + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_os_net_config_mappings: + edpm-compute: + nic2: enp7s0 + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic2 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this 
interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} + edpm_neutron_metadata_agent_image: '{{ registry_url }}/openstack-neutron-metadata-agent-ovn:{{ image_tag }}' + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_nova_compute_container_image: '{{ registry_url }}/openstack-nova-compute:{{ image_tag }}' + edpm_nova_libvirt_container_image: '{{ registry_url }}/openstack-nova-libvirt:{{ image_tag }}' + edpm_ovn_controller_agent_image: '{{ registry_url }}/openstack-ovn-controller:{{ image_tag }}' + edpm_selinux_mode: enforcing + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + edpm_sshd_configure_firewall: true + enable_debug: false + gather_facts: false + image_tag: current-podified + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + registry_url: quay.io/podified-antelope-centos9 + edpm_ceph_hci_pre_enabled_services: + - ceph_mon + - ceph_mgr + - ceph_osd + - ceph_rgw + - ceph_nfs + - ceph_rgw_frontend + - ceph_nfs_frontend + storage_mtu: 9000 + storage_mgmt_mtu: 9000 + storage_mgmt_vlan_id: 23 + storage_mgmt_cidr: "24" + storage_mgmt_host_routes: [] + nodes: + edpm-compute-0: + ansible: + ansibleHost: 192.168.122.100 + hostName: edpm-compute-0 + networks: + - defaultRoute: true + fixedIP: 192.168.122.100 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-compute-1: + ansible: + ansibleHost: 192.168.122.101 + hostName: edpm-compute-1 + 
networks: + - defaultRoute: true + fixedIP: 192.168.122.101 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-compute-2: + ansible: + ansibleHost: 192.168.122.102 + hostName: edpm-compute-2 + networks: + - defaultRoute: true + fixedIP: 192.168.122.102 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + deployment: + name: edpm-deployment-ipam-pre-ceph + services: + - bootstrap + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + - ssh-known-hosts + - run-os diff --git a/config/samples/dataplane/preprovisioned/kustomization.yaml b/config/samples/dataplane/preprovisioned/kustomization.yaml new file mode 100644 index 000000000..585eb787e --- /dev/null +++ b/config/samples/dataplane/preprovisioned/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -ipam + +components: +- ../base diff --git a/config/samples/dataplane/sriov/kustomization.yaml b/config/samples/dataplane/sriov/kustomization.yaml new file mode 100644 index 000000000..02b1cec9b --- /dev/null +++ b/config/samples/dataplane/sriov/kustomization.yaml @@ -0,0 +1,43 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack +nameSuffix: -sriov + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-sriov-values + fieldPath: data.nodeset.services + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.services + options: + create: true + +patches: +- target: + 
kind: OpenStackDataPlaneNodeSet + name: .* + patch: |- + - op: copy + from: /spec/nodes/edpm-compute-0 + path: /spec/nodes/edpm-compute-1 + - op: replace + path: /spec/nodes/edpm-compute-1/ansible/ansibleHost + value: 192.168.122.101 + - op: replace + path: /spec/nodes/edpm-compute-1/hostName + value: edpm-compute-1 + - op: replace + path: /spec/nodes/edpm-compute-1/networks/0/fixedIP + value: 192.168.122.101 diff --git a/config/samples/dataplane/sriov/values.yaml b/config/samples/dataplane/sriov/values.yaml new file mode 100644 index 000000000..a05519173 --- /dev/null +++ b/config/samples/dataplane/sriov/values.yaml @@ -0,0 +1,26 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-sriov-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + services: + - bootstrap + - download-cache + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-ovn + - neutron-sriov + - libvirt + - nova + - telemetry diff --git a/config/samples/dataplane/swift/kustomization.yaml b/config/samples/dataplane/swift/kustomization.yaml new file mode 100644 index 000000000..11e29d7de --- /dev/null +++ b/config/samples/dataplane/swift/kustomization.yaml @@ -0,0 +1,57 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openstack + +components: +- ../preprovisioned # for baremetal nodes, replace with baremetal + +resources: + - values.yaml + +replacements: +# OpenStackDataPlaneNodeSet customizations +- source: + kind: DataPlaneConfig + name: edpm-swift-values + fieldPath: data.nodeset.nodetemplate.ansible.vars.edpm_swift_disks + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodeTemplate.ansible.ansibleVars.edpm_swift_disks + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-swift-values + fieldPath: 
data.nodeset.services + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.services + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-swift-values + fieldPath: data.nodeset.networkattachments + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.networkAttachments + options: + create: true +- source: + kind: DataPlaneConfig + name: edpm-swift-values + fieldPath: data.nodeset.nodes + targets: + - select: + kind: OpenStackDataPlaneNodeSet + fieldPaths: + - spec.nodes + options: + create: true diff --git a/config/samples/dataplane/swift/values.yaml b/config/samples/dataplane/swift/values.yaml new file mode 100644 index 000000000..960bf5ff7 --- /dev/null +++ b/config/samples/dataplane/swift/values.yaml @@ -0,0 +1,65 @@ +# local-config: referenced, but not emitted by kustomize +apiVersion: v1 +kind: DataPlaneConfig +metadata: + name: edpm-swift-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset: + networkattachments: + - ctlplane + - storage + nodetemplate: + ansible: + vars: + # Swift disks defined here apply to all nodes. Node-specific disks + # might be defined in the nodes: section below + # + # weight, region and zone are not used in the playbook, but + # in swift-operator itself to determine Swift ring values. weight + # should be usually set to the GiB of the disk; region and + # zone are optional and might be used to enforce distribution of + # replicas + edpm_swift_disks: + - device: /dev/vdb + path: /srv/node/vdb + weight: 4000 + region: 0 + zone: 0 + nodes: + edpm-swift-0: + ansible: + ansibleHost: 192.168.122.100 + ansibleVars: + # Same options as above for all nodes, this time for an individual + # node with a different disk. This node will use only vdc. 
It would + # use vdb from parent section if not defined + edpm_swift_disks: + - device: /dev/vdc + path: /srv/node/vdc + weight: 1000 + hostName: edpm-swift-0 + networks: + - defaultRoute: true + fixedIP: 192.168.122.100 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + services: + - bootstrap + - download-cache + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - swift diff --git a/config/samples/dataplane_v1beta1_openstackdataplanedeployment.yaml b/config/samples/dataplane_v1beta1_openstackdataplanedeployment.yaml new file mode 100644 index 000000000..211f503db --- /dev/null +++ b/config/samples/dataplane_v1beta1_openstackdataplanedeployment.yaml @@ -0,0 +1,7 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-deployment +spec: + nodeSets: + - openstack-edpm diff --git a/config/samples/dataplane_v1beta1_openstackdataplanenodeset.yaml b/config/samples/dataplane_v1beta1_openstackdataplanenodeset.yaml new file mode 100644 index 000000000..ad35b6390 --- /dev/null +++ b/config/samples/dataplane_v1beta1_openstackdataplanenodeset.yaml @@ -0,0 +1,72 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm +spec: + tlsEnabled: true + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + services: + - bootstrap + - download-cache + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + - telemetry + preProvisioned: true + networkAttachments: + - ctlplane + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + ansible: + ansibleHost: 192.168.122.100 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 
192.168.122.100 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + - prefix: neutron_ + configMapRef: + name: neutron-edpm + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # - prefix: subscription_manager_ + # secretRef: + # name: subscription-manager + # - prefix: registry_ + # secretRef: + # name: redhat-registry + ansibleVars: + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username {{ subscription_manager_username }} --password {{ subscription_manager_password }} + # podman login -u {{ registry_username }} -p {{ registry_password }} registry.redhat.io + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] diff --git a/config/samples/dataplane_v1beta1_openstackdataplaneservice.yaml b/config/samples/dataplane_v1beta1_openstackdataplaneservice.yaml new file mode 100644 index 000000000..13f953e4e --- /dev/null +++ b/config/samples/dataplane_v1beta1_openstackdataplaneservice.yaml @@ -0,0 +1,12 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-sample + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: openstackdataplaneservice-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 
2b198bae2..75adeda52 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -5,4 +5,7 @@ resources: - core_v1beta1_openstackcontrolplane_network_isolation.yaml - client_v1beta1_openstackclient.yaml - core_v1beta1_openstackversion.yaml +- dataplane_v1beta1_openstackdataplanenodeset.yaml +- dataplane_v1beta1_openstackdataplaneservice.yaml +- dataplane_v1beta1_openstackdataplanedeployment.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_bootstrap.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_bootstrap.yaml new file mode 100644 index 000000000..bfd69dd31 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_bootstrap.yaml @@ -0,0 +1,12 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-bootstrap + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: bootstrap +spec: + playbook: osp.edpm.bootstrap diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_ceph_client.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_ceph_client.yaml new file mode 100644 index 000000000..f79704a36 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_ceph_client.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: ceph-client +spec: + playbook: osp.edpm.ceph_client diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_ceph_hci_pre.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_ceph_hci_pre.yaml new file mode 100644 index 000000000..0d83e88e6 --- /dev/null +++ 
b/config/services/dataplane_v1beta1_openstackdataplaneservice_ceph_hci_pre.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: ceph-hci-pre +spec: + playbook: osp.edpm.ceph_hci_pre diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_network.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_network.yaml new file mode 100644 index 000000000..bb968d101 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_network.yaml @@ -0,0 +1,12 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-configure-network + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: configure-network +spec: + playbook: osp.edpm.configure_network diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_os.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_os.yaml new file mode 100644 index 000000000..2e475ddcc --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_os.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: configure-os +spec: + playbook: osp.edpm.configure_os diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_ovs_dpdk.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_ovs_dpdk.yaml new file mode 100644 index 000000000..77988fd8d --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_configure_ovs_dpdk.yaml @@ -0,0 +1,12 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: 
openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-configure-ovs-dpdk + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: configure-ovs-dpdk +spec: + playbook: osp.edpm.configure_ovs_dpdk diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_ddp_package_option.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_ddp_package_option.yaml new file mode 100644 index 000000000..9ca47eb83 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_ddp_package_option.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: ddp-package-option +spec: + playbook: osp.edpm.select_kernel_ddp_package diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_derive_pci_devicespec.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_derive_pci_devicespec.yaml new file mode 100644 index 000000000..86e1b9ebd --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_derive_pci_devicespec.yaml @@ -0,0 +1,12 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-derive-pci-devicespec + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: derive-pci-devicespec +spec: + playbook: osp.edpm.sriov_derive_device_spec diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_download_cache.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_download_cache.yaml new file mode 100644 index 000000000..c480703b9 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_download_cache.yaml @@ -0,0 +1,12 @@ 
+apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-download-cache + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: download-cache +spec: + playbook: osp.edpm.download_cache diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_fips_status.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_fips_status.yaml new file mode 100644 index 000000000..b98890661 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_fips_status.yaml @@ -0,0 +1,7 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: fips-status +spec: + label: fips-status + playbook: osp.edpm.fips_status diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_frr.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_frr.yaml new file mode 100644 index 000000000..0561424fd --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_frr.yaml @@ -0,0 +1,8 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: frr +spec: + playbook: osp.edpm.frr + containerImageFields: + - EdpmFrrImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_install_certs.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_install_certs.yaml new file mode 100644 index 000000000..45654d467 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_install_certs.yaml @@ -0,0 +1,13 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-install-certs + app.kubernetes.io/part-of: 
dataplane-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: dataplane-operator + name: install-certs +spec: + playbook: osp.edpm.install_certs + addCertMounts: True diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_install_os.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_install_os.yaml new file mode 100644 index 000000000..ff9bb9072 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_install_os.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: install-os +spec: + playbook: osp.edpm.install_os diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_libvirt.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_libvirt.yaml new file mode 100644 index 000000000..7defe3840 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_libvirt.yaml @@ -0,0 +1,24 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: libvirt +spec: + playbook: osp.edpm.libvirt + secrets: + # NOTE: this Secret needs to be created before deploying the data plane. 
+ # It should contain the libvirt sasl auth password using the key LibvirtPassword + - libvirt-secret + tlsCerts: + default: + contents: + - dnsnames + - ips + networks: + - ctlplane + keyUsages: + - digital signature + - key encipherment + - server auth + - client auth + issuer: osp-rootca-issuer-libvirt + caCerts: combined-ca-bundle diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_logging.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_logging.yaml new file mode 100644 index 000000000..2d5a5dac6 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_logging.yaml @@ -0,0 +1,8 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: logging +spec: + secrets: + - logging-compute-config-data + playbook: osp.edpm.telemetry_logging diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_dhcp.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_dhcp.yaml new file mode 100644 index 000000000..c12bf69a8 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_dhcp.yaml @@ -0,0 +1,11 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: neutron-dhcp +spec: + playbook: osp.edpm.neutron_dhcp + secrets: + - neutron-dhcp-agent-neutron-config + caCerts: combined-ca-bundle + containerImageFields: + - EdpmNeutronDhcpAgentImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_metadata.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_metadata.yaml new file mode 100644 index 000000000..e487b5bab --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_metadata.yaml @@ -0,0 +1,24 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: neutron-metadata +spec: + playbook: osp.edpm.neutron_metadata + secrets: + - 
neutron-ovn-metadata-agent-neutron-config + - nova-metadata-neutron-config + tlsCerts: + default: + contents: + - dnsnames + - ips + networks: + - ctlplane + issuer: osp-rootca-issuer-ovn + keyUsages: + - digital signature + - key encipherment + - client auth + caCerts: combined-ca-bundle + containerImageFields: + - EdpmNeutronMetadataAgentImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_ovn.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_ovn.yaml new file mode 100644 index 000000000..04391b22a --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_ovn.yaml @@ -0,0 +1,23 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: neutron-ovn +spec: + playbook: osp.edpm.neutron_ovn + secrets: + - neutron-ovn-agent-neutron-config + tlsCerts: + default: + contents: + - dnsnames + - ips + networks: + - ctlplane + issuer: osp-rootca-issuer-ovn + keyUsages: + - digital signature + - key encipherment + - client auth + caCerts: combined-ca-bundle + containerImageFields: + - EdpmNeutronOvnAgentImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_sriov.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_sriov.yaml new file mode 100644 index 000000000..89fc2f036 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_neutron_sriov.yaml @@ -0,0 +1,11 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: neutron-sriov +spec: + playbook: osp.edpm.neutron_sriov + secrets: + - neutron-sriov-agent-neutron-config + caCerts: combined-ca-bundle + containerImageFields: + - EdpmNeutronSriovAgentImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_nova.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_nova.yaml new file mode 100644 index 000000000..c17f1f761 --- /dev/null +++ 
b/config/services/dataplane_v1beta1_openstackdataplaneservice_nova.yaml @@ -0,0 +1,18 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: nova +spec: + dataSources: + - secretRef: + name: nova-cell1-compute-config + - secretRef: + name: nova-migration-ssh-key + - configMapRef: + name: nova-extra-config + optional: true + playbook: osp.edpm.nova + caCerts: combined-ca-bundle + containerImageFields: + - NovaComputeImage + - EdpmIscsidImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_os_reboot.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_os_reboot.yaml new file mode 100644 index 000000000..fa64fe152 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_os_reboot.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: reboot-os +spec: + playbook: osp.edpm.reboot diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_ovn.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_ovn.yaml new file mode 100644 index 000000000..3493a3071 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_ovn.yaml @@ -0,0 +1,24 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: ovn +spec: + playbook: osp.edpm.ovn + configMaps: + - ovncontroller-config + tlsCerts: + default: + contents: + - dnsnames + - ips + networks: + - ctlplane + issuer: osp-rootca-issuer-ovn + keyUsages: + - digital signature + - key encipherment + - server auth + - client auth + caCerts: combined-ca-bundle + containerImageFields: + - OvnControllerImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_ovn_bgp_agent.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_ovn_bgp_agent.yaml new file mode 100644 index 000000000..808b2ca52 --- /dev/null +++ 
b/config/services/dataplane_v1beta1_openstackdataplaneservice_ovn_bgp_agent.yaml @@ -0,0 +1,24 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: ovn-bgp-agent +spec: + playbook: osp.edpm.ovn_bgp_agent + secrets: + - neutron-ovn-agent-neutron-config + tlsCerts: + default: + contents: + - dnsnames + - ips + networks: + - ctlplane + issuer: osp-rootca-issuer-ovn + keyUsages: + - digital signature + - key encipherment + - server auth + - client auth + caCerts: combined-ca-bundle + containerImageFields: + - EdpmOvnBgpAgentImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_run_os.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_run_os.yaml new file mode 100644 index 000000000..1a8362fe3 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_run_os.yaml @@ -0,0 +1,9 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: run-os +spec: + playbook: osp.edpm.run_os + containerImageFields: + - EdpmLogrotateCrondImage + - EdpmIscsidImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_ssh_known_hosts.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_ssh_known_hosts.yaml new file mode 100644 index 000000000..7181a98ad --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_ssh_known_hosts.yaml @@ -0,0 +1,7 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: ssh-known-hosts +spec: + playbook: osp.edpm.ssh_known_hosts + deployOnAllNodeSets: true diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_swift.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_swift.yaml new file mode 100644 index 000000000..c8ebf2448 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_swift.yaml @@ -0,0 +1,11 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: 
OpenStackDataPlaneService +metadata: + name: swift +spec: + playbook: osp.edpm.swift + secrets: + - swift-conf + configMaps: + - swift-storage-config-data + - swift-ring-files diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_telemetry.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_telemetry.yaml new file mode 100644 index 000000000..995f884db --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_telemetry.yaml @@ -0,0 +1,17 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: telemetry +spec: + secrets: + - ceilometer-compute-config-data + playbook: osp.edpm.telemetry + tlsCerts: + default: + contents: + - ips + caCerts: combined-ca-bundle + containerImageFields: + - CeilometerComputeImage + - CeilometerIpmiImage + - EdpmNodeExporterImage diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_update.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_update.yaml new file mode 100644 index 000000000..18ec4a4c9 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_update.yaml @@ -0,0 +1,6 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: update +spec: + playbook: osp.edpm.update diff --git a/config/services/dataplane_v1beta1_openstackdataplaneservice_validate_network.yaml b/config/services/dataplane_v1beta1_openstackdataplaneservice_validate_network.yaml new file mode 100644 index 000000000..723d59f55 --- /dev/null +++ b/config/services/dataplane_v1beta1_openstackdataplaneservice_validate_network.yaml @@ -0,0 +1,12 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + labels: + app.kubernetes.io/name: openstackdataplaneservice + app.kubernetes.io/instance: openstackdataplaneservice-validate-network + app.kubernetes.io/part-of: dataplane-operator + app.kubernetes.io/managed-by: kustomize + 
app.kubernetes.io/created-by: dataplane-operator + name: validate-network +spec: + playbook: osp.edpm.validate_network diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index b5d33b665..98d40b57b 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -65,6 +65,66 @@ webhooks: resources: - openstackversions sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-dataplane-openstack-org-v1beta1-openstackdataplanedeployment + failurePolicy: Fail + name: mopenstackdataplanedeployment.kb.io + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanedeployments + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-dataplane-openstack-org-v1beta1-openstackdataplanenodeset + failurePolicy: Fail + name: mopenstackdataplanenodeset.kb.io + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanenodesets + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-dataplane-openstack-org-v1beta1-openstackdataplaneservice + failurePolicy: Fail + name: mopenstackdataplaneservice.kb.io + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplaneservices + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -132,3 +192,63 @@ webhooks: resources: - openstackversions sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: 
/validate-dataplane-openstack-org-v1beta1-openstackdataplanedeployment + failurePolicy: Fail + name: vopenstackdataplanedeployment.kb.io + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanedeployments + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-dataplane-openstack-org-v1beta1-openstackdataplanenodeset + failurePolicy: Fail + name: vopenstackdataplanenodeset.kb.io + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanenodesets + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-dataplane-openstack-org-v1beta1-openstackdataplaneservice + failurePolicy: Fail + name: vopenstackdataplaneservice.kb.io + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplaneservices + sideEffects: None diff --git a/controllers/core/openstackversion_controller.go b/controllers/core/openstackversion_controller.go index c716907d9..ffc8cab14 100644 --- a/controllers/core/openstackversion_controller.go +++ b/controllers/core/openstackversion_controller.go @@ -32,10 +32,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" - dataplanev1 "github.com/openstack-k8s-operators/dataplane-operator/api/v1beta1" condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" corev1beta1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" 
"github.com/openstack-k8s-operators/openstack-operator/pkg/openstack" ) diff --git a/controllers/dataplane/openstackdataplanedeployment_controller.go b/controllers/dataplane/openstackdataplanedeployment_controller.go new file mode 100644 index 000000000..170e25ec7 --- /dev/null +++ b/controllers/dataplane/openstackdataplanedeployment_controller.go @@ -0,0 +1,455 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dataplane + +import ( + "context" + "fmt" + "time" + + "github.com/go-playground/validator/v10" + corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/go-logr/logr" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + deployment "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane" + dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" +) + +// 
OpenStackDataPlaneDeploymentReconciler reconciles a OpenStackDataPlaneDeployment object +type OpenStackDataPlaneDeploymentReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme +} + +// GetLogger returns a logger object with a prefix of "controller.name" and additional controller context fields +func (r *OpenStackDataPlaneDeploymentReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("OpenStackDataPlaneDeployment") +} + +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanedeployments,verbs=get;list;watch;create;delete +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanedeployments/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanedeployments/finalizers,verbs=update +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodesets,verbs=get;list;watch +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplaneservices,verbs=get;list;watch +//+kubebuilder:rbac:groups=ansibleee.openstack.org,resources=openstackansibleees,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=discovery.k8s.io,resources=endpointslices,verbs=get;list;watch;create;update;patch;delete; +//+kubebuilder:rbac:groups=cert-manager.io,resources=issuers,verbs=get;list;watch; +//+kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete; + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+func (r *OpenStackDataPlaneDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + + Log := r.GetLogger(ctx) + Log.Info("Reconciling Deployment") + + // Check if deployment name matches RFC1123 for use in labels + validate := validator.New() + if err := validate.Var(req.Name, "hostname_rfc1123"); err != nil { + Log.Error(err, "error validating OpenStackDataPlaneDeployment name, the name must follow RFC1123") + return ctrl.Result{}, err + } + // Fetch the OpenStackDataPlaneDeployment instance + instance := &dataplanev1.OpenStackDataPlaneDeployment{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + helper, _ := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + + // If the deploy is already done, return immediately. + if instance.Status.Deployed { + Log.Info("Already deployed", "instance.Status.Deployed", instance.Status.Deployed) + return ctrl.Result{}, nil + } + + // initialize status if Conditions is nil, but do not reset if it already + // exists + isNewInstance := instance.Status.Conditions == nil + if isNewInstance { + instance.Status.Conditions = condition.Conditions{} + } + + // Save a copy of the conditions so that we can restore the LastTransitionTime + // when a condition's state doesn't change. + savedConditions := instance.Status.Conditions.DeepCopy() + + // Reset all conditions to Unknown as the state is not yet known for + // this reconcile loop. 
+ instance.InitConditions() + // Set ObservedGeneration since we've reset conditions + instance.Status.ObservedGeneration = instance.Generation + + // Always patch the instance status when exiting this function so we can persist any changes. + defer func() { // update the Ready condition based on the sub conditions + condition.RestoreLastTransitionTimes( + &instance.Status.Conditions, savedConditions) + if instance.Status.Conditions.AllSubConditionIsTrue() { + instance.Status.Conditions.MarkTrue( + condition.ReadyCondition, condition.ReadyMessage) + } else if instance.Status.Conditions.IsUnknown(condition.ReadyCondition) { + // Recalculate ReadyCondition based on the state of the rest of the conditions + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + + err := helper.PatchInstance(ctx, instance) + if err != nil { + Log.Error(err, "Error updating instance status conditions") + _err = err + return + } + }() + + if instance.Status.ConfigMapHashes == nil { + instance.Status.ConfigMapHashes = make(map[string]string) + } + if instance.Status.SecretHashes == nil { + instance.Status.SecretHashes = make(map[string]string) + } + if instance.Status.NodeSetHashes == nil { + instance.Status.NodeSetHashes = make(map[string]string) + } + if instance.Status.ContainerImages == nil { + instance.Status.ContainerImages = make(map[string]string) + } + + // Ensure NodeSets + nodeSets := dataplanev1.OpenStackDataPlaneNodeSetList{} + for _, nodeSet := range instance.Spec.NodeSets { + + // Fetch the OpenStackDataPlaneNodeSet instance + nodeSetInstance := &dataplanev1.OpenStackDataPlaneNodeSet{} + err := r.Client.Get( + ctx, + types.NamespacedName{ + Namespace: instance.GetNamespace(), + Name: nodeSet, + }, + nodeSetInstance) + if err != nil { + // NodeSet not found, force a requeue + if k8s_errors.IsNotFound(err) { + Log.Info("NodeSet not found", "NodeSet", nodeSet) + return ctrl.Result{RequeueAfter: time.Second * 
time.Duration(instance.Spec.DeploymentRequeueTime)}, nil + } + instance.Status.Conditions.MarkFalse( + dataplanev1.SetupReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneNodeSetErrorMessage, + err.Error()) + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + nodeSets.Items = append(nodeSets.Items, *nodeSetInstance) + } + + // Check that all nodeSets are SetupReady + for _, nodeSet := range nodeSets.Items { + if !nodeSet.Status.Conditions.IsTrue(dataplanev1.SetupReadyCondition) { + Log.Info("NodeSet SetupReadyCondition is not True", "NodeSet", nodeSet.Name) + return ctrl.Result{RequeueAfter: time.Second * time.Duration(instance.Spec.DeploymentRequeueTime)}, nil + } + } + + // get TLS certs + for _, nodeSet := range nodeSets.Items { + if nodeSet.Spec.TLSEnabled { + var services []string + if len(instance.Spec.ServicesOverride) != 0 { + services = instance.Spec.ServicesOverride + } else { + services = nodeSet.Spec.Services + } + nsConditions := instance.Status.NodeSetConditions[nodeSet.Name] + + for _, serviceName := range services { + service, err := deployment.GetService(ctx, helper, serviceName) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.ServiceErrorMessage, + err.Error()) + nsConditions.MarkFalse( + dataplanev1.NodeSetDeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.ServiceErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + if service.Spec.TLSCerts != nil { + for certKey := range service.Spec.TLSCerts { + result, err := deployment.EnsureTLSCerts(ctx, helper, &nodeSet, + nodeSet.Status.AllHostnames, nodeSet.Status.AllIPs, service, certKey) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityError, + condition.TLSInputErrorMessage, + 
err.Error()) + nsConditions.MarkFalse( + dataplanev1.NodeSetDeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityError, + condition.TLSInputErrorMessage, + err.Error()) + return ctrl.Result{}, err + } else if (*result != ctrl.Result{}) { + return *result, nil // requeue here + } + } + } + } + } + } + + // All nodeSets successfully fetched. + // Mark InputReadyCondition=True + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + shouldRequeue := false + haveError := false + deploymentErrMsg := "" + backoffLimitReached := false + + globalInventorySecrets := map[string]string{} + globalSSHKeySecrets := map[string]string{} + + // Gathering individual inventory and ssh secrets for later use + for _, nodeSet := range nodeSets.Items { + // Add inventory secret to list of inventories for global services + globalInventorySecrets[nodeSet.Name] = fmt.Sprintf("dataplanenodeset-%s", nodeSet.Name) + globalSSHKeySecrets[nodeSet.Name] = nodeSet.Spec.NodeTemplate.AnsibleSSHPrivateKeySecret + } + + if instance.Spec.ServicesOverride == nil { + if err := deployment.CheckGlobalServiceExecutionConsistency(ctx, helper, nodeSets.Items); err != nil { + util.LogErrorForObject(helper, err, "OpenStackDeployment error for deployment", instance) + instance.Status.Conditions.MarkFalse( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.ServiceErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + + } + + version, err := dataplaneutil.GetVersion(ctx, helper, instance.Namespace) + if err != nil { + return ctrl.Result{}, err + } + + // Deploy each nodeSet + // The loop starts and checks NodeSet deployments sequentially. However, after they + // are started, they are running in parallel, since the loop does not wait + // for the first started NodeSet to finish before starting the next. 
+ for _, nodeSet := range nodeSets.Items { + + Log.Info(fmt.Sprintf("Deploying NodeSet: %s", nodeSet.Name)) + Log.Info("Set Status.Deployed to false", "instance", instance) + instance.Status.Deployed = false + Log.Info("Set DeploymentReadyCondition false") + instance.Status.Conditions.MarkFalse( + condition.DeploymentReadyCondition, condition.RequestedReason, + condition.SeverityInfo, condition.DeploymentReadyRunningMessage) + ansibleEESpec := nodeSet.GetAnsibleEESpec() + ansibleEESpec.AnsibleTags = instance.Spec.AnsibleTags + ansibleEESpec.AnsibleSkipTags = instance.Spec.AnsibleSkipTags + ansibleEESpec.AnsibleLimit = instance.Spec.AnsibleLimit + ansibleEESpec.ExtraVars = instance.Spec.AnsibleExtraVars + + if nodeSet.Status.DNSClusterAddresses != nil && nodeSet.Status.CtlplaneSearchDomain != "" { + ansibleEESpec.DNSConfig = &corev1.PodDNSConfig{ + Nameservers: nodeSet.Status.DNSClusterAddresses, + Searches: []string{nodeSet.Status.CtlplaneSearchDomain}, + } + } + + deployer := deployment.Deployer{ + Ctx: ctx, + Helper: helper, + NodeSet: &nodeSet, + Deployment: instance, + Status: &instance.Status, + AeeSpec: &ansibleEESpec, + InventorySecrets: globalInventorySecrets, + AnsibleSSHPrivateKeySecrets: globalSSHKeySecrets, + Version: version, + } + + // When ServicesOverride is set on the OpenStackDataPlaneDeployment, + // deploy those services for each OpenStackDataPlaneNodeSet. Otherwise, + // deploy with the OpenStackDataPlaneNodeSet's Services. 
+ var deployResult *ctrl.Result + if len(instance.Spec.ServicesOverride) != 0 { + deployResult, err = deployer.Deploy(instance.Spec.ServicesOverride) + } else { + deployResult, err = deployer.Deploy(nodeSet.Spec.Services) + } + + nsConditions := instance.Status.NodeSetConditions[nodeSet.Name] + nsConditions.Set(nsConditions.Mirror(dataplanev1.NodeSetDeploymentReadyCondition)) + + if err != nil { + util.LogErrorForObject(helper, err, fmt.Sprintf("OpenStackDeployment error for NodeSet %s", nodeSet.Name), instance) + Log.Info("Set NodeSetDeploymentReadyCondition false", "nodeSet", nodeSet.Name) + haveError = true + errMsg := fmt.Sprintf("nodeSet: %s error: %s", nodeSet.Name, err.Error()) + if len(deploymentErrMsg) == 0 { + deploymentErrMsg = errMsg + } else { + deploymentErrMsg = fmt.Sprintf("%s & %s", deploymentErrMsg, errMsg) + } + errorReason := nsConditions.Get(dataplanev1.NodeSetDeploymentReadyCondition).Reason + backoffLimitReached = errorReason == condition.JobReasonBackoffLimitExceeded + } + + if deployResult != nil { + shouldRequeue = true + } else { + Log.Info("OpenStackDeployment succeeded for NodeSet", "NodeSet", nodeSet.Name) + Log.Info("Set NodeSetDeploymentReadyCondition true", "nodeSet", nodeSet.Name) + nsConditions.MarkTrue( + dataplanev1.NodeSetDeploymentReadyCondition, + condition.DeploymentReadyMessage) + } + } + + if haveError { + var reason condition.Reason + reason = condition.ErrorReason + severity := condition.SeverityWarning + if backoffLimitReached { + reason = condition.JobReasonBackoffLimitExceeded + severity = condition.SeverityError + } + instance.Status.Conditions.MarkFalse( + condition.DeploymentReadyCondition, + reason, + severity, + condition.DeploymentReadyErrorMessage, + deploymentErrMsg) + return ctrl.Result{}, fmt.Errorf(deploymentErrMsg) + } + + if shouldRequeue { + Log.Info("Not all NodeSets done for OpenStackDeployment") + return ctrl.Result{}, nil + } + + Log.Info("Set DeploymentReadyCondition true") + 
instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage) + Log.Info("Set Status.Deployed to true", "instance", instance) + instance.Status.Deployed = true + if version != nil { + instance.Status.DeployedVersion = version.Spec.TargetVersion + } + err = r.setHashes(ctx, helper, instance, nodeSets) + if err != nil { + Log.Error(err, "Error setting service hashes") + } + return ctrl.Result{}, nil +} + +func (r *OpenStackDataPlaneDeploymentReconciler) setHashes( + ctx context.Context, + helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneDeployment, + nodeSets dataplanev1.OpenStackDataPlaneNodeSetList, +) error { + + var err error + services := []string{} + + if len(instance.Spec.ServicesOverride) > 0 { + services = instance.Spec.ServicesOverride + } else { + // get the union of services across nodesets + type void struct{} + var member void + s := make(map[string]void) + for _, nodeSet := range nodeSets.Items { + for _, serviceName := range nodeSet.Spec.Services { + s[serviceName] = member + } + } + for service := range s { + services = append(services, service) + } + } + + for _, serviceName := range services { + err = deployment.GetDeploymentHashesForService( + ctx, + helper, + instance.Namespace, + serviceName, + instance.Status.ConfigMapHashes, + instance.Status.SecretHashes, + nodeSets) + if err != nil { + return err + } + } + + for _, nodeSet := range nodeSets.Items { + instance.Status.NodeSetHashes[nodeSet.Name] = nodeSet.Status.ConfigHash + } + + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OpenStackDataPlaneDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dataplanev1.OpenStackDataPlaneDeployment{}). + Owns(&ansibleeev1.OpenStackAnsibleEE{}). 
+ Complete(r) +} diff --git a/controllers/dataplane/openstackdataplanenodeset_controller.go b/controllers/dataplane/openstackdataplanenodeset_controller.go new file mode 100644 index 000000000..d3a544858 --- /dev/null +++ b/controllers/dataplane/openstackdataplanenodeset_controller.go @@ -0,0 +1,705 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dataplane + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/go-playground/validator/v10" + "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/go-logr/logr" + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/rolebinding" + 
"github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/serviceaccount" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + deployment "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane" + dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" +) + +const ( + // AnsibleSSHPrivateKey ssh private key + AnsibleSSHPrivateKey = "ssh-privatekey" + // AnsibleSSHAuthorizedKeys authorized keys + AnsibleSSHAuthorizedKeys = "authorized_keys" +) + +// OpenStackDataPlaneNodeSetReconciler reconciles a OpenStackDataPlaneNodeSet object +type OpenStackDataPlaneNodeSetReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme +} + +// GetLogger returns a logger object with a prefix of "controller.name" and additional controller context fields +func (r *OpenStackDataPlaneNodeSetReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("OpenStackDataPlaneNodeSet") +} + +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodesets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodesets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodesets/finalizers,verbs=update +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplaneservices,verbs=get;list;watch;create;update;patch 
+//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplaneservices/finalizers,verbs=update +//+kubebuilder:rbac:groups=baremetal.openstack.org,resources=openstackbaremetalsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=baremetal.openstack.org,resources=openstackbaremetalsets/status,verbs=get +//+kubebuilder:rbac:groups=baremetal.openstack.org,resources=openstackbaremetalsets/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete; +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete; +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete; +//+kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch +//+kubebuilder:rbac:groups=network.openstack.org,resources=ipsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=network.openstack.org,resources=ipsets/status,verbs=get +//+kubebuilder:rbac:groups=network.openstack.org,resources=ipsets/finalizers,verbs=update +//+kubebuilder:rbac:groups=network.openstack.org,resources=netconfigs,verbs=get;list;watch +//+kubebuilder:rbac:groups=network.openstack.org,resources=dnsmasqs,verbs=get;list;watch +//+kubebuilder:rbac:groups=network.openstack.org,resources=dnsmasqs/status,verbs=get +//+kubebuilder:rbac:groups=network.openstack.org,resources=dnsdata,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=network.openstack.org,resources=dnsdata/status,verbs=get +//+kubebuilder:rbac:groups=network.openstack.org,resources=dnsdata/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete; +//+kubebuilder:rbac:groups=core.openstack.org,resources=openstackversions,verbs=get;list;watch + +// RBAC for the ServiceAccount for the internal image registry 
+//+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update +//+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update +//+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch +//+kubebuilder:rbac:groups="security.openshift.io",resourceNames=anyuid,resources=securitycontextconstraints,verbs=use +//+kubebuilder:rbac:groups="",resources=pods,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups="",resources=projects,verbs=get +//+kubebuilder:rbac:groups="project.openshift.io",resources=projects,verbs=get +//+kubebuilder:rbac:groups="",resources=imagestreamimages,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=imagestreammappings,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=imagestreams,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=imagestreams/layers,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=imagestreamtags,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=imagetags,verbs=get;list;watch +//+kubebuilder:rbac:groups="image.openshift.io",resources=imagestreamimages,verbs=get;list;watch +//+kubebuilder:rbac:groups="image.openshift.io",resources=imagestreammappings,verbs=get;list;watch +//+kubebuilder:rbac:groups="image.openshift.io",resources=imagestreams,verbs=get;list;watch +//+kubebuilder:rbac:groups="image.openshift.io",resources=imagestreams/layers,verbs=get +//+kubebuilder:rbac:groups="image.openshift.io",resources=imagetags,verbs=get;list;watch +//+kubebuilder:rbac:groups="image.openshift.io",resources=imagestreamtags,verbs=get;list;watch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the OpenStackDataPlaneNodeSet object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile +func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + Log := r.GetLogger(ctx) + Log.Info("Reconciling NodeSet") + + validate := validator.New() + + // Fetch the OpenStackDataPlaneNodeSet instance + instance := &dataplanev1.OpenStackDataPlaneNodeSet{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + helper, _ := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + + // initialize status if Conditions is nil, but do not reset if it already + // exists + isNewInstance := instance.Status.Conditions == nil + if isNewInstance { + instance.Status.Conditions = condition.Conditions{} + } + + // Save a copy of the conditions so that we can restore the LastTransitionTime + // when a condition's state doesn't change. + savedConditions := instance.Status.Conditions.DeepCopy() + + // Reset all conditions to Unknown as the state is not yet known for + // this reconcile loop. 
+ instance.InitConditions() + // Set ObservedGeneration since we've reset conditions + instance.Status.ObservedGeneration = instance.Generation + + // Always patch the instance status when exiting this function so we can persist any changes. + defer func() { // update the Ready condition based on the sub conditions + condition.RestoreLastTransitionTimes( + &instance.Status.Conditions, savedConditions) + if instance.Status.Conditions.AllSubConditionIsTrue() { + instance.Status.Conditions.MarkTrue( + condition.ReadyCondition, dataplanev1.NodeSetReadyMessage) + } else if instance.Status.Conditions.IsUnknown(condition.ReadyCondition) { + // Recalculate ReadyCondition based on the state of the rest of the conditions + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + + err := helper.PatchInstance(ctx, instance) + if err != nil { + Log.Error(err, "Error updating instance status conditions") + _err = err + return + } + }() + + if instance.Status.ConfigMapHashes == nil { + instance.Status.ConfigMapHashes = make(map[string]string) + } + if instance.Status.SecretHashes == nil { + instance.Status.SecretHashes = make(map[string]string) + } + if instance.Status.ContainerImages == nil { + instance.Status.ContainerImages = make(map[string]string) + } + + instance.Status.Conditions.MarkFalse(dataplanev1.SetupReadyCondition, condition.RequestedReason, condition.SeverityInfo, condition.ReadyInitMessage) + + // Detect config changes and set Status ConfigHash + configHash, err := r.GetSpecConfigHash(instance) + if err != nil { + return ctrl.Result{}, err + } + + if configHash != instance.Status.DeployedConfigHash { + instance.Status.ConfigHash = configHash + } + + // Ensure Services + err = deployment.EnsureServices(ctx, helper, instance, validate) + if err != nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.SetupReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneNodeSetErrorMessage, 
+ err.Error()) + return ctrl.Result{}, err + } + + // Ensure IPSets Required for Nodes + allIPSets, isReady, err := deployment.EnsureIPSets(ctx, helper, instance) + if err != nil || !isReady { + return ctrl.Result{}, err + } + + // Ensure DNSData Required for Nodes + dnsDetails, err := deployment.EnsureDNSData( + ctx, helper, + instance, allIPSets) + if err != nil || !dnsDetails.IsReady { + return ctrl.Result{}, err + } + instance.Status.DNSClusterAddresses = dnsDetails.ClusterAddresses + instance.Status.CtlplaneSearchDomain = dnsDetails.CtlplaneSearchDomain + instance.Status.AllHostnames = dnsDetails.Hostnames + instance.Status.AllIPs = dnsDetails.AllIPs + + ansibleSSHPrivateKeySecret := instance.Spec.NodeTemplate.AnsibleSSHPrivateKeySecret + + secretKeys := []string{} + secretKeys = append(secretKeys, AnsibleSSHPrivateKey) + if !instance.Spec.PreProvisioned { + secretKeys = append(secretKeys, AnsibleSSHAuthorizedKeys) + } + _, result, err = secret.VerifySecret( + ctx, + types.NamespacedName{ + Namespace: instance.Namespace, + Name: ansibleSSHPrivateKeySecret, + }, + secretKeys, + helper.GetClient(), + time.Second*5, + ) + if err != nil { + if (result != ctrl.Result{}) { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + dataplanev1.InputReadyWaitingMessage, + "secret/"+ansibleSSHPrivateKeySecret) + } else { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityError, + err.Error()) + } + return result, err + } + + // all our input checks out so report InputReady + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + + // Reconcile ServiceAccount + nodeSetServiceAccount := serviceaccount.NewServiceAccount( + &corev1.ServiceAccount{ + ObjectMeta: v1.ObjectMeta{ + Namespace: instance.Namespace, + Name: instance.Name, + }, + }, + time.Duration(10), + ) + saResult, err := 
nodeSetServiceAccount.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.ServiceAccountReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceAccountReadyErrorMessage, + err.Error()) + return saResult, err + } else if (saResult != ctrl.Result{}) { + instance.Status.Conditions.MarkFalse( + condition.ServiceAccountReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.ServiceAccountCreatingMessage) + return saResult, nil + } + + regViewerRoleBinding := rolebinding.NewRoleBinding( + &rbacv1.RoleBinding{ + ObjectMeta: v1.ObjectMeta{ + Namespace: instance.Namespace, + Name: instance.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: instance.Name, + Namespace: instance.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "registry-viewer", + }, + }, + time.Duration(10), + ) + rbResult, err := regViewerRoleBinding.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.ServiceAccountReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceAccountReadyErrorMessage, + err.Error()) + return rbResult, err + } else if (rbResult != ctrl.Result{}) { + instance.Status.Conditions.MarkFalse( + condition.ServiceAccountReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.ServiceAccountCreatingMessage) + return rbResult, nil + } + + instance.Status.Conditions.MarkTrue( + condition.ServiceAccountReadyCondition, + condition.ServiceAccountReadyMessage) + + version, err := dataplaneutil.GetVersion(ctx, helper, instance.Namespace) + if err != nil { + return ctrl.Result{}, err + } + containerImages := dataplaneutil.GetContainerImages(version) + + // Reconcile BaremetalSet if required + if !instance.Spec.PreProvisioned { + // Reset the NodeSetBareMetalProvisionReadyCondition to unknown + 
instance.Status.Conditions.MarkUnknown(dataplanev1.NodeSetBareMetalProvisionReadyCondition, + condition.InitReason, condition.InitReason) + + // Set Images + if containerImages.OsContainerImage != nil { + instance.Spec.BaremetalSetTemplate.OSContainerImageURL = *containerImages.OsContainerImage + } + if containerImages.AgentImage != nil { + instance.Spec.BaremetalSetTemplate.AgentImageURL = *containerImages.AgentImage + } + if containerImages.ApacheImage != nil { + instance.Spec.BaremetalSetTemplate.ApacheImageURL = *containerImages.ApacheImage + } + isReady, err := deployment.DeployBaremetalSet(ctx, helper, instance, + allIPSets, dnsDetails.ServerAddresses) + if err != nil || !isReady { + return ctrl.Result{}, err + } + } + + isDeploymentReady, isDeploymentRunning, isDeploymentFailed, err := checkDeployment(helper, instance) + if !isDeploymentFailed && err != nil { + instance.Status.Conditions.MarkFalse( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityError, + condition.DeploymentReadyErrorMessage, + err.Error()) + Log.Error(err, "Unable to get deployed OpenStackDataPlaneDeployments.") + return ctrl.Result{}, err + } + + if !isDeploymentRunning && !isDeploymentFailed { + // Generate NodeSet Inventory + _, err = deployment.GenerateNodeSetInventory(ctx, helper, instance, + allIPSets, dnsDetails.ServerAddresses, containerImages) + if err != nil { + errorMsg := fmt.Sprintf("Unable to generate inventory for %s", instance.Name) + util.LogErrorForObject(helper, err, errorMsg, instance) + instance.Status.Conditions.MarkFalse( + dataplanev1.SetupReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneNodeSetErrorMessage, + errorMsg) + return ctrl.Result{}, err + } + } + // all setup tasks complete, mark SetupReadyCondition True + instance.Status.Conditions.MarkTrue(dataplanev1.SetupReadyCondition, condition.ReadyMessage) + + // Set DeploymentReadyCondition to False if it was unknown. 
+ // Handles the case where the NodeSet is created, but not yet deployed. + if instance.Status.Conditions.IsUnknown(condition.DeploymentReadyCondition) { + Log.Info("Set NodeSet DeploymentReadyCondition false") + instance.Status.Conditions.MarkFalse(condition.DeploymentReadyCondition, + condition.RequestedReason, condition.SeverityInfo, + condition.DeploymentReadyInitMessage) + } + + if isDeploymentReady { + Log.Info("Set NodeSet DeploymentReadyCondition true") + instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, + condition.DeploymentReadyMessage) + } else if isDeploymentRunning { + Log.Info("Deployment still running...", "instance", instance) + Log.Info("Set NodeSet DeploymentReadyCondition false") + instance.Status.Conditions.MarkFalse(condition.DeploymentReadyCondition, + condition.RequestedReason, condition.SeverityInfo, + condition.DeploymentReadyRunningMessage) + } else if isDeploymentFailed { + Log.Info("Set NodeSet DeploymentReadyCondition false") + deployErrorMsg := "" + if err != nil { + deployErrorMsg = err.Error() + } + instance.Status.Conditions.MarkFalse(condition.DeploymentReadyCondition, + condition.ErrorReason, condition.SeverityError, + deployErrorMsg) + } + + return ctrl.Result{}, err +} + +func checkDeployment(helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, +) (bool, bool, bool, error) { + // Get all completed deployments + deployments := &dataplanev1.OpenStackDataPlaneDeploymentList{} + opts := []client.ListOption{ + client.InNamespace(instance.Namespace), + } + err := helper.GetClient().List(context.Background(), deployments, opts...) 
+ if err != nil { + helper.GetLogger().Error(err, "Unable to retrieve OpenStackDataPlaneDeployment CRs %v") + return false, false, false, err + } + + var isDeploymentReady bool + var isDeploymentRunning bool + var isDeploymentFailed bool + + // Sort deployments from oldest to newest by the LastTransitionTime of + // their DeploymentReadyCondition + slices.SortFunc(deployments.Items, func(a, b dataplanev1.OpenStackDataPlaneDeployment) int { + aReady := a.Status.Conditions.Get(condition.DeploymentReadyCondition) + bReady := b.Status.Conditions.Get(condition.DeploymentReadyCondition) + if aReady != nil && bReady != nil { + if aReady.LastTransitionTime.Before(&bReady.LastTransitionTime) { + return -1 + } + } + return 1 + }) + + for _, deployment := range deployments.Items { + if !deployment.DeletionTimestamp.IsZero() { + continue + } + if slices.Contains( + deployment.Spec.NodeSets, instance.Name) { + + // Reset the vars for every deployment + isDeploymentReady = false + isDeploymentRunning = false + deploymentConditions := deployment.Status.NodeSetConditions[instance.Name] + if instance.Status.DeploymentStatuses == nil { + instance.Status.DeploymentStatuses = make(map[string]condition.Conditions) + } + instance.Status.DeploymentStatuses[deployment.Name] = deploymentConditions + deploymentCondition := deploymentConditions.Get(dataplanev1.NodeSetDeploymentReadyCondition) + if condition.IsError(deploymentCondition) { + err = fmt.Errorf(deploymentCondition.Message) + isDeploymentFailed = true + break + } else if deploymentConditions.IsFalse(dataplanev1.NodeSetDeploymentReadyCondition) { + isDeploymentRunning = true + } else if deploymentConditions.IsTrue(dataplanev1.NodeSetDeploymentReadyCondition) { + isDeploymentReady = true + } + + if isDeploymentReady { + for k, v := range deployment.Status.ConfigMapHashes { + instance.Status.ConfigMapHashes[k] = v + } + for k, v := range deployment.Status.SecretHashes { + instance.Status.SecretHashes[k] = v + } + for k, v := range 
deployment.Status.ContainerImages { + instance.Status.ContainerImages[k] = v + } + instance.Status.DeployedConfigHash = deployment.Status.NodeSetHashes[instance.Name] + instance.Status.DeployedVersion = deployment.Status.DeployedVersion + } + + } + } + + return isDeploymentReady, isDeploymentRunning, isDeploymentFailed, err +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OpenStackDataPlaneNodeSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + // index for ConfigMaps listed on ansibleVarsFrom + if err := mgr.GetFieldIndexer().IndexField(context.Background(), + &dataplanev1.OpenStackDataPlaneNodeSet{}, "spec.ansibleVarsFrom.ansible.configMaps", + func(rawObj client.Object) []string { + nodeSet := rawObj.(*dataplanev1.OpenStackDataPlaneNodeSet) + configMaps := make([]string, 0) + + appendConfigMaps := func(varsFrom []dataplanev1.DataSource) { + for _, ref := range varsFrom { + if ref.ConfigMapRef != nil { + configMaps = append(configMaps, ref.ConfigMapRef.Name) + } + } + } + + appendConfigMaps(nodeSet.Spec.NodeTemplate.Ansible.AnsibleVarsFrom) + for _, node := range nodeSet.Spec.Nodes { + appendConfigMaps(node.Ansible.AnsibleVarsFrom) + } + return configMaps + }); err != nil { + return err + } + + // index for Secrets listed on ansibleVarsFrom + if err := mgr.GetFieldIndexer().IndexField(context.Background(), + &dataplanev1.OpenStackDataPlaneNodeSet{}, "spec.ansibleVarsFrom.ansible.secrets", + func(rawObj client.Object) []string { + nodeSet := rawObj.(*dataplanev1.OpenStackDataPlaneNodeSet) + secrets := make([]string, 0, len(nodeSet.Spec.Nodes)+1) + if nodeSet.Spec.NodeTemplate.AnsibleSSHPrivateKeySecret != "" { + secrets = append(secrets, nodeSet.Spec.NodeTemplate.AnsibleSSHPrivateKeySecret) + } + + appendSecrets := func(varsFrom []dataplanev1.DataSource) { + for _, ref := range varsFrom { + if ref.SecretRef != nil { + secrets = append(secrets, ref.SecretRef.Name) + } + } + } + + 
appendSecrets(nodeSet.Spec.NodeTemplate.Ansible.AnsibleVarsFrom) + for _, node := range nodeSet.Spec.Nodes { + appendSecrets(node.Ansible.AnsibleVarsFrom) + } + return secrets + }); err != nil { + return err + } + return ctrl.NewControllerManagedBy(mgr). + For(&dataplanev1.OpenStackDataPlaneNodeSet{}). + Owns(&ansibleeev1.OpenStackAnsibleEE{}). + Owns(&baremetalv1.OpenStackBaremetalSet{}). + Owns(&infranetworkv1.IPSet{}). + Owns(&infranetworkv1.DNSData{}). + Owns(&corev1.Secret{}). + Watches(&infranetworkv1.DNSMasq{}, + handler.EnqueueRequestsFromMapFunc(r.genericWatcherFn)). + Watches(&dataplanev1.OpenStackDataPlaneDeployment{}, + handler.EnqueueRequestsFromMapFunc(r.deploymentWatcherFn)). + Watches(&corev1.ConfigMap{}, + handler.EnqueueRequestsFromMapFunc(r.secretWatcherFn), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{})). + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.secretWatcherFn), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{})). + Watches(&openstackv1.OpenStackVersion{}, + handler.EnqueueRequestsFromMapFunc(r.genericWatcherFn)). 
+ Complete(r) +} + +func (r *OpenStackDataPlaneNodeSetReconciler) secretWatcherFn( + ctx context.Context, obj client.Object) []reconcile.Request { + Log := r.GetLogger(ctx) + nodeSets := &dataplanev1.OpenStackDataPlaneNodeSetList{} + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + + selector := "spec.ansibleVarsFrom.ansible.configMaps" + if kind == "secret" { + selector = "spec.ansibleVarsFrom.ansible.secrets" + } + + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector(selector, obj.GetName()), + Namespace: obj.GetNamespace(), + } + + if err := r.List(ctx, nodeSets, listOpts); err != nil { + Log.Error(err, "Unable to retrieve OpenStackDataPlaneNodeSetList") + return nil + } + + requests := make([]reconcile.Request, 0, len(nodeSets.Items)) + for _, nodeSet := range nodeSets.Items { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: nodeSet.Name, + }, + }) + Log.Info(fmt.Sprintf("reconcile loop for openstackdataplanenodeset %s triggered by %s %s", + nodeSet.Name, kind, obj.GetName())) + } + return requests +} + +func (r *OpenStackDataPlaneNodeSetReconciler) genericWatcherFn( + ctx context.Context, obj client.Object) []reconcile.Request { + Log := r.GetLogger(ctx) + nodeSets := &dataplanev1.OpenStackDataPlaneNodeSetList{} + + listOpts := []client.ListOption{ + client.InNamespace(obj.GetNamespace()), + } + if err := r.Client.List(ctx, nodeSets, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve OpenStackDataPlaneNodeSetList") + return nil + } + + requests := make([]reconcile.Request, 0, len(nodeSets.Items)) + for _, nodeSet := range nodeSets.Items { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: nodeSet.Name, + }, + }) + Log.Info(fmt.Sprintf("Reconciling NodeSet %s due to watcher on %s/%s", nodeSet.Name, obj.GetObjectKind().GroupVersionKind().Kind, 
obj.GetName())) + } + return requests +} + +func (r *OpenStackDataPlaneNodeSetReconciler) deploymentWatcherFn( + ctx context.Context, obj client.Object) []reconcile.Request { + Log := r.GetLogger(ctx) + namespace := obj.GetNamespace() + deployment := obj.(*dataplanev1.OpenStackDataPlaneDeployment) + + requests := make([]reconcile.Request, 0, len(deployment.Spec.NodeSets)) + for _, nodeSet := range deployment.Spec.NodeSets { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: nodeSet, + }, + }) + } + + podsInterface := r.Kclient.CoreV1().Pods(namespace) + podsList, err := podsInterface.List(ctx, v1.ListOptions{ + LabelSelector: fmt.Sprintf("openstackdataplanedeployment=%s", deployment.Name), + FieldSelector: "status.phase=Failed", + }) + + if err != nil { + Log.Error(err, "unable to retrieve list of pods for dataplane diagnostic") + } else { + for _, pod := range podsList.Items { + Log.Info(fmt.Sprintf("openstackansibleee job %s failed due to %s with message: %s", pod.Name, pod.Status.Reason, pod.Status.Message)) + } + } + return requests +} + +// GetSpecConfigHash initialises a new struct with only the field we want to check for variances in. +// We then hash the contents of the new struct using md5 and return the hashed string. 
+func (r *OpenStackDataPlaneNodeSetReconciler) GetSpecConfigHash(instance *dataplanev1.OpenStackDataPlaneNodeSet) (string, error) { + configHash, err := util.ObjectHash(&instance.Spec) + if err != nil { + return "", err + } + + return configHash, nil +} diff --git a/custom-bundle.Dockerfile b/custom-bundle.Dockerfile index f0123f920..7137fd025 100644 --- a/custom-bundle.Dockerfile +++ b/custom-bundle.Dockerfile @@ -31,14 +31,12 @@ USER root # local operator manifests COPY bundle/manifests /manifests/ COPY bundle_extra_data /bundle_extra_data -RUN cp -a /bundle_extra_data/manifests/* /manifests/ # Merge things into our openstack-operator CSV: # -dataplane-operator CSV # -ENV vars from all operators (for webhooks) RUN /workspace/csv-merger \ --import-env-files=/bundle_extra_data/env-vars.yaml \ - --dataplane-csv=/bundle_extra_data/manifests/dataplane-operator.clusterserviceversion.yaml \ --base-csv=/manifests/openstack-operator.clusterserviceversion.yaml | tee /openstack-operator.clusterserviceversion.yaml.new # remove all individual operator CSV's diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..2d31c8040 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,61 @@ +BUILD = upstream +BUILD_DIR = ../docs_build +ROOTDIR = $(realpath .) 
+NAME = ctlplane +DEST_DIR = $(BUILD_DIR)/$(NAME) +DEST_HTML = $(DEST_DIR)/index-$(BUILD).html +DEST_PDF = $(BUILD_DIR)/$(NAME)-$(BUILD).pdf +IMAGES_DIR = $(DEST_DIR)/images +IMAGES_TS = $(DEST_DIR)/.timestamp-images +MAIN_SOURCE = user.adoc +OTHER_SOURCES = $(shell find ./assemblies -type f) +IMAGES = $(shell find ./images -type f) +ALL_SOURCES = $(MAIN_SOURCE) $(OTHER_SOURCES) $(IMAGES) +UNAME = $(shell uname) +BUNDLE_EXEC ?= bundle exec + +ifeq ($(UNAME), Linux) +BROWSER_OPEN = xdg-open +endif +ifeq ($(UNAME), Darwin) +BROWSER_OPEN = open +endif + +all: html + +html: html-latest + +html-latest: prepare $(IMAGES_TS) $(DEST_HTML) + +pdf: prepare $(DEST_PDF) + +prepare: + @mkdir -p $(BUILD_DIR) + @mkdir -p $(DEST_DIR) $(IMAGES_DIR) + +clean: + @rm -rf "$(DEST_DIR)" "$(DEST_PDF)" + +watch-html: + @which inotifywait > /dev/null || ( echo "ERROR: inotifywait not found, install inotify-tools" && exit 1 ) + while true; do \ + inotifywait -r -e modify -e create -e delete .; \ + sleep 0.5; \ + $(MAKE) html; \ + done + +open-html: html + ${BROWSER_OPEN} "file://$(realpath $(ROOTDIR)/$(DEST_HTML))" + +open-pdf: pdf + ${BROWSER_OPEN} "$(realpath $(ROOTDIR)/$(DEST_PDF))" + +$(IMAGES_TS): $(IMAGES) + cp $? 
$(IMAGES_DIR) + touch $(IMAGES_TS) + +$(DEST_HTML): $(ALL_SOURCES) + $(BUNDLE_EXEC) asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" -a build=$(BUILD) -b xhtml5 -d book -o $@ $(MAIN_SOURCE) + +$(DEST_PDF): $(ALL_SOURCES) + $(BUNDLE_EXEC) asciidoctor-pdf -a build=$(BUILD) -d book -o $@ $(MAIN_SOURCE) $(IMAGES) diff --git a/docs/OWNERS b/docs/OWNERS new file mode 100644 index 000000000..e9c5cf81c --- /dev/null +++ b/docs/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners +approvers: + - docs-approvers + +reviewers: + - docs-approvers diff --git a/docs/assemblies/custom_resources.adoc b/docs/assemblies/custom_resources.adoc new file mode 100644 index 000000000..81649c464 --- /dev/null +++ b/docs/assemblies/custom_resources.adoc @@ -0,0 +1,17395 @@ +[#custom-resources] +== Custom Resources + +* <> +* <> +* <> +* <> +* <> +* <> + +[#sub-resources] +=== Sub Resources + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* 
<> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* 
<> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +[#openstackclient] +==== OpenStackClient + +OpenStackClient is the Schema for the openstackclients API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackclientlist] +==== OpenStackClientList + +OpenStackClientList contains a list of OpenStackClient + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackclientspec] +==== OpenStackClientSpec + +OpenStackClientSpec defines the desired state of OpenStackClient + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage for the the OpenstackClient container (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#openstackclientspeccore] +==== OpenStackClientSpecCore + +OpenStackClientSpecCore defines the desired state of OpenStackClient + +|=== +| Field | Description | Scheme | Required + +| openStackConfigMap +| OpenStackConfigMap is the name of the ConfigMap containing the clouds.yaml +| *string +| true + +| openStackConfigSecret +| OpenStackConfigSecret is the name of the Secret containing the secure.yaml +| *string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running control plane services (currently only applies to KeystoneAPI and PlacementAPI) +| map[string]string +| false +|=== + +<> + +[#openstackclientstatus] +==== OpenStackClientStatus + +OpenStackClientStatus defines the observed state of OpenStackClient + +|=== +| Field | Description | Scheme | Required + +| podName +| PodName +| string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent 
generation observed for this object. +| int64 +| false +|=== + +<> + +[#barbicansection] +==== BarbicanSection + +BarbicanSection defines the desired state of Barbican service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Barbican service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Barbican Service +| *barbicanv1.BarbicanSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#cacertconfig] +==== CACertConfig + +CACertConfig defines details for ca cert configs + +|=== +| Field | Description | Scheme | Required + +| duration +| The requested 'duration' (i.e. lifetime) of the Certificate. The Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration +| *metav1.Duration +| true + +| renewBefore +| How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration +| *metav1.Duration +| false + +| customIssuer +| CustomIssuer - use pre-created issue for this CA. No CA and issure is being created the CA cert and chain needs to be added using the CaBundleSecretName. +| *string +| false +|=== + +<> + +[#certconfig] +==== CertConfig + +CertConfig defines details for cert configs + +|=== +| Field | Description | Scheme | Required + +| duration +| The requested 'duration' (i.e. lifetime) of the Certificate. The Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. 
Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration +| *metav1.Duration +| true + +| renewBefore +| How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration +| *metav1.Duration +| false +|=== + +<> + +[#certsection] +==== CertSection + +CertSection defines details for CA config and its certs + +|=== +| Field | Description | Scheme | Required + +| cert +| Cert - defines details for cert config +| <> +| true + +| ca +| Ca - defines details for CA cert config +| <> +| true +|=== + +<> + +[#cindersection] +==== CinderSection + +CinderSection defines the desired state of Cinder service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Cinder service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating Cinder Resources +| *cinderv1.CinderSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. 
+| <> +| false +|=== + +<> + +[#dnsmasqsection] +==== DNSMasqSection + +DNSMasqSection defines the desired state of DNSMasq service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether DNSMasq service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the DNSMasq service +| *networkv1.DNSMasqSpec +| false +|=== + +<> + +[#designatesection] +==== DesignateSection + +DesignateSection defines the desired state of the Designate service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether the Designate service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating Designate Resources +| *designatev1.DesignateSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#galerasection] +==== GaleraSection + +GaleraSection defines the desired state of Galera services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Galera services should be deployed and managed +| bool +| true + +| templates +| Templates - Overrides to use when creating the Galera databases +| *map[string]mariadbv1.GaleraSpecCore +| false +|=== + +<> + +[#glancesection] +==== GlanceSection + +GlanceSection defines the desired state of Glance service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Glance service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Glance Service +| *glancev1.GlanceSpecCore +| false + +| apiOverrides +| APIOverride, provides the ability to override the generated manifest of several child resources. 
+| map[string]<> +| false +|=== + +<> + +[#heatsection] +==== HeatSection + +HeatSection defines the desired state of Heat services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Heat services should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Heat services +| *heatv1.HeatSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| cnfAPIOverride +| CnfAPIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#horizonsection] +==== HorizonSection + +HorizonSection defines the desired state of Horizon services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Horizon services should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Horizon services +| *horizonv1.HorizonSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#ironicsection] +==== IronicSection + +IronicSection defines the desired state of Ironic services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Ironic services should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Ironic services +| *ironicv1.IronicSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| inspectorOverride +| InspectorOverride, provides the ability to override the generated manifest of several child resources. 
+| <> +| false +|=== + +<> + +[#keystonesection] +==== KeystoneSection + +KeystoneSection defines the desired state of Keystone service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Keystone service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Keystone service +| *keystonev1.KeystoneAPISpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#manilasection] +==== ManilaSection + +ManilaSection defines the desired state of Manila service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Manila service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating Manila Resources +| *manilav1.ManilaSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#memcachedsection] +==== MemcachedSection + +MemcachedSection defines the desired state of Memcached services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Memcached services should be deployed and managed +| bool +| true + +| templates +| Templates - Overrides to use when creating the Memcached databases +| *map[string]memcachedv1.MemcachedSpecCore +| false +|=== + +<> + +[#neutronsection] +==== NeutronSection + +NeutronSection defines the desired state of Neutron service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Neutron service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Neutron Service +| *neutronv1.NeutronAPISpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. 
+| <> +| false +|=== + +<> + +[#novacelloverridespec] +==== NovaCellOverrideSpec + +NovaCellOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| noVNCProxy +| +| <> +| false +|=== + +<> + +[#novasection] +==== NovaSection + +NovaSection defines the desired state of Nova services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Nova services should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Nova services +| *novav1.NovaSpec +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| cellOverride +| CellOverride, provides the ability to override the generated manifest of several child resources for a nova cell. cell0 never have compute nodes and therefore it won't have a noVNCProxy deployed. Providing an override for cell0 noVNCProxy does not have an effect. +| map[string]<> +| false +|=== + +<> + +[#octaviasection] +==== OctaviaSection + +OctaviaSection defines the desired state of the Octavia service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether the Octavia service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating Octavia Resources +| *octaviav1.OctaviaSpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. 
+| <> +| false +|=== + +<> + +[#openstackclientsection] +==== OpenStackClientSection + +OpenStackClientSection defines the desired state of the OpenStackClient + +|=== +| Field | Description | Scheme | Required + +| template +| Template - Overrides to use when creating the OpenStackClient Resource +| v1beta1.OpenStackClientSpecCore +| false +|=== + +<> + +[#openstackcontrolplane] +==== OpenStackControlPlane + +OpenStackControlPlane is the Schema for the openstackcontrolplanes API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackcontrolplanelist] +==== OpenStackControlPlaneList + +OpenStackControlPlaneList contains a list of OpenStackControlPlane + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackcontrolplanespec] +==== OpenStackControlPlaneSpec + +OpenStackControlPlaneSpec defines the desired state of OpenStackControlPlane + +|=== +| Field | Description | Scheme | Required + +| secret +| Secret - FIXME: make this optional +| string +| true + +| storageClass +| StorageClass - +| string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running control plane services (currently only applies to KeystoneAPI and PlacementAPI) +| map[string]string +| false + +| tls +| TLS - Parameters related to the TLS +| <> +| true + +| dns +| DNS - Parameters related to the DNSMasq service +| <> +| false + +| keystone +| Keystone - Parameters related to the Keystone service +| <> +| false + +| placement +| Placement - Parameters related to the Placement service +| <> +| false + +| glance +| Glance - Parameters related to the Glance service +| <> +| false + +| cinder +| Cinder - Parameters related to the Cinder service +| <> +| false + +| galera +| Galera - Parameters related to the Galera services +| <> +| false + +| rabbitmq +| 
Rabbitmq - Parameters related to the Rabbitmq service +| <> +| false + +| memcached +| Memcached - Parameters related to the Memcached service +| <> +| false + +| ovn +| Ovn - Overrides to use when creating the OVN Services +| <> +| false + +| neutron +| Neutron - Overrides to use when creating the Neutron Service +| <> +| false + +| nova +| Nova - Parameters related to the Nova services +| <> +| false + +| heat +| Heat - Parameters related to the Heat services +| <> +| false + +| ironic +| Ironic - Parameters related to the Ironic services +| <> +| false + +| manila +| Manila - Parameters related to the Manila service +| <> +| false + +| horizon +| Horizon - Parameters related to the Horizon services +| <> +| false + +| telemetry +| Telemetry - Parameters related to the OpenStack Telemetry services +| <> +| false + +| swift +| Swift - Parameters related to the Swift service +| <> +| false + +| octavia +| Octavia - Parameters related to the Octavia service +| <> +| false + +| designate +| Designate - Parameters related to the Designate service +| <> +| false + +| barbican +| Barbican - Parameters related to the Barbican service +| <> +| false + +| openstackclient +| OpenStackClient - Parameters related to the OpenStackClient +| <> +| false + +| extraMounts +| ExtraMounts containing conf files and credentials that should be provided to the underlying operators. This struct can be defined in the top level CR and propagated to the underlying operators that accept it in their API (e.g., cinder/glance). However, if extraVolumes are specified within the single operator template Section, the globally defined ExtraMounts are ignored and overridden for the operator which has this section already. 
+| []<> +| false +|=== + +<> + +[#openstackcontrolplanestatus] +==== OpenStackControlPlaneStatus + +OpenStackControlPlaneStatus defines the observed state of OpenStackControlPlane + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| tls +| TLS +| <> +| false + +| deployedVersion +| DeployedVersion +| *string +| false + +| deployedOVNVersion +| DeployedOVNVersion +| *string +| false + +| containerImages +| ContainerImages +| <> +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this object. +| int64 +| false +|=== + +<> + +[#openstackextravolmounts] +==== OpenStackExtraVolMounts + +OpenStackExtraVolMounts exposes additional parameters processed by the openstack-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| []storage.VolMounts +| true +|=== + +<> + +[#override] +==== Override + +Override to override the generated manifest of several child resources. 
+ +|=== +| Field | Description | Scheme | Required + +| route +| Route overrides to use when creating the public service endpoint +| *route.OverrideSpec +| false + +| tls +| TLS - overrides tls parameters for public endpoint +| *<> +| false +|=== + +<> + +[#ovnresources] +==== OvnResources + +OvnResources defines the desired state of OVN services + +|=== +| Field | Description | Scheme | Required + +| ovnDBCluster +| OVNDBCluster - Overrides to use when creating the OVNDBCluster services +| map[string]ovnv1.OVNDBClusterSpecCore +| false + +| ovnNorthd +| OVNNorthd - Overrides to use when creating the OVNNorthd service +| ovnv1.OVNNorthdSpecCore +| false + +| ovnController +| OVNController - Overrides to use when creating the OVNController service +| ovnv1.OVNControllerSpecCore +| false +|=== + +<> + +[#ovnsection] +==== OvnSection + +OvnSection defines the desired state of OVN services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OVN services should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the OVN services +| *<> +| false +|=== + +<> + +[#placementsection] +==== PlacementSection + +PlacementSection defines the desired state of Placement service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Placement service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the Placement API +| *placementv1.PlacementAPISpecCore +| false + +| apiOverride +| APIOverride, provides the ability to override the generated manifest of several child resources. 
+| <> +| false +|=== + +<> + +[#rabbitmqsection] +==== RabbitmqSection + +RabbitmqSection defines the desired state of RabbitMQ service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether RabbitMQ services should be deployed and managed +| bool +| true + +| templates +| Templates - Overrides to use when creating the Rabbitmq clusters +| *map[string]<> +| true +|=== + +<> + +[#swiftsection] +==== SwiftSection + +SwiftSection defines the desired state of Swift service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether Swift service should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating Swift Resources +| *swiftv1.SwiftSpecCore +| false + +| proxyOverride +| ProxyOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#tlscastatus] +==== TLSCAStatus + +TLSCAStatus defines the observed state of TLS + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| true + +| expires +| +| string +| true +|=== + +<> + +[#tlsingressconfig] +==== TLSIngressConfig + +TLSIngressConfig defines the desired state of the TLS configuration for the ingress configuration (route) + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether TLS should be enabled for endpoint type +| bool +| true +|=== + +<> + +[#tlspodlevelconfig] +==== TLSPodLevelConfig + +TLSPodLevelConfig defines the desired state of the TLS configuration for TLS termination at the pod level + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether TLS should be enabled for endpoint type +| bool +| true + +| internal +| Internal - default CA used for all OpenStackControlPlane and OpenStackDataplane endpoints, except OVN related CA and certs +| <> +| true + +| libvirt +| Libvirt - CA used for libvirt/qemu services on OpenStackControlPlane and OpenStackDataplane +| <> +| 
true + +| ovn +| Ovn - CA used for all OVN services on OpenStackControlPlane and OpenStackDataplane +| <> +| true +|=== + +<> + +[#tlssection] +==== TLSSection + +TLSSection defines the desired state of TLS configuration + +|=== +| Field | Description | Scheme | Required + +| ingress +| +| <> +| false + +| podLevel +| +| <> +| false +|=== + +<> + +[#tlsserviceoverride] +==== TLSServiceOverride + +TLSServiceOverride overrides tls parameters for public endpoint + +|=== +| Field | Description | Scheme | Required + +| secretName +| Name of a Secret in the same Namespace as the service, containing the server's private key, public certificate and CA certificate for TLS. The Secret must store these as tls.key, tls.crt and ca.crt respectively. +| string +| false +|=== + +<> + +[#tlsstatus] +==== TLSStatus + +TLSStatus defines the observed state of TLS + +|=== +| Field | Description | Scheme | Required + +| caList +| +| []<> +| false +|=== + +<> + +[#telemetrysection] +==== TelemetrySection + +TelemetrySection defines the desired state of OpenStack Telemetry services + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OpenStack Telemetry services should be deployed and managed +| bool +| true + +| template +| Template - Overrides to use when creating the OpenStack Telemetry services +| *telemetryv1.TelemetrySpecCore +| false + +| aodhApiOverride +| AodhAPIOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| prometheusOverride +| PrometheusOverride, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| alertmanagerOverride +| AlertmanagerOverride, provides the ability to override the generated manifest of several child resources. 
+| <> +| false +|=== + +<> + +[#containerdefaults] +==== ContainerDefaults + +ContainerDefaults - struct that contains container image default URLs for each service (internal use only) + +|=== +| Field | Description | Scheme | Required + +| cinderVolumeImage +| +| *string +| false + +| manilaShareImage +| +| *string +| false +|=== + +<> + +[#containerimages] +==== ContainerImages + +ContainerImages - struct acts as the source of truth for container image URLs to be deployed + +|=== +| Field | Description | Scheme | Required + +| cinderVolumeImages +| CinderVolumeImages custom Cinder Volume images for each backend (default Cinder volume image is stored 'default' key) +| map[string]*string +| false + +| manilaShareImages +| ManilaShareImages custom Manila Share images for each backend (default Manila share image is stored 'default' key) +| map[string]*string +| false +|=== + +<> + +[#containertemplate] +==== ContainerTemplate + +ContainerTemplate - struct that contains container image URLs for each service in OpenStackControlplane + +|=== +| Field | Description | Scheme | Required + +| agentImage +| +| *string +| false + +| ansibleeeImage +| +| *string +| false + +| aodhAPIImage +| +| *string +| false + +| aodhEvaluatorImage +| +| *string +| false + +| aodhListenerImage +| +| *string +| false + +| aodhNotifierImage +| +| *string +| false + +| apacheImage +| +| *string +| false + +| barbicanAPIImage +| +| *string +| false + +| barbicanKeystoneListenerImage +| +| *string +| false + +| barbicanWorkerImage +| +| *string +| false + +| ceilometerCentralImage +| +| *string +| false + +| ceilometerComputeImage +| +| *string +| false + +| ceilometerIpmiImage +| +| *string +| false + +| ceilometerNotificationImage +| +| *string +| false + +| ceilometerSgcoreImage +| +| *string +| false + +| ceilometerProxyImage +| +| *string +| false + +| cinderAPIImage +| +| *string +| false + +| cinderBackupImage +| +| *string +| false + +| cinderSchedulerImage +| +| *string +| false + +| 
designateAPIImage +| +| *string +| false + +| designateBackendbind9Image +| +| *string +| false + +| designateCentralImage +| +| *string +| false + +| designateMdnsImage +| +| *string +| false + +| designateProducerImage +| +| *string +| false + +| designateUnboundImage +| +| *string +| false + +| designateWorkerImage +| +| *string +| false + +| edpmFrrImage +| +| *string +| false + +| edpmIscsidImage +| +| *string +| false + +| edpmLogrotateCrondImage +| +| *string +| false + +| edpmMultipathdImage +| +| *string +| false + +| edpmNeutronDhcpAgentImage +| +| *string +| false + +| edpmNeutronMetadataAgentImage +| +| *string +| false + +| edpmNeutronOvnAgentImage +| +| *string +| false + +| edpmNeutronSriovAgentImage +| +| *string +| false + +| edpmOvnBgpAgentImage +| +| *string +| false + +| edpmNodeExporterImage +| +| *string +| false + +| glanceAPIImage +| +| *string +| false + +| heatAPIImage +| +| *string +| false + +| heatCfnapiImage +| +| *string +| false + +| heatEngineImage +| +| *string +| false + +| horizonImage +| +| *string +| false + +| infraDnsmasqImage +| +| *string +| false + +| infraMemcachedImage +| +| *string +| false + +| ironicAPIImage +| +| *string +| false + +| ironicConductorImage +| +| *string +| false + +| ironicInspectorImage +| +| *string +| false + +| ironicNeutronAgentImage +| +| *string +| false + +| ironicPxeImage +| +| *string +| false + +| ironicPythonAgentImage +| +| *string +| false + +| keystoneAPIImage +| +| *string +| false + +| manilaAPIImage +| +| *string +| false + +| manilaSchedulerImage +| +| *string +| false + +| mariadbImage +| +| *string +| false + +| neutronAPIImage +| +| *string +| false + +| novaAPIImage +| +| *string +| false + +| novaComputeImage +| +| *string +| false + +| novaConductorImage +| +| *string +| false + +| novaNovncImage +| +| *string +| false + +| novaSchedulerImage +| +| *string +| false + +| octaviaAPIImage +| +| *string +| false + +| octaviaHealthmanagerImage +| +| *string +| false + +| 
octaviaHousekeepingImage +| +| *string +| false + +| octaviaWorkerImage +| +| *string +| false + +| openstackClientImage +| +| *string +| false + +| osContainerImage +| +| *string +| false + +| ovnControllerImage +| +| *string +| false + +| ovnControllerOvsImage +| +| *string +| false + +| ovnNbDbclusterImage +| +| *string +| false + +| ovnNorthdImage +| +| *string +| false + +| ovnSbDbclusterImage +| +| *string +| false + +| placementAPIImage +| +| *string +| false + +| rabbitmqImage +| +| *string +| false + +| swiftAccountImage +| +| *string +| false + +| swiftContainerImage +| +| *string +| false + +| swiftObjectImage +| +| *string +| false + +| swiftProxyImage +| +| *string +| false + +| telemetryNodeExporterImage +| +| *string +| false +|=== + +<> + +[#customcontainerimages] +=== CustomContainerImages + +CustomContainerImages - struct for custom container images + +|=== +| Field | Description | Scheme | Required + +| cinderVolumeImages +| +| map[string]*string +| false + +| manilaShareImages +| +| map[string]*string +| false +|=== + +<> + +[#openstackversion] +==== OpenStackVersion + +OpenStackVersion is the Schema for the openstackversionupdates API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackversionlist] +==== OpenStackVersionList + +OpenStackVersionList contains a list of OpenStackVersion + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackversionspec] +==== OpenStackVersionSpec + +OpenStackVersionSpec - defines the desired state of OpenStackVersion + +|=== +| Field | Description | Scheme | Required + +| targetVersion +| TargetVersion is the version of OpenStack to install (based on the availableVersion in the OpenStackVersion CR status) +| string +| true + +| customContainerImages +| CustomContainerImages is a list of 
containerImages to customize for deployment +| <> +| false +|=== + +<> + +[#openstackversionstatus] +==== OpenStackVersionStatus + +OpenStackVersionStatus defines the observed state of OpenStackVersion + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| deployedVersion +| +| *string +| false + +| availableVersion +| +| *string +| false + +| containerImages +| This is the source of truth for the container images to be deployed. +| <> +| false + +| containerImageVersionDefaults +| where we keep track of the container images for previous versions +| map[string]*<> +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this object. +| int64 +| false +|=== + +<> + +[#barbican] +==== Barbican + +Barbican is the Schema for the barbicans API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#barbicanlist] +==== BarbicanList + +BarbicanList contains a list of Barbican + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#barbicanspec] +==== BarbicanSpec + +BarbicanSpec defines the desired state of Barbican + +|=== +| Field | Description | Scheme | Required + +| barbicanAPI +| +| <> +| true + +| barbicanWorker +| +| <> +| true + +| barbicanKeystoneListener +| +| <> +| true +|=== + +<> + +[#barbicanspecbase] +==== BarbicanSpecBase + +BarbicanSpecBase - + +|=== +| Field | Description | Scheme | Required + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this component. Setting here overrides any global NodeSelector settings within the Barbican CR. 
+| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false +|=== + +<> + +[#barbicanspeccore] +==== BarbicanSpecCore + +BarbicanSpecCore defines the desired state of Barbican, for use with the OpenStackControlplane CR (no containerImages) + +|=== +| Field | Description | Scheme | Required + +| barbicanAPI +| +| <> +| true + +| barbicanWorker +| +| <> +| true + +| barbicanKeystoneListener +| +| <> +| true +|=== + +<> + +[#barbicanstatus] +==== BarbicanStatus + +BarbicanStatus defines the observed state of Barbican + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| serviceID +| ServiceID +| string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| barbicanAPIReadyCount +| ReadyCount of Barbican API instances +| int32 +| false + +| barbicanWorkerReadyCount +| ReadyCount of Barbican Worker instances +| int32 +| false + +| barbicanKeystoneListenerReadyCount +| ReadyCount of Barbican KeystoneListener instances +| int32 +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| databaseHostname +| Barbican Database Hostname +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#barbicanapi] +==== BarbicanAPI + +BarbicanAPI is the Schema for the barbicanapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#barbicanapilist] +==== BarbicanAPIList + +BarbicanAPIList contains a list of BarbicanAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#barbicanapispec] +==== BarbicanAPISpec + +BarbicanAPISpec defines the desired state of BarbicanAPI + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Barbican Database Hostname +| string +| true + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false +|=== + +<> + +[#barbicanapistatus] +==== BarbicanAPIStatus + +BarbicanAPIStatus defines the observed state of BarbicanAPI + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of barbican API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| apiEndpoint +| API endpoint +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| databaseHostname +| Barbican Database Hostname +| string +| false +|=== + +<> + +[#barbicanapitemplate] +==== BarbicanAPITemplate + +BarbicanAPITemplate defines the input parameters for the Barbican API service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Barbican Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#barbicanapitemplatecore] +==== BarbicanAPITemplateCore + +BarbicanAPITemplateCore - + +|=== +| Field | Description | Scheme | Required + +| enableSecureRBAC +| EnableSecureRBAC - Enable Consistent and Secure RBAC policies +| bool +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#barbicankeystonelistener] +==== BarbicanKeystoneListener + +BarbicanKeystoneListener is the Schema for the barbicankeystonelistener API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#barbicankeystonelistenerlist] +==== BarbicanKeystoneListenerList + +BarbicanKeystoneListenerList contains a list of BarbicanKeystoneListener + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#barbicankeystonelistenerspec] +==== BarbicanKeystoneListenerSpec + +BarbicanKeystoneListenerSpec defines the desired state of BarbicanKeystoneListener + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| +| string +| true + +| transportURLSecret +| +| string +| false + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#barbicankeystonelistenerstatus] +==== BarbicanKeystoneListenerStatus + +BarbicanKeystoneListenerStatus defines the observed state of BarbicanKeystoneListener + +|=== +| Field | Description | Scheme | Required + +| readyCount +| INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file ReadyCount of barbican API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| databaseHostname +| Barbican Database Hostname +| string +| false +|=== + +<> + +[#barbicankeystonelistenertemplate] +==== BarbicanKeystoneListenerTemplate + +BarbicanKeystoneListenerTemplate defines common Spec elements for the KeystoneListener process + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Barbican Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#barbicanworker] +==== BarbicanWorker + +BarbicanWorker is the Schema for the barbicanworkers API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#barbicanworkerlist] +==== BarbicanWorkerList + +BarbicanWorkerList contains a list of BarbicanWorker + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#barbicanworkerspec] +==== BarbicanWorkerSpec + +BarbicanWorkerSpec defines the desired state of BarbicanWorker + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| +| string +| true + +| transportURLSecret +| +| string +| false + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#barbicanworkerstatus] +==== BarbicanWorkerStatus + +BarbicanWorkerStatus defines the observed state of BarbicanWorker + +|=== +| Field | Description | Scheme | Required + +| readyCount +| INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file ReadyCount of barbican API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| databaseHostname +| Barbican Database Hostname +| string +| false +|=== + +<> + +[#barbicanworkertemplate] +==== BarbicanWorkerTemplate + +BarbicanWorkerTemplate defines common Spec elements for the Worker process + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Barbican Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#barbicancomponenttemplate] +==== BarbicanComponentTemplate + +BarbicanComponentTemplate - Variables used by every sub-component of Barbican (e.g. API, Worker, Listener) + +|=== +| Field | Description | Scheme | Required + +| nodeSelector +| NodeSelector to target subset of worker nodes running this component. Setting here overrides any global NodeSelector settings within the Barbican CR. +| map[string]string +| false + +| replicas +| Replicas of Barbican API to run +| *int32 +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| customServiceConfigSecrets +| CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets that contain sensitive service config data. 
The content of each Secret gets added to the /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| []string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false +|=== + +<> + +[#barbicantemplate] +==== BarbicanTemplate + +BarbicanTemplate defines common Spec elements for all Barbican components including the top level CR + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| databaseInstance +| MariaDB instance name Right now required by the mariadb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount CR name used for barbican DB, defaults to barbican +| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Barbican +| string +| true + +| simpleCryptoBackendSecret +| Secret containing the Key Encryption Key (KEK) used for the Simple Crypto backend +| string +| true + +| secret +| Secret containing all passwords / keys needed +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. 
The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Barbican services the default SA name +| string +| true +|=== + +<> + +[#passwordselector] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the barbican service user password from the Secret +| string +| true + +| simplecryptokek +| +| string +| true +|=== + +<> + +[#cinder] +==== Cinder + +Cinder is the Schema for the cinders API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#cinderextravolmounts] +==== CinderExtraVolMounts + +CinderExtraVolMounts exposes additional parameters processed by the cinder-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| []storage.VolMounts +| true +|=== + +<> + +[#cinderlist] +==== CinderList + +CinderList contains a list of Cinder + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#cinderspec] +==== CinderSpec + +CinderSpec defines the desired state of Cinder + +|=== +| Field | Description | Scheme | Required + +| cinderAPI +| CinderAPI - Spec definition for the API service of this Cinder deployment +| <> +| true + +| cinderScheduler +| CinderScheduler - Spec definition for the Scheduler service of this Cinder deployment +| <> +| true + +| cinderBackup +| CinderBackup - Spec definition for the Backup service of this Cinder deployment +| <> +| true + +| cinderVolumes +| CinderVolumes - Map of chosen names 
to spec definitions for the Volume(s) service(s) of this Cinder deployment +| map[string]<> +| false +|=== + +<> + +[#cinderspecbase] +==== CinderSpecBase + +|=== +| Field | Description | Scheme | Required + +| databaseInstance +| MariaDB instance name Right now required by the mariadb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Cinder +| string +| true + +| memcachedInstance +| Memcached instance name. +| string +| true + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config for all Cinder services using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| string +| false + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting NodeSelector here acts as a default value and can be overridden by service specific NodeSelector Settings. 
+| map[string]string +| false + +| dbPurge +| DBPurge parameters - +| <> +| false + +| apiTimeout +| APITimeout for HAProxy, Apache, and rpc_response_timeout +| int +| true +|=== + +<> + +[#cinderspeccore] +==== CinderSpecCore + +CinderSpecCore the same as CinderSpec without ContainerImage references + +|=== +| Field | Description | Scheme | Required + +| cinderAPI +| CinderAPI - Spec definition for the API service of this Cinder deployment +| <> +| true + +| cinderScheduler +| CinderScheduler - Spec definition for the Scheduler service of this Cinder deployment +| <> +| true + +| cinderBackup +| CinderBackup - Spec definition for the Backup service of this Cinder deployment +| <> +| true + +| cinderVolumes +| CinderVolumes - Map of chosen names to spec definitions for the Volume(s) service(s) of this Cinder deployment +| map[string]<> +| false +|=== + +<> + +[#cinderstatus] +==== CinderStatus + +CinderStatus defines the observed state of Cinder + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Cinder Database Hostname +| string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| apiEndpoints +| API endpoints +| map[string]map[string]string +| false + +| serviceIDs +| ServiceIDs +| map[string]string +| false + +| cinderAPIReadyCount +| ReadyCount of Cinder API instance +| int32 +| true + +| cinderBackupReadyCount +| ReadyCount of Cinder Backup instance +| int32 +| true + +| cinderSchedulerReadyCount +| ReadyCount of Cinder Scheduler instance +| int32 +| true + +| cinderVolumesReadyCounts +| ReadyCounts of Cinder Volume instances +| map[string]int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is different than the spec generation, then the controller has not started processing the latest changes, and the status and its conditions are likely stale. +| int64 +| false +|=== + +<> + +[#dbpurge] +==== DBPurge + +DBPurge struct is used to model the parameters exposed to the Cinder cronJob + +|=== +| Field | Description | Scheme | Required + +| age +| Age is the DBPurgeAge parameter and indicates the number of days of purging DB records +| int +| true + +| schedule +| Schedule defines the crontab format string to schedule the DBPurge cronJob +| string +| true +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#cinderapi] +==== CinderAPI + +CinderAPI is the Schema for the cinderapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#cinderapilist] +==== CinderAPIList + +CinderAPIList contains a list of CinderAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#cinderapispec] +==== CinderAPISpec + +CinderAPISpec defines the desired state of CinderAPI + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Cinder Database Hostname +| string +| true + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| true + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Cinder services the 
default SA name +| string +| true +|=== + +<> + +[#cinderapistatus] +==== CinderAPIStatus + +CinderAPIStatus defines the observed state of CinderAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoints +| API endpoints +| map[string]map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Cinder API instances +| int32 +| true + +| serviceIDs +| ServiceIDs +| map[string]string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is different than the spec generation, then the controller has not started processing the latest changes, and the status and its conditions are likely stale. +| int64 +| false +|=== + +<> + +[#cinderapitemplate] +==== CinderAPITemplate + +CinderAPITemplate defines the input parameters for the Cinder API service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Cinder Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#cinderapitemplatecore] +==== CinderAPITemplateCore + +CinderAPITemplate defines the input parameters for the Cinder API service + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Cinder API Replicas +| *int32 +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#cinderbackup] +==== CinderBackup + +CinderBackup is the Schema for the cinderbackups API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#cinderbackuplist] +==== CinderBackupList + +CinderBackupList contains a list of CinderBackup + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#cinderbackupspec] +==== CinderBackupSpec + +CinderBackupSpec defines the desired state of CinderBackup + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Cinder Database Hostname +| string +| true + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| true + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Cinder services the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#cinderbackupstatus] +==== CinderBackupStatus + +CinderBackupStatus defines the observed state of CinderBackup + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Cinder Backup instances +| int32 +| true + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is different than the spec generation, then the controller has not started processing the latest changes, and the status and its conditions are likely stale. +| int64 +| false +|=== + +<> + +[#cinderbackuptemplate] +==== CinderBackupTemplate + +CinderBackupTemplate defines the input parameters for the Cinder Backup service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Cinder Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#cinderbackuptemplatecore] +==== CinderBackupTemplateCore + +CinderBackupTemplate defines the input parameters for the Cinder Backup service + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Cinder Backup Replicas +| *int32 +| true +|=== + +<> + +[#cinderscheduler] +==== CinderScheduler + +CinderScheduler is the Schema for the cinderschedulers API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#cinderschedulerlist] +==== CinderSchedulerList + +CinderSchedulerList contains a list of CinderScheduler + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#cinderschedulerspec] +==== CinderSchedulerSpec + +CinderSchedulerSpec defines the desired state of CinderScheduler + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Cinder Database Hostname +| string +| true + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| true + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Cinder services the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> 
+ +[#cinderschedulerstatus] +==== CinderSchedulerStatus + +CinderSchedulerStatus defines the observed state of CinderScheduler + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Cinder Scheduler instances +| int32 +| true + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is different than the spec generation, then the controller has not started processing the latest changes, and the status and its conditions are likely stale. +| int64 +| false +|=== + +<> + +[#cinderschedulertemplate] +==== CinderSchedulerTemplate + +CinderSchedulerTemplate defines the input parameters for the Cinder Scheduler service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Cinder Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#cinderschedulertemplatecore] +==== CinderSchedulerTemplateCore + +CinderSchedulerTemplate defines the input parameters for the Cinder Scheduler service + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Cinder Scheduler Replicas +| *int32 +| true +|=== + +<> + +[#cindervolume] +==== CinderVolume + +CinderVolume is the Schema for the cindervolumes API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#cindervolumelist] +==== CinderVolumeList + +CinderVolumeList contains a list of CinderVolume + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#cindervolumespec] +==== 
CinderVolumeSpec + +CinderVolumeSpec defines the desired state of CinderVolume + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Cinder Database Hostname +| string +| true + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| true + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Cinder services the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#cindervolumestatus] +==== CinderVolumeStatus + +CinderVolumeStatus defines the observed state of CinderVolume + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Cinder Volume instances +| int32 +| true + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is different than the spec generation, then the controller has not started processing the latest changes, and the status and its conditions are likely stale. 
+| int64 +| false +|=== + +<> + +[#cindervolumetemplate] +==== CinderVolumeTemplate + +CinderVolumeTemplate defines the input parameters for the Cinder Volume service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Cinder Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#cindervolumetemplatecore] +==== CinderVolumeTemplateCore + +CinderVolumeTemplate defines the input parameters for the Cinder Volume service + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Cinder Volume Replicas +| *int32 +| true +|=== + +<> + +[#cinderservicetemplate] +==== CinderServiceTemplate + +CinderServiceTemplate defines the input parameters that can be defined for a given Cinder service + +|=== +| Field | Description | Scheme | Required + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Cinder CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| string +| false + +| customServiceConfigSecrets +| CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets that contain sensitive service config data. The content of each Secret gets added to the /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| []string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false +|=== + +<> + +[#cindertemplate] +==== CinderTemplate + +CinderTemplate defines common input parameters used by all Cinder services + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in cinder +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount used for cinder DB, defaults to cinder +| string +| true + +| secret +| Secret containing OpenStack password information +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| true +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the cinder service password from the Secret +| string +| true +|=== + +<> + +[#designateservicetemplate] +==== DesignateServiceTemplate + +DesignateServiceTemplate defines the input parameters that can be defined for a given Designate service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Designate Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#designateservicetemplatecore] +==== DesignateServiceTemplateCore + +DesignateServiceTemplate defines the input parameters that can be defined for a given Designate service + +|=== +| Field | Description | Scheme | Required + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Designate CR. 
+| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| string +| false + +| customServiceConfigSecrets +| CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets that contain sensitive service config data. The content of each Secret gets added to the /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| []string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false +|=== + +<> + +[#designatetemplate] +==== DesignateTemplate + +DesignateTemplate defines common input parameters used by all Designate services + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in designate +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect. 
+| string +| true + +| secret +| Secret containing OpenStack password information for DesignatePassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true + +| None +| BackendType - Defines the backend service/configuration we are using, i.e. bind9, PowerDNS, BYO, etc.. Helps maintain a single init container/init.sh to do container setup +| string +| true + +| backendWorkerServerProtocol +| BackendTypeProtocol - Defines the backend protocol to be used between the designate-worker & designate_mdns to/from the DNS server. Acceptable values are: "UDP", "TCP" Please Note: this MUST match what is in the /etc/designate.conf ['service:worker'] +| string +| true + +| backendMdnsServerProtocol +| BackendTypeProtocol - Defines the backend protocol to be used between the designate-worker & designate_mdns to/from the DNS server. Acceptable values are: "UDP", "TCP" Please Note: this MUST match what is in the /etc/designate.conf ['service:mdns'] +| string +| true +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the designate service password from the Secret +| string +| true +|=== + +<> + +[#designate] +==== Designate + +Designate is the Schema for the designates API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designateextravolmounts] +==== DesignateExtraVolMounts + +DesignateExtraVolMounts exposes additional parameters processed by the designate-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| 
[]storage.VolMounts +| true +|=== + +<> + +[#designatelist] +==== DesignateList + +DesignateList contains a list of Designate + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designatespec] +==== DesignateSpec + +DesignateSpec defines the desired state of Designate + +|=== +| Field | Description | Scheme | Required + +| designateAPI +| DesignateAPI - Spec definition for the API service of this Designate deployment +| <> +| true + +| designateCentral +| DesignateCentral - Spec definition for the Central service of this Designate deployment +| <> +| true + +| designateWorker +| DesignateWorker - Spec definition for the Worker service of this Designate deployment +| <> +| true + +| designateMdns +| DesignateMdns - Spec definition for the Mdns service of this Designate deployment +| <> +| true + +| designateProducer +| DesignateProducer - Spec definition for the Producer service of this Designate deployment +| <> +| true + +| designateBackendbind9 +| DesignateBackendbind9 - Spec definition for the Backendbind9 service of this Designate deployment +| <> +| true + +| designateUnbound +| DesignateUnbound - Spec definition for the Unbound Resolver service of this Designate deployment +| <> +| true +|=== + +<> + +[#designatespecbase] +==== DesignateSpecBase + +DesignateSpecBase - + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in designate +| string +| true + +| databaseInstance +| MariaDB instance name. Right now required by the mariadb-operator to get the credentials from the instance to create the DB. Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect. 
+| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name. Needed to request a transportURL that is created and used in Designate +| string +| true + +| secret +| Secret containing OpenStack password information for designate AdminPassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and AdminUser password from the Secret +| <> +| false + +| None +| BackendType - Defines the backend service/configuration we are using, i.e. bind9, unbound, PowerDNS, BYO, etc. Helps maintain a single init container/init.sh to do container setup +| string +| true + +| backendWorkerServerProtocol +| BackendTypeProtocol - Defines the backend protocol to be used between the designate-worker & designate_mdns to/from the DNS server. Acceptable values are: "UDP", "TCP" Please Note: this MUST match what is in the /etc/designate.conf ['service:worker'] +| string +| true + +| backendMdnsServerProtocol +| BackendTypeProtocol - Defines the backend protocol to be used between the designate-worker & designate_mdns to/from the DNS server. Acceptable values are: "UDP", "TCP" Please Note: this MUST match what is in the /etc/designate.conf ['service:mdns'] +| string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. 
Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false +|=== + +<> + +[#designatespeccore] +==== DesignateSpecCore + +DesignateAPISpecCore - this version has no containerImage for use with the OpenStackControlplane + +|=== +| Field | Description | Scheme | Required + +| designateAPI +| DesignateAPI - Spec definition for the API service of this Designate deployment +| <> +| true + +| designateCentral +| DesignateCentral - Spec definition for the Central service of this Designate deployment +| <> +| true + +| designateWorker +| DesignateWorker - Spec definition for the Worker service of this Designate deployment +| <> +| true + +| designateMdns +| DesignateMdns - Spec definition for the Mdns service of this Designate deployment +| <> +| true + +| designateProducer +| DesignateProducer - Spec definition for the Producer service of this Designate deployment +| <> +| true + +| designateBackendbind9 +| DesignateBackendbind9 - Spec definition for the Backendbind9 service of this Designate deployment +| <> +| true + +| designateUnbound +| DesignateUnbound - Spec definition for the Unbound Resolver service of this Designate deployment +| <> +| true +|=== + +<> + +[#designatestatus] +==== DesignateStatus + +DesignateStatus defines the observed state of Designate + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| apiEndpoint +| API endpoint +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Designate Database Hostname +| string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| designateAPIReadyCount +| ReadyCount of Designate API instance +| int32 +| false + +| designateCentralReadyCount +| ReadyCount of Designate Central instance +| int32 +| false + +| designateWorkerReadyCount +| ReadyCount of Designate Worker instance +| int32 +| false + +| designateMdnsReadyCount +| ReadyCount of Designate Mdns instance +| int32 +| false + +| designateProducerReadyCount +| ReadyCount of Designate Producer instance +| int32 +| false + +| designateBackendbind9ReadyCount +| ReadyCount of Designate Backendbind9 instance +| int32 +| false + +| designateUnboundReadyCount +| ReadyCount of Designate Unbound instance +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#designateapi] +==== DesignateAPI + +DesignateAPI is the Schema for the designateapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designateapilist] +==== DesignateAPIList + +DesignateAPIList contains a list of DesignateAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designateapispecbase] +==== DesignateAPISpecBase + +DesignateAPISpecBase - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Designate API Replicas +| *int32 +| true + +| databaseHostname +| DatabaseHostname - Designate Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#designateapistatus] +==== DesignateAPIStatus + +DesignateAPIStatus defines the observed state of DesignateAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoints +| API endpoints +| map[string]map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of designate API instances +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#designatebackendbind9] +==== DesignateBackendbind9 + +DesignateBackendbind9 is the Schema for the designatebackendbind9 + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designatebackendbind9list] +==== DesignateBackendbind9List + +DesignateBackendbind9List contains a list of DesignateBackendbind9 + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designatebackendbind9specbase] +==== DesignateBackendbind9SpecBase + +DesignateBackendbind9SpecBase - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Designate Backendbind9 Replicas +| *int32 +| true + +| databaseHostname +| DatabaseHostname - Designate Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true +|=== + +<> + +[#designatebackendbind9status] +==== DesignateBackendbind9Status + +DesignateBackendbind9Status defines the observed state of DesignateBackendbind9 + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of designate backendbind9 instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#designatecentral] +==== DesignateCentral + +DesignateCentral is the Schema for the designatecentral API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designatecentrallist] +==== DesignateCentralList + +DesignateCentralList contains a list of DesignateCentral + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designatecentralspecbase] +==== DesignateCentralSpecBase + +DesignateCentralSpecBase - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Designate Central Replicas +| *int32 +| true + +| databaseHostname +| DatabaseHostname - Designate Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true +|=== + +<> + +[#designatecentralstatus] +==== DesignateCentralStatus + +DesignateCentralStatus defines the observed state of DesignateCentral + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of designate central instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#designatemdns] +==== DesignateMdns + +DesignateMdns is the Schema for the designatemdnses API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designatemdnslist] +==== DesignateMdnsList + +DesignateMdnsList contains a list of DesignateMdns + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designatemdnsspecbase] +==== DesignateMdnsSpecBase + +DesignateMdnsSpecBase - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Designate Mdns Replicas +| *int32 +| true + +| databaseHostname +| DatabaseHostname - Designate Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true +|=== + +<> + +[#designatemdnsstatus] +==== DesignateMdnsStatus + +DesignateMdnsStatus defines the observed state of DesignateMdns + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of designate MDNS instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#designateproducer] +==== DesignateProducer + +DesignateProducer is the Schema for the designateproducer API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designateproducerlist] +==== DesignateProducerList + +DesignateProducerList contains a list of DesignateProducer + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designateproducerspecbase] +==== DesignateProducerSpecBase + +DesignateProducerSpec defines the desired state of DesignateProducer + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Designate Producer Replicas +| *int32 +| true + +| databaseHostname +| DatabaseHostname - Designate Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true +|=== + +<> + +[#designateproducerstatus] +==== DesignateProducerStatus + +DesignateProducerStatus defines the observed state of DesignateProducer + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of designate Producer instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#designateunbound] +==== DesignateUnbound + +DesignateUnbound is the Schema for the designateunbound API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designateunboundlist] +==== DesignateUnboundList + +DesignateUnboundList contains a list of DesignateUnbound + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designateunboundspecbase] +==== DesignateUnboundSpecBase + +DesignateUnboundSpecBase - + +|=== +| Field | Description | Scheme | Required + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true + +| replicas +| Replicas - Designate Unbound Replicas +| *int32 +| true +|=== + +<> + +[#designateunboundstatus] +==== DesignateUnboundStatus + +DesignateUnboundStatus defines the observed state of DesignateUnbound + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of designate unbound instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#designateworker] +==== DesignateWorker + +DesignateWorker is the Schema for the designateworker API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#designateworkerlist] +==== DesignateWorkerList + +DesignateWorkerList contains a list of DesignateWorker + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#designateworkerspecbase] +==== DesignateWorkerSpecBase + +DesignateWorkerSpecBase - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Designate Worker Replicas +| *int32 +| true + +| databaseHostname +| DatabaseHostname - Designate Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Designate services the default SA name +| string +| true +|=== + +<> + +[#designateworkerstatus] +==== DesignateWorkerStatus + +DesignateWorkerStatus defines the observed state of DesignateWorker + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of designate worker instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#glanceapitemplate] +==== GlanceAPITemplate + +GlanceAPITemplate defines the desired state of GlanceAPI + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of glance API to run +| *int32 +| true + +| containerImage +| Glance Container Image URL (will be set to environmental default if empty) +| string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| pvc +| Pvc - Storage claim for file-backed Glance +| string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| customServiceConfigSecrets +| CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets that contain sensitive service config data. 
The content of each Secret gets added to the /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| []string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| storageClass +| StorageClass +| string +| false + +| storageRequest +| StorageRequest +| string +| true + +| type +| Type - represents the layout of the glanceAPI deployment. +| string +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false + +| imageCache +| ImageCache - It represents the struct to expose the ImageCache related parameters (size of the PVC and cronJob schedule) +| <> +| false + +| apiTimeout +| APITimeout for HAProxy and Apache defaults to GlanceSpecCore APITimeout +| int +| false +|=== + +<> + +[#imagecache] +==== ImageCache + +ImageCache - struct where the exposed imageCache params are defined + +|=== +| Field | Description | Scheme | Required + +| size +| Size - Local storage request, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) +| string +| true + +| cleanerScheduler +| Schedule defines the crontab format string to schedule the Cleaner cronJob +| string +| true + +| prunerScheduler +| Schedule defines the crontab format string to schedule the Pruner cronJob +| string +| true +|=== + +<> + +[#dbpurge-2] +==== DBPurge + +DBPurge struct is used to model the parameters exposed to the Glance API CronJob + +|=== +| Field | Description | Scheme | Required + +| age +| Age is the DBPurgeAge parameter and indicates the number of days of purging DB records +| int +| true + +| schedule +| Schedule defines the crontab format string to schedule the DBPurge cronJob +| string +| true +|=== + +<> + +[#glance] +==== Glance + +Glance is the Schema for the glances API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#glanceextravolmounts] +==== GlanceExtraVolMounts + +GlanceExtraVolMounts exposes additional parameters processed by the glance-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| []storage.VolMounts +| true +|=== + +<> + +[#glancelist] +==== GlanceList + +GlanceList contains a list of Glance + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#glancespec] +==== GlanceSpec + +GlanceSpec defines the desired state of Glance + +|=== +| Field | Description | Scheme | Required + +| containerImage +| Glance Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#glancespeccore] +==== GlanceSpecCore + +GlanceSpec defines the desired state of Glance + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - 
optional username used for this service to register in glance +| string +| true + +| databaseInstance +| MariaDB instance name. Right now required by the mariadb-operator to get the credentials from the instance to create the DB. Might not be required in future. +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect. +| string +| true + +| memcachedInstance +| Memcached instance name. +| string +| true + +| secret +| Secret containing OpenStack password information for glance's keystone password; no longer used for database password +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| customServiceConfigSecrets +| CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets that contain sensitive service config data. 
The content of each Secret gets added to the /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| []string +| false + +| storageClass +| StorageClass +| string +| false + +| storageRequest +| StorageRequest +| string +| true + +| glanceAPIs +| GlanceAPIs - Spec definition for the API service of this Glance deployment +| map[string]<> +| true + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| quotas +| Quotas is defined, per-tenant quotas are enforced according to the registered keystone limits +| <> +| false + +| imageCache +| ImageCache - +| <> +| true + +| keystoneEndpoint +| KeystoneEndpoint - indicates which glanceAPI should be registered in the keystone catalog, and it acts as a selector for the underlying glanceAPI(s) that can be specified by name +| string +| true + +| dbPurge +| DBPurge parameters - +| <> +| false + +| apiTimeout +| Default APITimeout for HAProxy and Apache, defaults to 60 seconds +| int +| true +|=== + +<> + +[#glancestatus] +==== GlanceStatus + +GlanceStatus defines the observed state of Glance + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoint +| API endpoint +| map[string]string +| false + +| serviceID +| ServiceID +| string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Glance Database Hostname +| string +| false + +| glanceAPIReadyCounts +| GlanceAPIReadyCounts - +| map[string]int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the glance service password from the Secret +| string +| true +|=== + +<> + +[#quotalimits] +==== QuotaLimits + +QuotaLimits - The parameters exposed to the top level glance CR that represents the limits we set in keystone + +|=== +| Field | Description | Scheme | Required + +| imageSizeTotal +| +| int +| true + +| imageStageTotal +| +| int +| true + +| imageCountTotal +| +| int +| true + +| imageCountUpload +| +| int +| true +|=== + +<> + +[#glanceapi] +==== GlanceAPI + +GlanceAPI is the Schema for the glanceapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#glanceapilist] +==== GlanceAPIList + +GlanceAPIList contains a list of GlanceAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#glanceapispec] +==== GlanceAPISpec + +GlanceAPISpec defines the desired state of GlanceAPI + +|=== +| Field | Description | Scheme | Required + +| apiType +| +| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in glance +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide GlanceAPI the default SA name +| string +| true + +| databaseHostname +| DatabaseHostname - Glance Database Hostname +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect. 
+| string +| true + +| secret +| Secret containing OpenStack password information for glance AdminPassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| quota +| QuotaEnforce if true, per-tenant quotas are enforced according to the registered keystone limits +| bool +| true + +| memcachedInstance +| Memcached instance name. +| string +| true +|=== + +<> + +[#glanceapistatus] +==== GlanceAPIStatus + +GlanceAPIStatus defines the observed state of GlanceAPI + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of glance API instances +| int32 +| true + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoint +| API endpoint +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| domain +| Domain is a parameter used by each glanceAPI replica to set up a worker and set the worker_self_reference_url config option. It's required when distributed image import is enabled and it enables pod to pod communication via the associated hostnames +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#heatservicetemplate] +==== HeatServiceTemplate + +HeatServiceTemplate - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - +| *int32 +| true + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes for running the service +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false +|=== + +<> + +[#heattemplate] +==== HeatTemplate + +HeatTemplate - + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in heat +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount used for heat DB, defaults to heat. +| string +| true + +| secret +| Secret containing OpenStack password information for heat HeatDatabasePassword, HeatPassword and HeatAuthEncryptionKey +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector .. 
+ +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the heat service password from the Secret +| string +| true + +| authEncryptionKey +| AuthEncryptionKey - Selector to get the heat auth encryption key from the Secret +| string +| true +|=== + +<> + +[#heat] +==== Heat + +Heat is the Schema for the heats API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#heatlist] +==== HeatList + +HeatList contains a list of Heat + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#heatspec] +==== HeatSpec + +HeatSpec defines the desired state of Heat + +|=== +| Field | Description | Scheme | Required + +| heatAPI +| HeatAPI - Spec definition for the API service of this Heat deployment +| <> +| true + +| heatCfnAPI +| HeatCfnAPI - Spec definition for the CfnAPI service of this Heat deployment +| <> +| true + +| heatEngine +| HeatEngine - Spec definition for the Engine service of this Heat deployment +| <> +| true +|=== + +<> + +[#heatspecbase] +==== HeatSpecBase + +HeatSpec defines the desired state of Heat + +|=== +| Field | Description | Scheme | Required + +| databaseInstance +| MariaDB instance name. Right now required by the maridb-operator to get the credentials from the instance to create the DB. Might not be required in future. +| string +| true + +| memcachedInstance +| Memcached instance name. +| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Heat +| string +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. 
The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes for running the Heat services +| map[string]string +| false +|=== + +<> + +[#heatspeccore] +==== HeatSpecCore + +HeatSpecCore defines the desired state of Heat, for use with OpenStackControlplane (no containerImages) + +|=== +| Field | Description | Scheme | Required + +| heatAPI +| HeatAPI - Spec definition for the API service of this Heat deployment +| <> +| true + +| heatCfnAPI +| HeatCfnAPI - Spec definition for the CfnAPI service of this Heat deployment +| <> +| true + +| heatEngine +| HeatEngine - Spec definition for the Engine service of this Heat deployment +| <> +| true +|=== + +<> + +[#heatstatus] +==== HeatStatus + +HeatStatus defines the observed state of Heat + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| databaseHostname +| Heat Database Hostname +| string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| heatApiReadyCount +| ReadyCount of Heat API instance +| int32 +| false + +| heatCfnApiReadyCount +| ReadyCount of Heat CfnAPI instance +| int32 +| false + +| heatEngineReadyCount +| ReadyCount of Heat Engine instance +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#heatapi] +==== HeatAPI + +HeatAPI ... + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#heatapilist] +==== HeatAPIList + +HeatAPIList contains a list of HeatAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#heatapispec] +==== HeatAPISpec + +HeatAPISpec defines the desired state of HeatAPI + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Heat Database Hostname +| string +| true + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Heat services the default SA name +| string +| true +|=== + +<> + +[#heatapistatus] +==== HeatAPIStatus + +HeatAPIStatus defines the observed state of HeatAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of HeatAPI instances +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#heatapitemplate] +==== HeatAPITemplate + +HeatAPITemplate defines the input parameters for the Heat API service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Container Image URL +| string +| true +|=== + +<> + +[#heatapitemplatecore] +==== HeatAPITemplateCore + +HeatAPITemplateCore - + +|=== +| Field | Description | Scheme | Required + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#heatcfnapi] +==== HeatCfnAPI + +HeatCfnAPI ... + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#heatcfnapilist] +==== HeatCfnAPIList + +HeatCfnAPIList contains a list of HeatCfnAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#heatcfnapispec] +==== HeatCfnAPISpec + +HeatCfnAPISpec defines the desired state of HeatCfnAPI + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Heat Database Hostname +| string +| true + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Heat services the default SA name +| string +| true +|=== + +<> + +[#heatcfnapistatus] +==== HeatCfnAPIStatus + +HeatCfnAPIStatus defines the observed state of HeatCfnAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of HeatCfnAPI instances +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#heatcfnapitemplate] +==== HeatCfnAPITemplate + +HeatCfnAPITemplate defines the input parameters for the Heat Cfn API service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Container Image URL +| string +| true +|=== + +<> + +[#heatcfnapitemplatecore] +==== HeatCfnAPITemplateCore + +HeatCfnAPITemplateCore - + +|=== +| Field | Description | Scheme | Required + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#heatengine] +==== HeatEngine + +HeatEngine defined. 
+ +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#heatenginelist] +==== HeatEngineList + +HeatEngineList contains a list of HeatEngine + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#heatenginespec] +==== HeatEngineSpec + +HeatEngineSpec defines the desired state of HeatEngine + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Heat Database Hostname +| string +| true + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Heat services the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#heatenginestatus] +==== HeatEngineStatus + +HeatEngineStatus defines the observed state of HeatEngine + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of HeatEngine instances +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#heatenginetemplate] +==== HeatEngineTemplate + +HeatEngineTemplate defines the input parameters for the Heat Engine service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Container Image URL +| string +| true +|=== + +<> + +[#horizionoverridespec] +==== HorizionOverrideSpec + +HorizionOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. +| *service.RoutedOverrideSpec +| false +|=== + +<> + +[#horizon] +==== Horizon + +Horizon is the Schema for the horizons API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#horizonextravolmounts] +==== HorizonExtraVolMounts + +HorizonExtraVolMounts exposes additional parameters processed by the horizon-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| []storage.VolMounts +| true +|=== + +<> + +[#horizonlist] +==== HorizonList + +HorizonList contains a list of Horizon + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#horizonspec] +==== HorizonSpec + +HorizonSpec defines the desired state of Horizon + +|=== +| Field | Description | Scheme | Required + +| containerImage +| horizon Container Image URL +| string +| true +|=== + +<> + +[#horizonspeccore] +==== HorizonSpecCore + +HorizonSpecBase - + +|=== +| Field | Description | Scheme | Required + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| 
defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.SimpleService +| false + +| replicas +| Replicas of horizon API to run +| *int32 +| true + +| secret +| Secret containing OpenStack password information for Horizon Secret Key +| string +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/openstack-dashboard/local_settings.d directory as 9999_custom_settings.py file. +| string +| true + +| memcachedInstance +| Memcached instance name. +| string +| true + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| extraMounts +| ExtraMounts containing conf files +| []<> +| false +|=== + +<> + +[#horizonstatus] +==== HorizonStatus + +HorizonStatus defines the observed state of Horizon + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| endpoint +| Endpoint url to access OpenStack Dashboard +| string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Horizon instances +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#ironicservicetemplate] +==== IronicServiceTemplate + +IronicServiceTemplate defines the common input parameters for Ironic services + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Ironic CR +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. 
Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false +|=== + +<> + +[#keystoneendpoints] +==== KeystoneEndpoints + +KeystoneEndpoints defines keystone endpoint parameters for service + +|=== +| Field | Description | Scheme | Required + +| internal +| Internal endpoint URL +| string +| true + +| public +| Public endpoint URL +| string +| true +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the ironic service password from the Secret +| string +| true +|=== + +<> + +[#dhcprange] +==== DHCPRange + +DHCPRange to define address range for DHCP requests + +|=== +| Field | Description | Scheme | Required + +| name +| Name - Name of the DHCPRange (used for tagging in dnsmasq) +| string +| false + +| cidr +| Cidr - IP address prefix (CIDR) representing an IP network. +| string +| true + +| start +| Start - Start of DHCP range +| string +| true + +| end +| End - End of DHCP range +| string +| true + +| gateway +| Gateway - IP address for the router +| string +| false + +| mtu +| MTU - Maximum Transmission Unit +| int +| false + +| podIndex +| PodIndex - Maps the DHCPRange to a specific statefulset pod index +| int +| false +|=== + +<> + +[#ironic] +==== Ironic + +Ironic is the Schema for the ironics API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ironicimages] +==== IronicImages + +IronicImages to specify container images required by all ironic services + +|=== +| Field | Description | Scheme | Required + +| api +| API - Ironic API Container Image (will be set to environmental default if empty) +| string +| true + +| conductor +| Conductor - Ironic Conductor Container Image (will be set to environmental default if empty) +| string +| 
true + +| inspector +| Inspector - Ironic Inspector Container Image (will be set to environmental default if empty) +| string +| true + +| neutronAgent +| NeutronAgent - ML2 baremetal - Ironic Neutron Agent Image (will be set to environmental default if empty) +| string +| true + +| pxe +| Pxe - Ironic DHCP/TFTP/HTTP Container Image URL (will be set to environmental default if empty) +| string +| true + +| ironicPythonAgent +| IronicPythonAgent - Image containing the ironic-python-agent kernel and ramdisk +| string +| true +|=== + +<> + +[#ironiclist] +==== IronicList + +IronicList contains a list of Ironic + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ironicspec] +==== IronicSpec + +IronicSpec defines the desired state of Ironic + +|=== +| Field | Description | Scheme | Required + +| images +| Images - Container images for all ironic services +| <> +| true +|=== + +<> + +[#ironicspeccore] +==== IronicSpecCore + +IronicSpec defines the desired state of Ironic + +|=== +| Field | Description | Scheme | Required + +| standalone +| Whether to deploy a standalone Ironic. +| bool +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in ironic +| string +| true + +| databaseInstance +| MariaDB instance name. Right now required by the mariadb-operator to get the credentials from the instance to create the DB. Might not be required in future. +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount used for ironic DB, defaults to ironic. +| string +| true + +| secret +| Secret containing OpenStack password information for ironic IronicPassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. 
to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| ironicAPI +| IronicAPI - Spec definition for the API service of this Ironic deployment +| <> +| true + +| ironicConductors +| IronicConductors - Spec definitions for the conductor service of this Ironic deployment +| []<> +| false + +| ironicInspector +| IronicInspector - Spec definition for the inspector service of this Ironic deployment +| <> +| true + +| ironicNeutronAgent +| IronicNeutronAgent - Spec definition for the ML2 baremetal ironic-neutron-agent service of this Ironic deployment +| <> +| true + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Ironic +| string +| true + +| rpcTransport +| RPC transport type - Which RPC transport implementation to use between conductor and API services. 'oslo' to use oslo.messaging transport or 'json-rpc' to use JSON RPC transport. NOTE \-> ironic and ironic-inspector require oslo.messaging transport when not in standalone mode. +| string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting NodeSelector here acts as a default value and can be overridden by service specific NodeSelector Settings. +| map[string]string +| false + +| storageClass +| Storage class to host data. This is passed to IronicConductors unless storageClass is explicitly set for the conductor. 
+| string +| true +|=== + +<> + +[#ironicstatus] +==== IronicStatus + +IronicStatus defines the observed state of Ironic + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Ironic Database Hostname +| string +| false + +| apiEndpoints +| API endpoint +| map[string]map[string]string +| false + +| ironicAPIReadyCount +| ReadyCount of Ironic API instance +| int32 +| false + +| ironicConductorReadyCount +| ReadyCount of Ironic Conductor instance +| map[string]int32 +| false + +| ironicInspectorReadyCount +| ReadyCount of Ironic Inspector instance +| int32 +| false + +| ironicNeutronAgentReadyCount +| ReadyCount of Ironic Neutron Agent instance +| int32 +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#ironicapi] +==== IronicAPI + +IronicAPI is the Schema for the ironicapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ironicapilist] +==== IronicAPIList + +IronicAPIList contains a list of IronicAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ironicapispec] +==== IronicAPISpec + +IronicAPISpec defines the desired state of IronicAPI + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Ironic API Container Image +| string +| true + +| standalone +| Whether to deploy a standalone Ironic. +| bool +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in ironic +| string +| true + +| secret +| Secret containing OpenStack password information for AdminPassword +| string +| false + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true + +| databaseHostname +| DatabaseHostname - Ironic Database Hostname +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount used for ironic DB, defaults to ironic. +| string +| true + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| rpcTransport +| RPC transport type - Which RPC transport implementation to use between conductor and API services. 'oslo' to use oslo.messaging transport or 'json-rpc' to use JSON RPC transport. NOTE \-> ironic requires oslo.messaging transport when not in standalone mode. 
+| string +| true + +| keystoneEndpoints +| KeystoneEndpoints - Internally used Keystone API endpoints +| <> +| true +|=== + +<> + +[#ironicapistatus] +==== IronicAPIStatus + +IronicAPIStatus defines the observed state of IronicAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoints +| API endpoint +| map[string]map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of ironic API instances +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#ironicapitemplate] +==== IronicAPITemplate + +IronicAPITemplate defines the input parameters for Ironic API service + +|=== +| Field | Description | Scheme | Required + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#ironicconductor] +==== IronicConductor + +IronicConductor is the Schema for the ironicconductors Conductor + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ironicconductorlist] +==== IronicConductorList + +IronicConductorList contains a list of IronicConductor + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ironicconductorspec] +==== IronicConductorSpec + +IronicConductorSpec defines the desired state of IronicConductor + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Ironic Conductor Container Image +| string +| true + +| standalone +| Whether to deploy a standalone Ironic. +| bool +| true + +| pxeContainerImage +| PxeContainerImage - Ironic DHCP/TFTP/HTTP Container Image +| string +| true + +| ironicPythonAgentImage +| IronicPythonAgentImage - Image containing the ironic-python-agent kernel and ramdisk +| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in ironic +| string +| true + +| secret +| Secret containing OpenStack password information for AdminPassword +| string +| false + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| true + +| databaseHostname +| DatabaseHostname - Ironic Database Hostname +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount used for ironic DB, defaults to ironic. +| string +| true + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| rpcTransport +| RPC transport type - Which RPC transport implementation to use between conductor and API services. 
'oslo' to use oslo.messaging transport or 'json-rpc' to use JSON RPC transport. NOTE \-> ironic requires oslo.messaging transport when not in standalone mode. +| string +| true + +| keystoneEndpoints +| KeystoneEndpoints - Internally used Keystone API endpoints +| <> +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#ironicconductorstatus] +==== IronicConductorStatus + +IronicConductorStatus defines the observed state of IronicConductor + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of ironic Conductor instances +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#ironicconductortemplate] +==== IronicConductorTemplate + +IronicConductorTemplate defines the input parameters for Ironic Conductor service + +|=== +| Field | Description | Scheme | Required + +| conductorGroup +| ConductorGroup - Ironic Conductor conductor group. +| string +| true + +| storageClass +| StorageClass +| string +| true + +| storageRequest +| StorageRequest +| string +| true + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| provisionNetwork +| ProvisionNetwork - Additional network to attach to expose boot DHCP, TFTP, HTTP services. 
+| string +| false + +| dhcpRanges +| DHCPRanges - List of DHCP ranges to use for provisioning +| []<> +| false +|=== + +<> + +[#inspectoroverridespec] +==== InspectorOverrideSpec + +InspectorOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#ironicinspector] +==== IronicInspector + +IronicInspector is the Schema for the IronicInspector + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ironicinspectorlist] +==== IronicInspectorList + +IronicInspectorList contains a list of IronicInspector + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ironicinspectorpasswordselector] +==== IronicInspectorPasswordSelector + +IronicInspectorPasswordSelector to identify the AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the ironic-inspector service password from the Secret +| string +| true +|=== + +<> + +[#ironicinspectorspec] +==== IronicInspectorSpec + +IronicInspectorSpec defines the desired state of IronicInspector + +|=== +| Field | Description | Scheme | Required + +| standalone +| Standalone - Whether to deploy a standalone Ironic Inspector. 
+| bool +| true + +| containerImage +| ContainerImage - Ironic Inspector Container Image +| string +| true + +| pxeContainerImage +| PxeContainerImage - Ironic Inspector DHCP/TFTP/HTTP Container Image +| string +| true + +| ironicPythonAgentImage +| IronicPythonAgentImage - Image containing the ironic-python-agent kernel and ramdisk +| string +| true + +| databaseInstance +| MariaDB instance name. Right now required by the maridb-operator to get the credentials from the instance to create the DB. Might not be required in future. +| string +| true + +| secret +| Secret containing OpenStack password information for AdminPassword +| string +| false + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Ironic Inspector +| string +| true + +| rpcTransport +| RPC transport type - Which RPC transport implementation to use between conductor and API services. 'oslo' to use oslo.messaging transport or 'json-rpc' to use JSON RPC transport. NOTE \-> ironic-inspector requires oslo.messaging transport when not in standalone mode. +| string +| true +|=== + +<> + +[#ironicinspectorstatus] +==== IronicInspectorStatus + +IronicInspectorStatus defines the observed state of IronicInspector + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoints +| API endpoint +| map[string]map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| IronicInspector Database Hostname +| string +| false + +| readyCount +| ReadyCount of Ironic Inspector instances +| int32 +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#ironicinspectortemplate] +==== IronicInspectorTemplate + +IronicInspectorTemplate defines the input parameters for Ironic Inspector service + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in ironic-inspector +| string +| true + +| replicas +| Replicas - Ironic Inspector Replicas +| *int32 +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount used for ironic DB, defaults to ironic-inspector. this is separate from the account used for ironic, as a MariaDBAccount can only refer to a single MariaDBDatabase and it appears that ironic inspector uses its own MariaDBDatabase. +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Ironic CR +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. 
Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| storageClass +| StorageClass +| string +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| inspectionNetwork +| InspectionNetwork - Additional network to attach to expose boot DHCP, TFTP, HTTP services. +| string +| false + +| dhcpRanges +| DHCPRanges - List of DHCP ranges to use for provisioning +| []<> +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#ironicneutronagent] +==== IronicNeutronAgent + +IronicNeutronAgent is the Schema for the ML2 baremetal - ironic-neutron-agent agents + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ironicneutronagentlist] +==== IronicNeutronAgentList + +IronicNeutronAgentList contains a list of IronicNeutronAgent + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ironicneutronagentspec] +==== IronicNeutronAgentSpec + +IronicNeutronAgentSpec defines the desired state of ML2 baremetal - ironic-neutron-agent agents + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - ML2 baremetal - Ironic Neutron Agent Image +| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in ironic +| string +| true + +| secret +| Secret containing OpenStack password information 
for IronicPassword +| string +| false + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#ironicneutronagentstatus] +==== IronicNeutronAgentStatus + +IronicNeutronAgentStatus defines the observed state of ML2 baremetal - ironic-neutron-agent + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of ironic Conductor instances +| int32 +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#ironicneutronagenttemplate] +==== IronicNeutronAgentTemplate + +IronicNeutronAgentTemplate defines the input parameters for ML2 baremetal - ironic-neutron-agent agents + +|=== +| Field | Description | Scheme | Required + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Ironic +| string +| true +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#keystoneapi] +==== KeystoneAPI + +KeystoneAPI is the Schema for the keystoneapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#keystoneapilist] +==== KeystoneAPIList + +KeystoneAPIList contains a list of KeystoneAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#keystoneapispec] +==== KeystoneAPISpec + +|=== +| Field | Description | Scheme | Required + +| containerImage +| Keystone Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#keystoneapispeccore] +==== KeystoneAPISpecCore + +KeystoneAPISpec defines the desired state of KeystoneAPI + +|=== +| Field | Description | Scheme | Required + +| databaseInstance +| MariaDB instance name Right now required by the maridb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect. +| string +| true + +| memcachedInstance +| Memcached instance name. 
+| string +| true + +| region +| Region - optional region name for the keystone service +| string +| true + +| adminProject +| AdminProject - admin project name +| string +| true + +| adminUser +| AdminUser - admin user name +| string +| true + +| replicas +| Replicas of keystone API to run +| *int32 +| true + +| secret +| Secret containing OpenStack password information for keystone AdminPassword +| string +| true + +| enableSecureRBAC +| EnableSecureRBAC - Enable Consistent and Secure RBAC policies +| bool +| true + +| trustFlushArgs +| TrustFlushArgs - Arguments added to keystone-manage trust_flush command +| string +| true + +| trustFlushSchedule +| TrustFlushSchedule - Schedule to purge expired or soft-deleted trusts from database +| string +| true + +| trustFlushSuspend +| TrustFlushSuspend - Suspend the cron job to purge trusts +| bool +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the AdminUser password from the Secret +| <> +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Keystone +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#keystoneapistatus] +==== KeystoneAPIStatus + +KeystoneAPIStatus defines the observed state of KeystoneAPI + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of keystone API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| apiEndpoints +| API endpoint +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Keystone Database Hostname +| string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. 
+| int64 +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| admin +| Admin - Selector to get the keystone Admin password from the Secret +| string +| true +|=== + +<> + +[#keystoneendpoint] +==== KeystoneEndpoint + +KeystoneEndpoint is the Schema for the keystoneendpoints API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#keystoneendpointlist] +==== KeystoneEndpointList + +KeystoneEndpointList contains a list of KeystoneEndpoint + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#keystoneendpointspec] +==== KeystoneEndpointSpec + +KeystoneEndpointSpec defines the desired state of KeystoneEndpoint + +|=== +| Field | Description | Scheme | Required + +| serviceName +| ServiceName - Name of the service to create the endpoint for +| string +| true + +| endpoints +| Endpoints - map with service api endpoint URLs with the endpoint type as index +| map[string]string +| true +|=== + +<> + +[#keystoneendpointstatus] +==== KeystoneEndpointStatus + +KeystoneEndpointStatus defines the observed state of KeystoneEndpoint + +|=== +| Field | Description | Scheme | Required + +| endpointIDs +| +| map[string]string +| false + +| serviceID +| +| string +| false + +| conditions +| Conditions +| condition.Conditions +| false +|=== + +<> + +[#keystoneservice] +==== KeystoneService + +KeystoneService is the Schema for the keystoneservices API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#keystoneservicelist] +==== KeystoneServiceList + +KeystoneServiceList contains a list of KeystoneService + +|=== +| 
Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#keystoneservicespec] +==== KeystoneServiceSpec + +KeystoneServiceSpec defines the desired state of KeystoneService + +|=== +| Field | Description | Scheme | Required + +| serviceType +| ServiceType - Type is the type of the service. +| string +| true + +| serviceName +| ServiceName - Name of the service. +| string +| true + +| serviceDescription +| ServiceDescription - Description for the service. +| string +| false + +| enabled +| Enabled - whether or not the service is enabled. +| bool +| true + +| serviceUser +| ServiceUser - optional username used for this service +| string +| true + +| secret +| Secret containing OpenStack password information for the ServiceUser +| string +| true + +| passwordSelector +| PasswordSelector - Selector to get the ServiceUser password from the Secret, e.g. PlacementPassword +| string +| true +|=== + +<> + +[#keystoneservicestatus] +==== KeystoneServiceStatus + +KeystoneServiceStatus defines the observed state of KeystoneService + +|=== +| Field | Description | Scheme | Required + +| serviceID +| +| string +| false + +| conditions +| Conditions +| condition.Conditions +| false +|=== + +<> + +[#manilaservicetemplate] +==== ManilaServiceTemplate + +ManilaServiceTemplate defines the input parameters that can be defined for a given Manila service + +|=== +| Field | Description | Scheme | Required + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Manila CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. 
The content gets added to to /etc/++++++/++++++.conf.d directory a custom config file.++++++++++++ +| string +| false + +| customServiceConfigSecrets +| CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets that contain sensitive service config data. The content of each Secret gets added to the /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| []string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false +|=== + +<> + +[#manilatemplate] +==== ManilaTemplate + +ManilaTemplate defines common input parameters used by all Manila services + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in manila +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount CR name used for manila DB, defaults to manila +| string +| true + +| secret +| Secret containing OpenStack password information for AdminPassword +| string +| false + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the manila service password from the Secret +| string +| false +|=== + +<> + +[#dbpurge-2] +==== DBPurge + +DBPurge struct is used to model the parameters exposed to the Manila API CronJob + +|=== +| Field | Description | Scheme | Required + +| age +| Age is the DBPurgeAge parameter and indicates the number of 
days of purging DB records +| int +| true + +| schedule +| Schedule defines the crontab format string to schedule the DBPurge cronJob +| string +| true +|=== + +<> + +[#manila] +==== Manila + +Manila is the Schema for the manilas API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#maniladebug] +==== ManilaDebug + +ManilaDebug contains flags related to multiple debug activities. See the individual comments for what this means for each flag. + +|=== +| Field | Description | Scheme | Required + +| dbPurge +| DBPurge increases log verbosity by executing the db_purge command with "--debug". +| bool +| false +|=== + +<> + +[#manilaextravolmounts] +==== ManilaExtraVolMounts + +ManilaExtraVolMounts exposes additional parameters processed by the manila-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| []storage.VolMounts +| true +|=== + +<> + +[#manilalist] +==== ManilaList + +ManilaList contains a list of Manila + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#manilaspec] +==== ManilaSpec + +ManilaSpec defines the desired state of Manila + +|=== +| Field | Description | Scheme | Required + +| manilaAPI +| ManilaAPI - Spec definition for the API service of this Manila deployment +| <> +| true + +| manilaScheduler +| ManilaScheduler - Spec definition for the Scheduler service of this Manila deployment +| <> +| true + +| manilaShares +| ManilaShares - Map of chosen names to spec definitions for the Share(s) service(s) of this Manila deployment +| map[string]<> +| false +|=== + +<> + +[#manilaspecbase] +==== ManilaSpecBase + +ManilaSpecBase - + +|=== +| Field | Description | Scheme | 
Required + +| databaseInstance +| MariaDB instance name Right now required by the mariadb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| false + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Manila +| string +| true + +| memcachedInstance +| Memcached instance name. +| string +| true + +| debug +| Debug - enable debug for different deploy stages. If an init container is used, it runs and the actual action pod gets started with sleep infinity +| <> +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config for all Manila services using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to /etc/++++++/++++++.conf.d directory as a custom config file.++++++++++++ +| string +| false + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting NodeSelector here acts as a default value and can be overridden by service specific NodeSelector Settings. +| map[string]string +| false + +| dbPurge +| DBPurge parameters - +| <> +| false + +| apiTimeout +| APITimeout for HAProxy, Apache, and rpc_response_timeout +| int +| true +|=== + +<> + +[#manilaspeccore] +==== ManilaSpecCore + +ManilaSpecCore defines the desired state of Manila. 
This version is used by OpenStackControlplane + +|=== +| Field | Description | Scheme | Required + +| manilaAPI +| ManilaAPI - Spec definition for the API service of this Manila deployment +| <> +| true + +| manilaScheduler +| ManilaScheduler - Spec definition for the Scheduler service of this Manila deployment +| <> +| true + +| manilaShares +| ManilaShares - Map of chosen names to spec definitions for the Share(s) service(s) of this Manila deployment +| map[string]<> +| false +|=== + +<> + +[#manilastatus] +==== ManilaStatus + +ManilaStatus defines the observed state of Manila + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Manila Database Hostname +| string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| manilaAPIReadyCount +| ReadyCount of Manila API instance +| int32 +| false + +| manilaSchedulerReadyCount +| ReadyCount of Manila Scheduler instance +| int32 +| false + +| manilaSharesReadyCounts +| ReadyCounts of Manila Share instances +| map[string]int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#manilaapi] +==== ManilaAPI + +ManilaAPI is the Schema for the manilaapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#manilaapilist] +==== ManilaAPIList + +ManilaAPIList contains a list of ManilaAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#manilaapispec] +==== ManilaAPISpec + +ManilaAPISpec defines the desired state of ManilaAPI + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - Manila Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide the default SA name +| string +| true +|=== + +<> + +[#manilaapistatus] +==== ManilaAPIStatus + +ManilaAPIStatus defines the observed state of ManilaAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Manila API instances +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#manilaapitemplate] +==== ManilaAPITemplate + +ManilaAPITemplate defines the input parameter for the ManilaAPI service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Manila API Container Image URL +| string +| true +|=== + +<> + +[#manilaapitemplatecore] +==== ManilaAPITemplateCore + +ManilaAPITemplateCore - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Manila API Replicas +| *int32 +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#manilascheduler] +==== ManilaScheduler + +ManilaScheduler is the Schema for the manilaschedulers API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#manilaschedulerlist] +==== ManilaSchedulerList + +ManilaSchedulerList contains a list of ManilaScheduler + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#manilaschedulerspec] +==== ManilaSchedulerSpec + +ManilaSchedulerSpec defines the desired state of ManilaScheduler + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - manila Database Hostname +| string +| false + +| transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#manilaschedulerstatus] +==== ManilaSchedulerStatus + +ManilaSchedulerStatus defines the observed state 
of ManilaScheduler + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of Manila Scheduler instances +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#manilaschedulertemplate] +==== ManilaSchedulerTemplate + +ManilaSchedulerTemplate defines the input parameter for the ManilaScheduler service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Manila API Container Image URL +| string +| true +|=== + +<> + +[#manilaschedulertemplatecore] +==== ManilaSchedulerTemplateCore + +ManilaSchedulerTemplateCore - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Manila API Replicas +| *int32 +| true +|=== + +<> + +[#manilashare] +==== ManilaShare + +ManilaShare is the Schema for the manilashares API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#manilasharelist] +==== ManilaShareList + +ManilaShareList contains a list of ManilaShare + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#manilasharespec] +==== ManilaShareSpec + +ManilaShareSpec defines the desired state of ManilaShare + +|=== +| Field | Description | Scheme | Required + +| databaseHostname +| DatabaseHostname - manila Database Hostname +| string +| false + +| 
transportURLSecret +| Secret containing RabbitMq transport URL +| string +| false + +| extraMounts +| ExtraMounts containing conf files and credentials +| []<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#manilasharestatus] +==== ManilaShareStatus + +ManilaShareStatus defines the observed state of ManilaShare + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount of ManilaShare instances +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#manilasharetemplate] +==== ManilaShareTemplate + +ManilaShareTemplate defines the input parameter for the ManilaShare service + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Manila API Container Image URL +| string +| true +|=== + +<> + +[#manilasharetemplatecore] +==== ManilaShareTemplateCore + +ManilaShareTemplateCore - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - Manila API Replicas +| *int32 +| true +|=== + +<> + +[#adoptionredirectspec] +==== AdoptionRedirectSpec + +AdoptionRedirectSpec defines redirection to a different DB instance during Adoption + +|=== +| Field | Description | Scheme | Required + +| host +| MariaDB host to redirect to (IP or name) +| string +| false +|=== + +<> + +[#galera] +==== Galera + +Galera is the Schema for the galeras API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#galeraattributes] +==== GaleraAttributes + +GaleraAttributes holds startup information for a Galera host + +|=== +| Field | Description | Scheme | Required + +| seqno +| Last recorded replication sequence number in the DB +| string +| true + +| gcomm +| Gcomm URI used to connect to the galera cluster +| string +| false + +| containerID +| Identifier of the container at the time the gcomm URI was injected +| string +| false +|=== + +<> + +[#galeralist] +==== GaleraList + +GaleraList contains a list of Galera + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#galeraspec] +==== GaleraSpec + +GaleraSpec defines the desired state of Galera + +|=== +| Field | Description | Scheme | Required + +| containerImage +| Name of the galera container image to run (will be set to environmental default if empty) +| string +| true +|=== + +<> 
+ +[#galeraspeccore] +==== GaleraSpecCore + +GaleraSpec defines the desired state of Galera + +|=== +| Field | Description | Scheme | Required + +| secret +| Name of the secret to look for password keys +| string +| true + +| storageClass +| Storage class to host the mariadb databases +| string +| true + +| storageRequest +| Storage size allocated for the mariadb databases +| string +| true + +| replicas +| Size of the galera cluster deployment +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| customServiceConfig +| Customize config using this parameter to change service defaults, or overwrite rendered information using raw MariaDB config format. The content gets added to /etc/my.cnf.d/galera_custom.cnf +| string +| false + +| adoptionRedirect +| Adoption configuration +| <> +| true + +| tls +| TLS settings for MySQL service and internal Galera replication +| tls.SimpleService +| false + +| disableNonTLSListeners +| When TLS is configured, only allow connections to the DB over TLS +| bool +| false +|=== + +<> + +[#galerastatus] +==== GaleraStatus + +GaleraStatus defines the observed state of Galera + +|=== +| Field | Description | Scheme | Required + +| attributes +| A map of database node attributes for each pod +| map[string]<> +| false + +| safeToBootstrap +| Name of the node that can safely bootstrap a cluster +| string +| false + +| bootstrapped +| Is the galera cluster currently running +| bool +| true + +| stopRequired +| Does the galera cluster requires to be stopped globally +| bool +| true + +| clusterProperties +| Map of properties that require full cluster restart if changed +| map[string]string +| false + +| hash +| Map of hashes to track input changes +| map[string]string +| false + +| conditions +| Deployment Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#mariadbaccount] +==== MariaDBAccount + +MariaDBAccount is the Schema for the mariadbaccounts API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#mariadbaccountlist] +==== MariaDBAccountList + +MariaDBAccountList contains a list of MariaDBAccount + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#mariadbaccountspec] +==== MariaDBAccountSpec + +MariaDBAccountSpec defines the desired state of MariaDBAccount + +|=== +| Field | Description | Scheme | Required + +| userName +| UserName for new account +| string +| true + +| secret +| Name of secret which contains DatabasePassword +| string +| true + +| requireTLS +| Account must use TLS to connect to the database +| bool +| true +|=== + +<> + +[#mariadbaccountstatus] +==== MariaDBAccountStatus + +MariaDBAccountStatus defines the observed state of MariaDBAccount + +|=== +| Field | Description | Scheme | Required + +| conditions +| Deployment Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false +|=== + +<> + +[#mariadbdatabase] +==== MariaDBDatabase + +MariaDBDatabase is the Schema for the mariadbdatabases API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#mariadbdatabaselist] +==== MariaDBDatabaseList + +MariaDBDatabaseList contains a list of MariaDBDatabase + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#mariadbdatabasespec] +==== MariaDBDatabaseSpec + +MariaDBDatabaseSpec defines the desired state of MariaDBDatabase + +|=== +| Field | Description | Scheme | Required + +| secret +| Name of secret which contains DatabasePassword (deprecated) +| *string +| false + +| name +| Name of the database in MariaDB +| string +| false + +| defaultCharacterSet +| Default character set for this database +| string +| false + +| defaultCollation +| Default collation for this database +| string +| false +|=== + +<> + +[#mariadbdatabasestatus] +==== MariaDBDatabaseStatus + +MariaDBDatabaseStatus defines the observed state of MariaDBDatabase + +|=== +| Field | Description | Scheme | Required + +| conditions +| Deployment Conditions +| condition.Conditions +| false + +| completed +| +| bool +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| tlsSupport +| Whether TLS is supported by the DB instance +| bool +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#neutronapi] +==== NeutronAPI + +NeutronAPI is the Schema for the neutronapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#neutronapilist] +==== NeutronAPIList + +NeutronAPIList contains a list of NeutronAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#neutronapispec] +==== NeutronAPISpec + +NeutronAPISpec defines the desired state of NeutronAPI + +|=== +| Field | Description | Scheme | Required + +| containerImage +| NeutronAPI Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#neutronapispeccore] +==== NeutronAPISpecCore + +NeutronAPISpecCore - + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in neutron +| string +| true + +| databaseInstance +| MariaDB instance name Right now required by the maridb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount CR name used for neutron DB, defaults to neutron +| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Neutron +| string +| true + +| memcachedInstance +| Memcached instance name. 
+| string +| true + +| replicas +| Replicas of neutron API to run +| *int32 +| true + +| secret +| Secret containing OpenStack password information for NeutronPassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the ServiceUser password from the Secret +| <> +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| extraMounts +| ExtraMounts containing conf files +| []<> +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| <> +| false +|=== + +<> + +[#neutronapistatus] +==== NeutronAPIStatus + +NeutronAPIStatus defines the observed state of NeutronAPI + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of neutron API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Neutron Database Hostname +| string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#neutronapitls] +==== NeutronApiTLS + +|=== +| Field | Description | Scheme | Required + +| api +| API tls type which encapsulates for API services +| tls.APIService +| false + +| ovn +| Ovn GenericService - holds the secret for the OvnDb client cert +| tls.GenericService +| false +|=== + +<> + +[#neutronextravolmounts] +==== NeutronExtraVolMounts + +NeutronExtraVolMounts exposes additional parameters processed by the neutron-operator and defines the common VolMounts structure provided by the main storage module + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| false + +| region +| +| string +| false + +| extraVol +| +| []storage.VolMounts +| true +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Database - Selector to get the neutron service password from the Secret +| string +| true +|=== + +<> + +[#novacellimages] +==== NovaCellImages + +|=== +| Field | Description | Scheme | Required + +| conductorContainerImageURL +| ConductorContainerImageURL +| string +| true + +| metadataContainerImageURL +| MetadataContainerImageURL +| string +| true + +| novncproxyContainerImageURL +| NoVNCContainerImageURL +| string +| true + +| computeContainerImageURL +| NovaComputeContainerImageURL +| string +| true +|=== + +<> + +[#novaimages] +==== NovaImages + +|=== +| Field | Description | Scheme | Required + +| apiContainerImageURL +| APIContainerImageURL +| string +| true + +| schedulerContainerImageURL +| SchedulerContainerImageURL +| string +| true +|=== + +<> + +[#novaservicebase] +==== NovaServiceBase + +NovaServiceBase contains the fields that are needed for each nova service CRD + +|=== +| Field | Description | Scheme | Required + +| containerImage +| The service specific Container Image URL (will be set to environmental default if empty) +| 
string +| true + +| replicas +| Replicas of the service to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the keystone service user password from the Secret +| string +| true + +| metadataSecret +| MetadataSecret - the name of the field to get the metadata secret from the Secret +| string +| true +|=== + +<> + +[#nova] +==== Nova + +Nova is the Schema for the nova API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novalist] +==== NovaList + +NovaList contains a list of Nova + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novaspeccore] +==== NovaSpecCore + +NovaSpecCore defines the template for NovaSpec used in OpenStackControlPlane + +|=== +| Field | Description | Scheme | Required + +| keystoneInstance +| KeystoneInstance to name of the KeystoneAPI CR to select 
the Service instance used by the Nova services to authenticate. +| string +| true + +| apiDatabaseInstance +| APIDatabaseInstance is the name of the MariaDB CR to select the DB Service instance used for the Nova API DB. +| string +| true + +| apiMessageBusInstance +| APIMessageBusInstance is the name of the RabbitMqCluster CR to select the Message Bus Service instance used by the Nova top level services to communicate. +| string +| true + +| cellTemplates +| Cells is a mapping of cell names to NovaCellTemplate objects defining the cells in the deployment. The "cell0" cell is a mandatory cell in every deployment. Moreover any real deployment needs at least one additional normal cell as "cell0" cannot have any computes. +| map[string]<> +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| apiDatabaseAccount +| APIDatabaseAccount - MariaDBAccount to use when accessing the API DB +| string +| true + +| secret +| Secret is the name of the Secret instance containing password information for nova like the keystone service password and DB passwords +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser passwords from the Secret +| <> +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting NodeSelector here acts as a default value and can be overridden by service specific NodeSelector Settings. +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| apiServiceTemplate +| APIServiceTemplate - define the nova-api service +| <> +| true + +| schedulerServiceTemplate +| SchedulerServiceTemplate- define the nova-scheduler service +| <> +| true + +| metadataServiceTemplate +| MetadataServiceTemplate - defines the metadata service that is global for the deployment serving all the cells. 
Note that if you want to deploy metadata per cell then the metadata service should be disabled here and enabled in the cellTemplates instead. +| <> +| true + +| memcachedInstance +| MemcachedInstance is the name of the Memcached CR that all nova services will use. +| string +| true +|=== + +<> + +[#novastatus] +==== NovaStatus + +NovaStatus defines the observed state of Nova + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| apiServiceReadyCount +| APIServiceReadyCount defines the number of replicas ready from nova-api +| int32 +| false + +| schedulerServiceReadyCount +| SchedulerServiceReadyCount defines the number of replicas ready from nova-scheduler +| int32 +| false + +| metadataServiceReadyCount +| MetadataReadyCount defines the number of replicas ready from nova-metadata service +| int32 +| false + +| registeredCells +| RegisteredCells is a map keyed by cell names that are registered in the nova_api database with a value that is the hash of the given cell configuration. +| map[string]string +| false + +| discoveredCells +| DiscoveredCells is a map keyed by cell names that have discovered all kubernetes managed computes in the cell; the value is a hash of config from all kubernetes managed computes in the cell +| map[string]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#novaapi] +==== NovaAPI + +NovaAPI is the Schema for the novaapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novaapilist] +==== NovaAPIList + +NovaAPIList contains a list of NovaAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novaapispec] +==== NovaAPISpec + +NovaAPISpec defines the desired state of NovaAPI + +|=== +| Field | Description | Scheme | Required + +| secret +| Secret is the name of the Secret instance containing password information for the nova-api service. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. +| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| keystoneAuthURL +| KeystoneAuthURL configures the keystone API endpoint to be used by the service for authentication and authorization +| string +| true + +| keystonePublicAuthURL +| KeystonePublicAuthURL configures the public keystone API endpoint. This can be different from KeystoneAuthURL. The service uses this value to redirect unauthenticated users. 
+| string +| true + +| apiDatabaseAccount +| APIDatabaseAccount - MariaDBAccount to use when accessing the API DB +| string +| true + +| apiDatabaseHostname +| APIDatabaseHostname - hostname to use when accessing the API DB +| string +| true + +| cell0DatabaseAccount +| APIDatabaseAccount - MariaDBAccount to use when accessing the cell0 DB +| string +| true + +| cell0DatabaseHostname +| APIDatabaseHostname - hostname to use when accessing the cell0 DB +| string +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Nova services the default SA name +| string +| true + +| registeredCells +| RegisteredCells is a map keyed by cell names that are registered in the nova_api database with a value that is the hash of the given cell configuration. This is used to detect when a new cell is added or an existing cell is reconfigured to trigger refresh of the in memory cell caches of the service. +| map[string]string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like e.g. api-paste.ini or policy.yaml. +| map[string]string +| false + +| memcachedInstance +| MemcachedInstance is the name of the Memcached CR that all nova service will use. +| string +| true +|=== + +<> + +[#novaapistatus] +==== NovaAPIStatus + +NovaAPIStatus defines the observed state of NovaAPI + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount defines the number of replicas ready from nova-api +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false +|=== + +<> + +[#novaapitemplate] +==== NovaAPITemplate + +NovaAPITemplate defines the input parameters specified by the user to create a NovaAPI via higher level CRDs. NOTE(gibi): NovaAPITemplate has the same structure than NovaServiceBase BUT we want to default ContainerImage for the template, therefore the structs are duplicated. + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of the service to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Nova CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like e.g. api-paste.ini or policy.yaml. +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#novacell] +==== NovaCell + +NovaCell is the Schema for the novacells API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novacelldbpurge] +==== NovaCellDBPurge + +NovaCellDBPurge defines the parameters for the DB archiving and purging cron job + +|=== +| Field | Description | Scheme | Required + +| schedule +| Schedule defines when to run the DB maintenance job in a cron format. By default it runs every midnight. +| *string +| true + +| archiveAge +| ArchiveAge defines the minimum age of the records in days that can be moved to the shadow tables. +| *int +| true + +| purgeAge +| PurgeAge defines the minimum age of the records in days that can be deleted from the shadow tables +| *int +| true +|=== + +<> + +[#novacelllist] +==== NovaCellList + +NovaCellList contains a list of NovaCell + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novacellspec] +==== NovaCellSpec + +NovaCellSpec defines the desired state of NovaCell + +|=== +| Field | Description | Scheme | Required + +| cellName +| CellName is the name of the Nova Cell. The value "cell0" has a special meaning. The "cell0" Cell cannot have compute nodes associated and the conductor in this cell acts as the super conductor for all the cells in the deployment. +| string +| true + +| secret +| Secret is the name of the Secret instance containing password information for the nova cell. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. +| string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this services. 
+| map[string]string +| false + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| keystoneAuthURL +| KeystoneAuthURL - the URL that the service in the cell can use to talk to keystone +| string +| true + +| apiDatabaseAccount +| APIDatabaseAccount - MariaDBAccount to use when accessing the API DB +| string +| true + +| apiDatabaseHostname +| APIDatabaseHostname - hostname to use when accessing the API DB. If not provided then up-calls will be disabled. This filed is Required for cell0. +| string +| true + +| cellDatabaseAccount +| CellDatabaseAccount - MariaDBAccount to use when accessing the cell DB +| string +| true + +| cellDatabaseHostname +| CellDatabaseHostname - hostname to use when accessing the cell DB +| string +| true + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| conductorServiceTemplate +| ConductorServiceTemplate - defines the cell conductor deployment for the cell +| <> +| true + +| metadataServiceTemplate +| MetadataServiceTemplate - defines the metadata service dedicated for the cell. +| <> +| true + +| noVNCProxyServiceTemplate +| NoVNCProxyServiceTemplate - defines the novncproxy service dedicated for the cell. +| <> +| true + +| novaComputeTemplates +| NovaComputeTemplates - map of nova computes template with selected drivers in format compute_name: compute_template. Key from map is arbitrary name for the compute. because of that there is a 20 character limit on the compute name. +| map[string]<> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Nova services the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false + +| memcachedInstance +| MemcachedInstance is the name of the Memcached CR that all nova service will use. 
+| string +| true + +| dbPurge +| DBPurge defines the parameters for the DB archiving and purging cron job +| <> +| true +|=== + +<> + +[#novacellstatus] +==== NovaCellStatus + +NovaCellStatus defines the observed state of NovaCell + +|=== +| Field | Description | Scheme | Required + +| hash +| INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| conductorServiceReadyCount +| ConductorServiceReadyCount defines the number of replicas ready from nova-conductor service in the cell +| int32 +| false + +| metadataServiceReadyCount +| MetadataServiceReadyCount defines the number of replicas ready from nova-metadata service in the cell +| int32 +| false + +| noVNCProxyServiceReadyCount +| NoVNCPRoxyServiceReadyCount defines the number of replicas ready from nova-novncproxy service in the cell +| int32 +| false + +| novaComputesStatus +| NovaComputesStatus is a map with format cell_name: NovaComputeCellStatus where NovaComputeCellStatus tell if compute with selected name deployed successfully and indicates if the compute is successfully mapped to the cell in the nova_api database. When a compute is removed from the Spec the operator will delete the related NovaCompute CR and then remove the compute from this Status field. +| map[string]<> +| false +|=== + +<> + +[#novacelltemplate] +==== NovaCellTemplate + +NovaCellTemplate defines the input parameters specified by the user to create a NovaCell via higher level CRDs. + +|=== +| Field | Description | Scheme | Required + +| cellDatabaseInstance +| CellDatabaseInstance is the name of the MariaDB CR to select the DB Service instance used as the DB of this cell. 
+| string
+| true
+
+| cellDatabaseAccount
+| CellDatabaseAccount - MariaDBAccount to use when accessing the given cell DB
+| string
+| true
+
+| cellMessageBusInstance
+| CellMessageBusInstance is the name of the RabbitMqCluster CR to select the Message Bus Service instance used by the nova services to communicate in this cell. For cell0 it is unused.
+| string
+| true
+
+| hasAPIAccess
+| HasAPIAccess defines if this Cell is configured to have access to the API DB and message bus.
+| bool
+| true
+
+| nodeSelector
+| NodeSelector to target subset of worker nodes running cell.
+| map[string]string
+| false
+
+| conductorServiceTemplate
+| ConductorServiceTemplate - defines the cell conductor deployment for the cell.
+| <>
+| true
+
+| metadataServiceTemplate
+| MetadataServiceTemplate - defines the metadata service dedicated for the cell. Note that for cell0 metadata service should not be deployed. Also if metadata service needs to be deployed per cell here then it should not be enabled to be deployed on the top level via the Nova CR at the same time. By default Nova CR deploys the metadata service at the top level and disables it on the cell level.
+| <>
+| true
+
+| noVNCProxyServiceTemplate
+| NoVNCProxyServiceTemplate - defines the novncproxy service dedicated for the cell. Note that for cell0 novncproxy should not be deployed so the enabled field of this template is defaulted to false in cell0 but defaulted to true in other cells.
+| <>
+| true
+
+| novaComputeTemplates
+| NovaComputeTemplates - map of nova computes template with selected drivers in format compute_name: compute_template. Key from map is arbitrary name for the compute with a limit of 20 characters.
+| map[string]<>
+| false
+
+| memcachedInstance
+| MemcachedInstance is the name of the Memcached CR that the services in the cell will use. 
If defined then this takes precedence over Nova.Spec.MemcachedInstance for this cel +| string +| true + +| dbPurge +| DBPurge defines the parameters for the DB archiving and purging cron job +| <> +| true +|=== + +<> + +[#novacompute] +==== NovaCompute + +NovaCompute is the Schema for the NovaCompute + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novacomputecellstatus] +==== NovaComputeCellStatus + +NovaComputeCellStatus defines state of NovaCompute in cell + +|=== +| Field | Description | Scheme | Required + +| deployed +| Deployed value: true means that the compute is deployed but can still be undiscovered +| bool +| true + +| errors +| Errors value True means that during deployment, errors appear, and the user needs to check the compute for problems +| bool +| true +|=== + +<> + +[#novacomputelist] +==== NovaComputeList + +NovaComputeList contains a list of NovaCompute + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novacomputespec] +==== NovaComputeSpec + +NovaComputeSpec defines the desired state of NovaCompute + +|=== +| Field | Description | Scheme | Required + +| cellName +| CellName is the name of the Nova Cell this NovaCompute belongs to. +| string +| true + +| computeName +| ComputeName - compute name. +| string +| true + +| secret +| Secret is the name of the Secret instance containing password information for the NovaCompute service. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. 
+| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| keystoneAuthURL +| +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Nova services the default SA name +| string +| true + +| computeDriver +| ComputeDriver defines which driver to use for controlling virtualization +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like e.g. provider.yaml +| map[string]string +| false +|=== + +<> + +[#novacomputestatus] +==== NovaComputeStatus + +NovaComputeStatus defines the observed state of NovaCompute + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount defines the number of replicas ready from NovaCompute +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false +|=== + +<> + +[#novacomputetemplate] +==== NovaComputeTemplate + +NovaComputeTemplate defines the input parameters specified by the user to create a NovaCompute via higher level CRDs. + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of the service to run. For ironic.IronicDriver the max replica is 1 +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Nova CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. 
The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like e.g. provider.yaml +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| computeDriver +| ComputeDriver - defines which driver to use for controlling virtualization +| string +| true +|=== + +<> + +[#novaconductor] +==== NovaConductor + +NovaConductor is the Schema for the novaconductors API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novaconductorlist] +==== NovaConductorList + +NovaConductorList contains a list of NovaConductor + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novaconductorspec] +==== NovaConductorSpec + +NovaConductorSpec defines the desired state of NovaConductor + +|=== +| Field | Description | Scheme | Required + +| cellName +| CellName is the name of the Nova Cell this conductor belongs to. +| string +| true + +| secret +| Secret is the name of the Secret instance containing password information for the nova-conductor service. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. 
+| string
+| true
+
+| serviceUser
+| ServiceUser - optional username used for this service to register in keystone
+| string
+| true
+
+| keystoneAuthURL
+| KeystoneAuthURL - the URL that the nova-conductor service can use to talk to keystone
+| string
+| true
+
+| apiDatabaseAccount
+| APIDatabaseAccount - MariaDBAccount to use when accessing the API DB
+| string
+| true
+
+| apiDatabaseHostname
+| APIDatabaseHostname - hostname to use when accessing the API DB. If not provided then up-calls will be disabled. This field is Required for cell0.
+| string
+| true
+
+| cellDatabaseAccount
+| CellDatabaseAccount - MariaDBAccount to use when accessing the cell DB
+| string
+| true
+
+| cellDatabaseHostname
+| NOTE(gibi): This should be Required, see notes in KeystoneAuthURL CellDatabaseHostname - hostname to use when accessing the cell DB
+| string
+| true
+
+| preserveJobs
+| PreserveJobs - do not delete jobs after they finished e.g. to check logs
+| bool
+| true
+
+| serviceAccount
+| ServiceAccount - service account name used internally to provide Nova services the default SA name
+| string
+| true
+
+| tls
+| TLS - Parameters related to the TLS
+| tls.Ca
+| false
+
+| memcachedInstance
+| MemcachedInstance is the name of the Memcached CR that all nova services will use.
+| string
+| true
+
+| dbPurge
+| DBPurge defines the parameters for the DB archiving and purging cron job
+| <>
+| true
+|===
+
+<>
+
+[#novaconductorstatus]
+==== NovaConductorStatus
+
+NovaConductorStatus defines the observed state of NovaConductor
+
+|===
+| Field | Description | Scheme | Required
+
+| hash
+| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount defines the number of replicas ready from nova-conductor +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false +|=== + +<> + +[#novaconductortemplate] +==== NovaConductorTemplate + +NovaConductorTemplate defines the input parameters specified by the user to create a NovaConductor via higher level CRDs. + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of the service to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Nova CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false +|=== + +<> + +[#metadataoverridespec] +==== MetadataOverrideSpec + +MetadataOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster for internal communication. 
+| *service.OverrideSpec +| false +|=== + +<> + +[#novametadata] +==== NovaMetadata + +NovaMetadata is the Schema for the novametadata API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novametadatalist] +==== NovaMetadataList + +NovaMetadataList contains a list of NovaMetadata + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novametadataspec] +==== NovaMetadataSpec + +NovaMetadataSpec defines the desired state of NovaMetadata + +|=== +| Field | Description | Scheme | Required + +| cellName +| CellName is the name of the Nova Cell this metadata service belongs to. If not provided then the metadata serving every cells in the deployment +| string +| false + +| secret +| Secret is the name of the Secret instance containing password information for the nova-conductor service. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. +| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| keystoneAuthURL +| KeystoneAuthURL - the URL that the nova-metadata service can use to talk to keystone +| string +| true + +| apiDatabaseAccount +| APIDatabaseAccount - MariaDBAccount to use when accessing the API DB +| string +| true + +| apiDatabaseHostname +| APIDatabaseHostname - hostname to use when accessing the API DB. This filed is Required if the CellName is not provided +| string +| true + +| cellDatabaseAccount +| CellDatabaseAccount - MariaDBAccount to use when accessing the cell DB +| string +| true + +| cellDatabaseHostname +| CellDatabaseHostname - hostname to use when accessing the cell DB This is unused if CellName is not provided. But if it is provided then CellDatabaseHostName is also Required. 
+| string +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Nova services the default SA name +| string +| true + +| registeredCells +| RegisteredCells is a map keyed by cell names that are registered in the nova_api database with a value that is the hash of the given cell configuration. This is used to detect when a new cell is added or an existing cell is reconfigured to trigger refresh of the in memory cell caches of the service. This is empty for the case when nova-metadata runs within the cell. +| map[string]string +| false + +| tls +| TLS - Parameters related to the TLS +| tls.SimpleService +| false + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like e.g. api-paste.ini. +| map[string]string +| false + +| memcachedInstance +| MemcachedInstance is the name of the Memcached CR that all nova service will use. +| string +| true +|=== + +<> + +[#novametadatastatus] +==== NovaMetadataStatus + +NovaMetadataStatus defines the observed state of NovaMetadata + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount defines the number of replicas ready from nova-metadata +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false +|=== + +<> + +[#novametadatatemplate] +==== NovaMetadataTemplate + +NovaMetadataTemplate defines the input parameters specified by the user to create a NovaMetadata via higher level CRDs. + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether NovaMetadata services should be deployed and managed. 
If it is set to false then the related NovaMetadata CR will be deleted if exists and owned by a higher level nova CR (Nova or NovaCell). If it exist but not owned by a higher level nova CR then the NovaMetadata CR will not be touched. If it is set to true the a NovaMetadata CR will be created. If there is already a manually created NovaMetadata CR with the relevant name then this operator will not try to update that CR, instead the higher level nova CR will be in error state until the manually create NovaMetadata CR is deleted manually. +| *bool +| true + +| replicas +| Replicas of the service to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Nova CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like e.g. api-paste.ini. +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.SimpleService +| false +|=== + +<> + +[#novanovncproxy] +==== NovaNoVNCProxy + +NovaNoVNCProxy is the Schema for the novanovncproxies API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novanovncproxylist] +==== NovaNoVNCProxyList + +NovaNoVNCProxyList contains a list of NovaNoVNCProxy + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novanovncproxyspec] +==== NovaNoVNCProxySpec + +NovaNoVNCProxySpec defines the desired state of NovaNoVNCProxy + +|=== +| Field | Description | Scheme | Required + +| cellName +| CellName is the name of the Nova Cell this novncproxy belongs to. +| string +| true + +| secret +| Secret is the name of the Secret instance containing password information for the nova-novncproxy service. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. +| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| keystoneAuthURL +| KeystoneAuthURL - the URL that the nova-novncproxy service can use to talk to keystone +| string +| true + +| cellDatabaseAccount +| CellDatabaseAccount - MariaDBAccount to use when accessing the cell DB +| string +| true + +| cellDatabaseHostname +| CellDatabaseHostname - hostname to use when accessing the cell DB +| string +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| serviceAccount +| ServiceAccount - service account name used internally to provide Nova services the default SA name +| string +| true + +| tls +| TLS - Parameters related to the TLS +| <> +| true + +| memcachedInstance +| MemcachedInstance is the name of the Memcached CR that all nova service will use. +| string +| true +|=== + +<> + +[#novanovncproxystatus] +==== NovaNoVNCProxyStatus + +NovaNoVNCProxyStatus defines the observed state of NovaNoVNCProxy + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount defines the number of replicas ready from nova-novncproxy +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false +|=== + +<> + +[#novanovncproxytemplate] +==== NovaNoVNCProxyTemplate + +NovaNoVNCProxyTemplate defines the input parameters specified by the user to create a NovaNoVNCProxy via higher level CRDs. + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether NovaNoVNCProxy services should be deployed and managed. If it is set to false then the related NovaNoVNCProxy CR will be deleted if exists and owned by the NovaCell. If it exist but not owned by the NovaCell then the NovaNoVNCProxy will not be touched. If it is set to true the a NovaNoVNCProxy CR will be created. If there is already a manually created NovaNoVNCProxy CR with the relevant name then the cell will not try to update that CR, instead the NovaCell be in error state until the manually create NovaNoVNCProxy CR is deleted by the operator. 
+| *bool +| true + +| replicas +| Replicas of the service to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| tls +| TLS - Parameters related to the TLS +| <> +| true +|=== + +<> + +[#tlssection-2] +==== TLSSection + +TLSSection defines the desired state of TLS configuration + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Cert secret used for the nova novnc service endpoint +| tls.GenericService +| false + +| vencrypt +| Vencrypt - cert secret containing the x509 certificate to be presented to the VNC server. The CommonName field should match the primary hostname of the controller node. If using a HA deployment, the Organization field can also be configured to a value that is common across all console proxy instances in the deployment. https://docs.openstack.org/nova/latest/admin/remote-console-access.html#novnc-proxy-server-configuration +| tls.GenericService +| false +|=== + +<> + +[#vncproxyoverridespec] +==== VNCProxyOverrideSpec + +VNCProxyOverrideSpec to override the generated manifest of several child resources. 
+ +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. +| *service.RoutedOverrideSpec +| false +|=== + +<> + +[#novascheduler] +==== NovaScheduler + +NovaScheduler is the Schema for the novaschedulers API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#novaschedulerlist] +==== NovaSchedulerList + +NovaSchedulerList contains a list of NovaScheduler + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#novaschedulerspec] +==== NovaSchedulerSpec + +NovaSchedulerSpec defines the desired state of NovaScheduler + +|=== +| Field | Description | Scheme | Required + +| secret +| Secret is the name of the Secret instance containing password information for the nova-scheduler service. This secret is expected to be generated by the nova-operator based on the information passed to the Nova CR. 
+| string +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| keystoneAuthURL +| KeystoneAuthURL - the URL that the nova-scheduler service can use to talk to keystone +| string +| true + +| apiDatabaseAccount +| APIDatabaseAccount - MariaDBAccount to use when accessing the API DB +| string +| true + +| apiDatabaseHostname +| APIDatabaseHostname - hostname to use when accessing the API DB +| string +| true + +| cell0DatabaseAccount +| Cell0DatabaseAccount - MariaDBAccount to use when accessing the cell0 DB +| string +| true + +| cell0DatabaseHostname +| Cell0DatabaseHostname - hostname to use when accessing the cell0 DB +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Nova services the default SA name +| string +| true + +| registeredCells +| RegisteredCells is a map keyed by cell names that are registered in the nova_api database with a value that is the hash of the given cell configuration. This is used to detect when a new cell is added or an existing cell is reconfigured to trigger refresh of the in memory cell caches of the service. +| map[string]string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false + +| memcachedInstance +| MemcachedInstance is the name of the Memcached CR that all nova service will use. +| string +| true +|=== + +<> + +[#novaschedulerstatus] +==== NovaSchedulerStatus + +NovaSchedulerStatus defines the observed state of NovaScheduler + +|=== +| Field | Description | Scheme | Required + +| hash +| INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| readyCount +| ReadyCount defines the number of replicas ready from nova-scheduler +| int32 +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false +|=== + +<> + +[#novaschedulertemplate] +==== NovaSchedulerTemplate + +NovaSchedulerTemplate defines the input parameters specified by the user to create a NovaScheduler via higher level CRDs. + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of the service to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting here overrides any global NodeSelector settings within the Nova CR. +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+| corev1.ResourceRequirements
+| false
+
+| networkAttachments
+| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network
+| []string
+| false
+|===
+
+<>
+
+[#octaviaamphoracontroller]
+==== OctaviaAmphoraController
+
+OctaviaAmphoraController is the Schema for the octaviaworkers API
+
+|===
+| Field | Description | Scheme | Required
+
+| metadata
+|
+| metav1.ObjectMeta
+| false
+
+| spec
+|
+| <>
+| false
+
+| status
+|
+| <>
+| false
+|===
+
+<>
+
+[#octaviaamphoracontrollerlist]
+==== OctaviaAmphoraControllerList
+
+OctaviaAmphoraControllerList contains a list of OctaviaWorker
+
+|===
+| Field | Description | Scheme | Required
+
+| metadata
+|
+| metav1.ListMeta
+| false
+
+| items
+|
+| []<>
+| true
+|===
+
+<>
+
+[#octaviaamphoracontrollerspec]
+==== OctaviaAmphoraControllerSpec
+
+OctaviaAmphoraControllerSpec defines common state for all Octavia Amphora Controllers
+
+|===
+| Field | Description | Scheme | Required
+
+| containerImage
+| ContainerImage - Amphora Controller Container Image URL
+| string
+| false
+|===
+
+<>
+
+[#octaviaamphoracontrollerspeccore]
+==== OctaviaAmphoraControllerSpecCore
+
+OctaviaAmphoraControllerSpecCore -
+
+|===
+| Field | Description | Scheme | Required
+
+| databaseInstance
+| MariaDB instance name Right now required by the mariadb-operator to get the credentials from the instance to create the DB Might not be required in future
+| string
+| true
+
+| databaseAccount
+| DatabaseAccount - name of MariaDBAccount which will be used to connect for the main octavia database
+| string
+| true
+
+| persistenceDatabaseAccount
+| PersistenceDatabaseAccount - name of MariaDBAccount which will be used to connect for the persistence database
+| string
+| true
+
+| databaseHostname
+| DatabaseHostname - Octavia DB hostname
+| string
+| false
+
+| serviceUser
+| ServiceUser - service user name (TODO: 
beagles, do we need this at all) +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Octavia services the default SA name +| string +| true + +| role +| Role - the role for the controller (one of worker, housekeeping, healthmanager) +| string +| true + +| secret +| Secret containing OpenStack password information for octavia OctaviaDatabasePassword, AdminPassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the AdminUser password from the Secret +| <> +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| tenantName +| TenantName - the name of the OpenStack tenant that controls the Octavia resources +| string +| true + +| lbMgmtNetworkID +| +| string +| true + +| lbSecurityGroupID +| +| string +| true + +| amphoraCustomFlavors +| AmphoraCustomFlavors - User-defined flavors for Octavia +| []<> +| false + +| amphoraImageOwnerID +| +| string +| false + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false + +| octaviaProviderSubnetGateway +| OctaviaProviderSubnetGateway - +| string +| true + +| octaviaProviderSubnetCIDR +| OctaviaProviderSubnetCIDR - +| string +| true +|=== + +<> + +[#octaviaamphoracontrollerstatus] +==== OctaviaAmphoraControllerStatus + +OctaviaAmphoraControllerStatus defines the observed state of the Octavia Amphora Controller + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of Octavia Amphora Controllers +| int32 +| false + +| desiredNumberScheduled +| DesiredNumberScheduled - total number of the nodes which should be running Daemon +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachment status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#octavia] +==== Octavia + +Octavia is the Schema for the octavia API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#octaviaamphoraflavor] +==== OctaviaAmphoraFlavor + +OctaviaAmphoraFlavor Settings for custom Amphora flavors + +|=== +| Field | Description | Scheme | Required + +| name +| +| string +| true + +| description +| +| string +| true + +| VCPUs +| +| int +| true + +| RAM +| +| int +| true + +| disk +| +| int +| true + +| RxTxFactor +| +| string +| true +|=== + +<> + +[#octavialbmgmtnetworks] +==== OctaviaLbMgmtNetworks + +OctaviaLbMgmtNetworks Settings for Octavia management networks + +|=== +| Field | Description | Scheme | Required + +| manageLbMgmtNetworks +| +| bool +| false + +| availabilityZones +| Availability zones for the octavia management network resources +| []string +| false +|=== + +<> + +[#octavialist] +==== OctaviaList + +OctaviaList contains a list of Octavia + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#octaviaspec] +==== OctaviaSpec + +OctaviaSpec defines the desired state of Octavia + +|=== +| Field | Description | Scheme | Required + +| octaviaAPI +| OctaviaAPI - Spec definition for the API service of the Octavia deployment +| <> +| true + +| octaviaHousekeeping +| OctaviaHousekeeping - Spec definition for the Octavia Housekeeping agent for the Octavia deployment +| <> +| true + +| octaviaHealthManager +| OctaviaHousekeeping - Spec definition for the Octavia Housekeeping agent for the Octavia deployment +| <> +| true + +| octaviaWorker +| OctaviaHousekeeping - Spec definition for the Octavia Housekeeping agent for the Octavia deployment +| <> +| true +|=== + +<> + +[#octaviaspecbase] +==== OctaviaSpecBase + +OctaviaSpecBase - + +|=== +| Field | Description | 
Scheme | Required + +| databaseInstance +| MariaDB instance name Right now required by the maridb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect for the main octavia database +| string +| true + +| persistenceDatabaseAccount +| PersistenceDatabaseAccount - name of MariaDBAccount which will be used to connect for the persistence database +| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Octavia +| string +| true + +| serviceUser +| ServiceUser - service user name +| string +| true + +| secret +| Secret containing OpenStack password information for octavia's keystone password; no longer used for database password +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. 
Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| tenantName +| TenantName - the name of the OpenStack tenant that controls the Octavia resources +| string +| true + +| lbMgmtNetwork +| +| <> +| true + +| sshPubkey +| LoadBalancerSSHPubKey - The name of the ConfigMap containing the public key for connecting to the amphorae via SSH +| string +| false + +| sshPrivkeySecret +| LoadBalancerSSHPrivKey - The name of the secret that will be used to store the private key for connecting to amphorae via SSH +| string +| false + +| amphoraCustomFlavors +| AmphoraCustomFlavors - User-defined flavors for Octavia +| []<> +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| amphoraImageContainerImage +| Octavia Container Image URL +| string +| true + +| apacheContainerImage +| Apache Container Image URL +| string +| true + +| octaviaNetworkAttachment +| OctaviaNetworkAttachment is a NetworkAttachment resource name for the Octavia Management Network +| string +| true +|=== + +<> + +[#octaviaspeccore] +==== OctaviaSpecCore + +OctaviaSpecCore - this version has no containerImages and is used by OpenStackControlplane + +|=== +| Field | Description | Scheme | Required + +| octaviaAPI +| OctaviaAPI - Spec definition for the API service of the Octavia deployment +| <> +| true + +| octaviaHousekeeping +| OctaviaHousekeeping - Spec definition for the Octavia Housekeeping agent for the Octavia deployment +| <> +| true + +| octaviaHealthManager +| OctaviaHealthManager - Spec definition for the Octavia Health Manager agent for the Octavia deployment +| <> +| true + +| octaviaWorker +| OctaviaWorker - Spec definition for the Octavia Worker agent for the Octavia deployment +| <> +| true +|=== + +<> + +[#octaviastatus] +==== OctaviaStatus + +OctaviaStatus defines the 
observed state of Octavia + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Octavia Database Hostname +| string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| apireadyCount +| ReadyCount of octavia API instances +| int32 +| false + +| workerreadyCount +| ReadyCount of octavia Worker instances +| int32 +| false + +| housekeepingreadyCount +| ReadyCount of octavia Housekeeping instances +| int32 +| false + +| healthmanagerreadyCount +| ReadyCount of octavia HealthManager instances +| int32 +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| database +| Database - Selector to get the octavia Database user password from the Secret +| string +| false + +| service +| Service - Selector to get the service user password from the Secret +| string +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#octaviaapi] +==== OctaviaAPI + +OctaviaAPI is the Schema for the octaviaapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#octaviaapilist] +==== OctaviaAPIList + +OctaviaAPIList contains a list of OctaviaAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#octaviaapispec] +==== OctaviaAPISpec + +OctaviaAPISpec defines the desired state of OctaviaAPI + +|=== +| Field | Description | Scheme | Required + +| containerImage +| Octavia Container Image URL +| string +| true +|=== + +<> + +[#octaviaapispeccore] +==== OctaviaAPISpecCore + +OctaviaAPISpecCore - + +|=== +| Field | Description | Scheme | Required + +| databaseInstance +| MariaDB instance name Right now required by the maridb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect for the main octavia database +| string +| true + +| persistenceDatabaseAccount +| PersistenceDatabaseAccount - name of MariaDBAccount which will be used to connect for the persistence database +| string +| true + +| databaseHostname +| DatabaseHostname - Octavia DB hostname +| string +| false + +| serviceUser +| ServiceUser - service user name +| string +| true + +| serviceAccount +| ServiceAccount - service account name used internally to provide Octavia services the default SA name +| string +| true + +| replicas +| Replicas of octavia API to run +| *int32 +| true + +| secret +| Secret containing OpenStack password information for octavia OctaviaDatabasePassword, AdminPassword +| string +| true + +| passwordSelectors +| 
PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| false + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| tls +| TLS - Parameters related to the TLS +| <> +| false +|=== + +<> + +[#octaviaapistatus] +==== OctaviaAPIStatus + +OctaviaAPIStatus defines the observed state of OctaviaAPI + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of octavia API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. 
job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachment status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#octaviaapitls] +==== OctaviaApiTLS + +|=== +| Field | Description | Scheme | Required + +| api +| API tls type which encapsulates for API services +| tls.APIService +| false + +| ovn +| Ovn GenericService - holds the secret for the OvnDb client cert +| tls.GenericService +| false +|=== + +<> + +[#cpucountreq] +==== CPUCountReq + +CPUCountReq defines a specific hardware request for CPU core count + +|=== +| Field | Description | Scheme | Required + +| count +| +| int +| false + +| exactMatch +| If ExactMatch == false, actual count > Count will match +| bool +| false +|=== + +<> + +[#cpumhzreq] +==== CPUMhzReq + +CPUMhzReq defines a specific hardware request for CPU clock speed + +|=== +| Field | Description | Scheme | Required + +| mhz +| +| int +| false + +| exactMatch +| If ExactMatch == false, actual mhz > Mhz will match +| bool +| false +|=== + +<> + +[#cpureqs] +==== CPUReqs + +CPUReqs defines specific CPU hardware requests + +|=== +| Field | Description | Scheme | Required + +| arch +| Arch is a scalar (string) because it wouldn't make sense to give it an "exact-match" option Can be either "x86_64" or "ppc64le" if included +| string +| false + +| countReq +| +| <> +| false + +| mhzReq +| +| <> +| false +|=== + +<> + +[#diskgbreq] +==== DiskGbReq + +DiskGbReq defines a specific hardware request for disk size + +|=== +| Field | Description | Scheme | Required + +| gb +| +| int +| false + +| exactMatch +| If ExactMatch == 
false, actual GB > Gb will match +| bool +| false +|=== + +<> + +[#diskreqs] +==== DiskReqs + +DiskReqs defines specific disk hardware requests + +|=== +| Field | Description | Scheme | Required + +| gbReq +| +| <> +| false + +| ssdReq +| SSD is scalar (bool) because it wouldn't make sense to give it an "exact-match" option +| <> +| false +|=== + +<> + +[#diskssdreq] +==== DiskSSDReq + +DiskSSDReq defines a specific hardware request for disk of type SSD (true) or rotational (false) + +|=== +| Field | Description | Scheme | Required + +| ssd +| +| bool +| false + +| exactMatch +| We only actually care about SSD flag if it is true or ExactMatch is set to true. This second flag is necessary as SSD's bool zero-value (false) is indistinguishable from it being explicitly set to false +| bool +| false +|=== + +<> + +[#hardwarereqs] +==== HardwareReqs + +HardwareReqs defines request hardware attributes for the BaremetalHost replicas + +|=== +| Field | Description | Scheme | Required + +| cpuReqs +| +| <> +| false + +| memReqs +| +| <> +| false + +| diskReqs +| +| <> +| false +|=== + +<> + +[#hoststatus] +==== HostStatus + +HostStatus represents the IPStatus and provisioning state + deployment information + +|=== +| Field | Description | Scheme | Required + +| provisioningState +| +| ProvisioningState +| true + +| annotatedForDeletion +| Host annotated for deletion +| bool +| true + +| userDataSecretName +| +| string +| true + +| networkDataSecretName +| +| string +| true +|=== + +<> + +[#ipstatus] +==== IPStatus + +IPStatus represents the hostname and IP info for a specific host + +|=== +| Field | Description | Scheme | Required + +| hostname +| +| string +| true + +| bmhRef +| +| string +| true + +| ipAddresses +| +| map[string]string +| true +|=== + +<> + +[#instancespec] +==== InstanceSpec + +InstanceSpec Instance specific attributes + +|=== +| Field | Description | Scheme | Required + +| ctlPlaneIP +| CtlPlaneIP - Control Plane IP in CIDR notation +| string +| true + 
+| userData +| UserData - Host User Data +| *corev1.SecretReference +| false + +| networkData +| NetworkData - Host Network Data +| *corev1.SecretReference +| false + +| preprovisioningNetworkDataName +| PreprovisioningNetworkDataName - NetworkData Secret name for Preprovisioning in the local namespace +| string +| false +|=== + +<> + +[#memgbreq] +==== MemGbReq + +MemGbReq defines a specific hardware request for memory size + +|=== +| Field | Description | Scheme | Required + +| gb +| +| int +| false + +| exactMatch +| If ExactMatch == false, actual GB > Gb will match +| bool +| false +|=== + +<> + +[#memreqs] +==== MemReqs + +MemReqs defines specific memory hardware requests + +|=== +| Field | Description | Scheme | Required + +| gbReq +| +| <> +| false +|=== + +<> + +[#openstackbaremetalset] +==== OpenStackBaremetalSet + +OpenStackBaremetalSet is the Schema for the openstackbaremetalsets API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackbaremetalsetlist] +==== OpenStackBaremetalSetList + +OpenStackBaremetalSetList contains a list of OpenStackBaremetalSet + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackbaremetalsetspec] +==== OpenStackBaremetalSetSpec + +OpenStackBaremetalSetSpec defines the desired state of OpenStackBaremetalSet + +|=== +| Field | Description | Scheme | Required + +| baremetalHosts +| BaremetalHosts - Map of hostname to Instance Spec for all nodes to provision +| map[string]<> +| false + +| osImage +| OSImage - OS qcow2 image Name +| string +| false + +| osContainerImageUrl +| OSContainerImageURL - Container image URL for init with the OS qcow2 image (osImage) +| string +| false + +| apacheImageUrl +| ApacheImageURL - Container image URL for the main container that serves the downloaded OS qcow2 image (osImage) 
+| string +| false + +| agentImageUrl +| AgentImageURL - Container image URL for the sidecar container that discovers provisioning network IPs +| string +| false + +| userData +| UserData holds the reference to the Secret containing the user data to be passed to the host before it boots. UserData can be set per host in BaremetalHosts or here. If none of these are provided it will use a default cloud-config. +| *corev1.SecretReference +| false + +| networkData +| NetworkData holds the reference to the Secret containing network data to be passed to the hosts. NetworkData can be set per host in BaremetalHosts or here. If none of these are provided it will use default NetworkData to configure CtlPlaneIP. +| *corev1.SecretReference +| false + +| automatedCleaningMode +| When set to disabled, automated cleaning will be avoided during provisioning and deprovisioning. +| AutomatedCleaningMode +| false + +| provisionServerName +| ProvisionServerName - Optional. If supplied will be used as the base Image for the baremetalset instead of baseImageURL. +| string +| false + +| provisioningInterface +| ProvisioningInterface - Optional. If not provided along with ProvisionServerName, it would be discovered from CBO. This is the provisioning interface on the OCP masters/workers. +| string +| false + +| deploymentSSHSecret +| DeploymentSSHSecret - Name of secret holding the cloud-admin ssh keys +| string +| true + +| ctlplaneInterface +| CtlplaneInterface - Interface on the provisioned nodes to use for ctlplane network +| string +| true + +| ctlplaneGateway +| CtlplaneGateway - IP of gateway for ctlplane network (TODO: acquire this in another manner?) +| string +| false + +| ctlplaneNetmask +| CtlplaneNetmask - Netmask to use for ctlplane network (TODO: acquire this in another manner?) 
+| string +| false + +| bmhNamespace +| BmhNamespace Namespace to look for BaremetalHosts(default: openshift-machine-api) +| string +| false + +| bmhLabelSelector +| BmhLabelSelector allows for a sub-selection of BaremetalHosts based on arbitrary labels +| map[string]string +| false + +| hardwareReqs +| Hardware requests for sub-selection of BaremetalHosts with certain hardware specs +| <> +| false + +| passwordSecret +| PasswordSecret the name of the secret used to optionally set the root pwd by adding NodeRootPassword: ++++++to the secret data++++++ +| *corev1.SecretReference +| false + +| cloudUserName +| CloudUser to be configured for remote access +| string +| true + +| domainName +| DomainName is the domain name that will be set on the underlying Metal3 BaremetalHosts (TODO: acquire this is another manner?) +| string +| false + +| bootstrapDns +| BootstrapDNS - initial DNS nameserver values to set on the BaremetalHosts when they are provisioned. Note that subsequent deployment will overwrite these values +| []string +| false + +| dnsSearchDomains +| DNSSearchDomains - initial DNS nameserver values to set on the BaremetalHosts when they are provisioned. Note that subsequent deployment will overwrite these values +| []string +| false +|=== + +<> + +[#openstackbaremetalsetstatus] +==== OpenStackBaremetalSetStatus + +OpenStackBaremetalSetStatus defines the observed state of OpenStackBaremetalSet + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| baremetalHosts +| BaremetalHosts that are being processed or have been processed for this OpenStackBaremetalSet +| map[string]<> +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the opentack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#openstackprovisionserver] +==== OpenStackProvisionServer + +OpenStackProvisionServer used to serve custom images for baremetal provisioning with Metal3 + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackprovisionserverdefaults] +==== OpenStackProvisionServerDefaults + +OpenStackProvisionServerDefaults - + +|=== +| Field | Description | Scheme | Required + +| OSContainerImageURL +| +| string +| false + +| AgentImageURL +| +| string +| false + +| ApacheImageURL +| +| string +| false + +| OSImage +| +| string +| false +|=== + +<> + +[#openstackprovisionserverlist] +==== OpenStackProvisionServerList + +OpenStackProvisionServerList contains a list of OpenStackProvisionServer + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackprovisionserverspec] +==== OpenStackProvisionServerSpec + +OpenStackProvisionServerSpec defines the desired state of OpenStackProvisionServer + +|=== +| Field | Description | Scheme | Required + +| port +| Port - The port on which the Apache server should listen +| int32 +| true + +| interface +| Interface - An optional interface to use instead of the cluster's default provisioning interface (if any) +| string +| false + +| osImage +| OSImage - OS qcow2 image (compressed as gz, or uncompressed) +| string +| true + +| osContainerImageUrl +| OSContainerImageURL - Container image URL for init with the OS qcow2 image (osImage) +| string +| true + +| apacheImageUrl +| ApacheImageURL - Container image URL for the main container that serves the downloaded OS qcow2 image (osImage) +| string +| true + 
+| agentImageUrl +| AgentImageURL - Container image URL for the sidecar container that discovers provisioning network IPs +| string +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this provision server +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this provision server (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false +|=== + +<> + +[#openstackprovisionserverstatus] +==== OpenStackProvisionServerStatus + +OpenStackProvisionServerStatus defines the observed state of OpenStackProvisionServer + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of provision server Apache instances +| int32 +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| provisionIp +| IP of the provisioning interface on the node running the ProvisionServer pod +| string +| false + +| localImageUrl +| URL of provisioning image on underlying Apache web server +| string +| false +|=== + +<> + +[#ovncontroller] +==== OVNController + +OVNController is the Schema for the ovncontrollers API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ovncontrollerlist] +==== OVNControllerList + +OVNControllerList contains a list of OVNController + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ovncontrollerspec] +==== OVNControllerSpec + +OVNControllerSpec defines the desired state of OVNController + +|=== +| Field | Description | Scheme | Required + +| ovsContainerImage +| Image used for the ovsdb-server and ovs-vswitchd containers (will be set to environmental default if empty) +| string +| true + +| 
ovnContainerImage +| Image used for the ovn-controller container (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#ovncontrollerspeccore] +==== OVNControllerSpecCore + +OVNControllerSpecCore - + +|=== +| Field | Description | Scheme | Required + +| external-ids +| +| <> +| true + +| nicMappings +| +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| networkAttachment +| NetworkAttachment is a NetworkAttachment resource name to expose the service to the given network. If specified the IP address of this network is used as the OVNEncapIP. +| string +| true + +| tls +| TLS - Parameters related to TLS +| tls.SimpleService +| false +|=== + +<> + +[#ovncontrollerstatus] +==== OVNControllerStatus + +OVNControllerStatus defines the observed state of OVNController + +|=== +| Field | Description | Scheme | Required + +| numberReady +| NumberReady of the OVNController instances +| int32 +| false + +| ovsNumberReady +| ovsNumberReady of ovs instances +| int32 +| false + +| desiredNumberScheduled +| DesiredNumberScheduled - total number of the nodes which should be running Daemon +| int32 +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. 
+| int64 +| false +|=== + +<> + +[#ovsexternalids] +==== OVSExternalIDs + +OVSExternalIDs is a set of configuration options for OVS external-ids table + +|=== +| Field | Description | Scheme | Required + +| system-id +| +| string +| false + +| ovn-bridge +| +| string +| false + +| ovn-encap-type +| +| string +| false + +| availability-zones +| +| []string +| false + +| enable-chassis-as-gateway +| +| *bool +| true +|=== + +<> + +[#ovndbcluster] +==== OVNDBCluster + +OVNDBCluster is the Schema for the ovndbclusters API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ovndbclusterlist] +==== OVNDBClusterList + +OVNDBClusterList contains a list of OVNDBCluster + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ovndbclusterspec] +==== OVNDBClusterSpec + +OVNDBClusterSpec defines the desired state of OVNDBCluster + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#ovndbclusterspeccore] +==== OVNDBClusterSpecCore + +OVNDBClusterSpecCore - + +|=== +| Field | Description | Scheme | Required + +| dbType +| DBType - NB or SB +| string +| true + +| replicas +| Replicas of OVN DBCluster to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| logLevel +| LogLevel - Set log level info, dbg, emer etc +| string +| false + +| electionTimer +| OVN Northbound and Southbound RAFT db election timer to use on db creation (in milliseconds) +| int32 +| true + +| inactivityProbe +| Probe interval for the OVSDB session (in milliseconds) +| int32 +| true + +| probeIntervalToActive +| Active probe interval from standby to active ovsdb-server remote +| 
int32 +| true + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| storageClass +| StorageClass +| string +| false + +| storageRequest +| StorageRequest +| string +| true + +| networkAttachment +| NetworkAttachment is a NetworkAttachment resource name to expose the service to the given network. If specified the IP address of this network is used as the dbAddress connection. +| string +| true + +| tls +| TLS - Parameters related to TLS +| tls.SimpleService +| false +|=== + +<> + +[#ovndbclusterstatus] +==== OVNDBClusterStatus + +OVNDBClusterStatus defines the observed state of OVNDBCluster + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of OVN DBCluster instances +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| dbAddress +| DBAddress - DB IP address used by external nodes +| string +| false + +| internalDbAddress +| InternalDBAddress - DB IP address used by other Pods in the cluster +| string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. 
+| int64 +| false +|=== + +<> + +[#ovnnorthd] +==== OVNNorthd + +OVNNorthd is the Schema for the ovnnorthds API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ovnnorthdlist] +==== OVNNorthdList + +OVNNorthdList contains a list of OVNNorthd + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ovnnorthdspec] +==== OVNNorthdSpec + +OVNNorthdSpec defines the desired state of OVNNorthd + +|=== +| Field | Description | Scheme | Required + +| containerImage +| ContainerImage - Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#ovnnorthdspeccore] +==== OVNNorthdSpecCore + +OVNNorthdSpecCore - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of OVN Northd to run +| *int32 +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| logLevel +| LogLevel - Set log level info, dbg, emer etc +| string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| tls +| TLS - Parameters related to TLS +| tls.SimpleService +| false + +| nThreads +| NThreads sets number of threads used for building logical flows +| *int32 +| true +|=== + +<> + +[#ovnnorthdstatus] +==== OVNNorthdStatus + +OVNNorthdStatus defines the observed state of OVNNorthd + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of OVN Northd instances +| int32 +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes. +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the DB and AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the service user password from the Secret +| string +| true +|=== + +<> + +[#placementapi] +==== PlacementAPI + +PlacementAPI is the Schema for the placementapis API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#placementapilist] +==== PlacementAPIList + +PlacementAPIList contains a list of PlacementAPI + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#placementapispec] +==== PlacementAPISpec + +PlacementAPISpec defines the desired state of PlacementAPI + +|=== +| Field | Description | Scheme | Required + +| containerImage +| PlacementAPI Container Image URL (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#placementapispeccore] +==== PlacementAPISpecCore + +PlacementAPISpecCore - + +|=== +| Field | Description | Scheme | Required + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| databaseInstance +| MariaDB instance name Right now required by the mariadb-operator to get the credentials from the 
instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - name of MariaDBAccount which will be used to connect. +| string +| true + +| replicas +| Replicas of placement API to run +| *int32 +| true + +| secret +| Secret containing OpenStack password information for placement PlacementPassword +| string +| true + +| passwordSelectors +| PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret +| <> +| true + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service +| map[string]string +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| true + +| defaultConfigOverwrite +| DefaultConfigOverwrite - interface to overwrite default config files like policy.yaml. +| map[string]string +| false + +| resources +| Resources - Compute Resources required by this service (Limits/Requests). https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +| corev1.ResourceRequirements +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. 
+| <> +| false + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#placementapistatus] +==== PlacementAPIStatus + +PlacementAPIStatus defines the observed state of PlacementAPI + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of placement API instances +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| databaseHostname +| Placement Database Hostname +| string +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. +| int64 +| false +|=== + +<> + +[#swift] +==== Swift + +Swift is the Schema for the swifts API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#swiftlist] +==== SwiftList + +SwiftList contains a list of Swift + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#swiftspec] +==== SwiftSpec + +SwiftSpec defines the desired state of Swift + +|=== +| Field | Description | Scheme | Required + +| swiftRing +| SwiftRing - Spec definition for the Ring service of this Swift deployment +| <> +| true + +| swiftStorage +| SwiftStorage - Spec definition for the Storage service of this Swift deployment +| <> +| true + +| swiftProxy +| SwiftProxy - Spec definition for the Proxy service of this Swift deployment +| <> +| true +|=== + +<> + +[#swiftspecbase] +==== SwiftSpecBase + +SwiftSpecBase - + +|=== +| Field | Description | Scheme | Required + +| storageClass +| Storage class. 
This is passed to SwiftStorage unless storageClass is explicitly set for the SwiftStorage. +| string +| true + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| memcachedInstance +| Memcached instance name. +| string +| true +|=== + +<> + +[#swiftspeccore] +==== SwiftSpecCore + +SwiftSpecCore defines the desired state of Swift (this version is used by OpenStackControlplane) + +|=== +| Field | Description | Scheme | Required + +| swiftRing +| SwiftRing - Spec definition for the Ring service of this Swift deployment +| <> +| true + +| swiftStorage +| SwiftStorage - Spec definition for the Storage service of this Swift deployment +| <> +| true + +| swiftProxy +| SwiftProxy - Spec definition for the Proxy service of this Swift deployment +| <> +| true +|=== + +<> + +[#swiftstatus] +==== SwiftStatus + +SwiftStatus defines the observed state of Swift + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#passwordselector-2] +==== PasswordSelector + +PasswordSelector to identify the AdminUser password from the Secret + +|=== +| Field | Description | Scheme | Required + +| service +| Service - Selector to get the Swift service password from the Secret +| string +| true +|=== + +<> + +[#proxyoverridespec] +==== ProxyOverrideSpec + +ProxyOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#swiftproxy] +==== SwiftProxy + +SwiftProxy is the Schema for the swiftproxies API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#swiftproxylist] +==== SwiftProxyList + +SwiftProxyList contains a list of SwiftProxy + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#swiftproxyspec] +==== SwiftProxySpec + +SwiftProxySpec defines the desired state of SwiftProxy + +|=== +| Field | Description | Scheme | Required + +| containerImageProxy +| Swift Proxy Container Image URL +| string +| true +|=== + +<> + +[#swiftproxyspeccore] +==== SwiftProxySpecCore + +SwiftProxySpecCore - + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas of Swift Proxy +| *int32 +| true + +| serviceUser +| ServiceUser - optional username used for this service to register in Swift +| string +| true + +| secret +| Secret containing OpenStack password information for Swift service user password +| string +| true + +| passwordSelectors +| PasswordSelector - Selector to choose the Swift user password from the Secret +| <> +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| memcachedInstance +| Memcached instance name. 
+| string +| true + +| rabbitMqClusterName +| RabbitMQ instance name to request a transportURL for Ceilometer middleware +| string +| true + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false + +| defaultConfigOverwrite +| DefaultConfigOverwrite - can be used to add additional files. Those get added to the service config dir in /etc/++++++-conf.d++++++ +| map[string]string +| false + +| encryptionEnabled +| Encrypts new objects at rest +| bool +| true + +| ceilometerEnabled +| Enables ceilometer in the swift proxy and creates required resources +| bool +| true +|=== + +<> + +[#swiftproxystatus] +==== SwiftProxyStatus + +SwiftProxyStatus defines the observed state of SwiftProxy + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of SwiftProxy instances +| int32 +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#swiftdisk] +==== SwiftDisk + +|=== +| Field | Description | Scheme | Required + +| device +| +| string +| true + +| path +| +| string +| true + +| weight +| +| int32 +| true + +| region +| +| int32 +| true + +| zone +| +| int32 +| true +|=== + +<> + +[#swiftring] +==== SwiftRing + +SwiftRing is the Schema for the swiftrings API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#swiftringlist] +==== SwiftRingList + +SwiftRingList contains a list of SwiftRing + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#swiftringspec] +==== SwiftRingSpec + +SwiftRingSpec defines the desired state of SwiftRing + +|=== +| Field | Description | Scheme | Required + +| containerImage +| Image URL for Swift proxy service +| string +| true +|=== + +<> + +[#swiftringspeccore] +==== SwiftRingSpecCore + +SwiftRingSpec defines the desired state of SwiftRing + +|=== +| Field | Description | Scheme | Required + +| ringReplicas +| Number of Swift data replicas (=copies) +| *int64 +| true + +| partPower +| Partition power of the Swift rings +| *int64 +| true + +| minPartHours +| Minimum number of hours to restrict moving a partition more than once +| *int64 +| true + +| tls +| TLS - Parameters related to the TLS +| tls.Ca +| false +|=== + +<> + +[#swiftringstatus] +==== SwiftRingStatus + +SwiftRingStatus defines the observed state of SwiftRing + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#swiftstorage] +==== SwiftStorage + +SwiftStorage is the Schema for the swiftstorages API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#swiftstoragelist] +==== SwiftStorageList + +SwiftStorageList contains a list of SwiftStorage + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#swiftstoragespec] +==== SwiftStorageSpec + +SwiftStorageSpec defines the desired state of SwiftStorage + +|=== +| Field | Description | Scheme | Required + +| containerImageAccount +| Image URL for Swift account service +| string +| true + +| containerImageContainer +| Image URL for Swift container service +| string +| true + +| containerImageObject +| Image URL for Swift object service +| string +| true + +| containerImageProxy +| Image URL for Swift proxy service +| string +| true +|=== + +<> + +[#swiftstoragespeccore] +==== SwiftStorageSpecCore + +SwiftStorageSpecCore - + +|=== +| Field | Description | Scheme | Required + +| replicas +| +| *int32 +| true + +| storageClass +| Name of StorageClass to use for Swift PVs +| string +| true + +| storageRequest +| Minimum size for Swift PVs +| string +| true + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network +| []string +| false + +| memcachedInstance +| Memcached instance name. +| string +| true + +| containerSharderEnabled +| If the container sharder daemon is enabled. +| bool +| true + +| defaultConfigOverwrite +| DefaultConfigOverwrite - can be used to add additionalfiles. 
Those get added to the service config dir in /etc/++++++-conf.d++++++ +| map[string]string +| false +|=== + +<> + +[#swiftstoragestatus] +==== SwiftStorageStatus + +SwiftStorageStatus defines the observed state of SwiftStorage + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of SwiftStorage instances +| int32 +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networkAttachments +| NetworkAttachments status of the deployment pods +| map[string][]string +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#apioverridespec-2] +==== APIOverrideSpec + +APIOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. 
The key must be the endpoint type (public, internal) +| map[service.Endpoint]service.RoutedOverrideSpec +| false +|=== + +<> + +[#aodh] +==== Aodh + +Aodh defines the aodh component spec + +|=== +| Field | Description | Scheme | Required + +| apiImage +| +| string +| true + +| evaluatorImage +| +| string +| true + +| notifierImage +| +| string +| true + +| listenerImage +| +| string +| true +|=== + +<> + +[#aodhcore] +==== AodhCore + +Aodh defines the aodh component spec + +|=== +| Field | Description | Scheme | Required + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Aodh +| string +| false + +| databaseInstance +| MariaDB instance name Right now required by the maridb-operator to get the credentials from the instance to create the DB Might not be required in future +| string +| true + +| databaseAccount +| DatabaseAccount - optional MariaDBAccount CR name used for aodh DB, defaults to aodh +| string +| true + +| passwordSelector +| PasswordSelectors - Selectors to identify the service from the Secret +| <> +| false + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| secret +| Secret containing OpenStack password information for aodh +| string +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. The content gets added to to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. 
Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| networkAttachmentDefinitions +| NetworkAttachmentDefinitions list of network attachment definitions the service pod gets attached to +| []string +| false + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false + +| preserveJobs +| PreserveJobs - do not delete jobs after they finished e.g. to check logs +| bool +| true + +| tls +| TLS - Parameters related to the TLS +| tls.API +| false +|=== + +<> + +[#autoscaling] +==== Autoscaling + +Autoscaling is the Schema for the autoscalings API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#autoscalinglist] +==== AutoscalingList + +AutoscalingList contains a list of Autoscaling + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#autoscalingspec] +==== AutoscalingSpec + +AutoscalingSpec defines the desired state of Autoscaling + +|=== +| Field | Description | Scheme | Required + +| aodh +| Aodh spec +| <> +| false +|=== + +<> + +[#autoscalingspecbase] +==== AutoscalingSpecBase + +AutoscalingSpecBase - + +|=== +| Field | Description | Scheme | Required + +| prometheusHost +| Host of user deployed prometheus +| string +| false + +| prometheusPort +| Port of user deployed prometheus +| int32 +| false + +| prometheusTLS +| If TLS should be used for user deployed prometheus +| *bool +| false + +| heatInstance +| Heat instance name. 
+| string +| true +|=== + +<> + +[#autoscalingspeccore] +==== AutoscalingSpecCore + +AutoscalingSpecCore defines the desired state of Autoscaling (this version is used by the OpenStackControlplane no image parameters) + +|=== +| Field | Description | Scheme | Required + +| aodh +| Aodh spec +| <> +| false +|=== + +<> + +[#autoscalingstatus] +==== AutoscalingStatus + +AutoscalingStatus defines the observed state of Autoscaling + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of autoscaling instances +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| networks +| Networks in addtion to the cluster network, the service is attached to +| []string +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| databaseHostname +| DatabaseHostname - Hostname for the database +| string +| false + +| prometheusHostname +| PrometheusHost - Hostname for prometheus used for autoscaling +| string +| false + +| prometheusPort +| PrometheusPort - Port for prometheus used for autoscaling +| int32 +| false + +| prometheusTLS +| PrometheusTLS - Determines if TLS should be used for accessing prometheus +| bool +| false + +| apiEndpoint +| API endpoint +| map[string]string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#ceilometer] +==== Ceilometer + +Ceilometer is the Schema for the ceilometers API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ceilometerlist] +==== CeilometerList + +CeilometerList contains a list of Ceilometer + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ceilometerspec] +==== CeilometerSpec + +CeilometerSpec defines the desired state of Ceilometer + +|=== +| Field | Description | Scheme | Required + +| centralImage +| +| string +| true + +| notificationImage +| +| string +| true + +| sgCoreImage +| +| string +| true + +| computeImage +| +| string +| true + +| ipmiImage +| +| string +| true + +| proxyImage +| +| string +| true +|=== + +<> + +[#ceilometerspeccore] +==== CeilometerSpecCore + +CeilometerSpecCore defines the desired state of Ceilometer. This version is used by the OpenStackControlplane (no image parameters) + +|=== +| Field | Description | Scheme | Required + +| rabbitMqClusterName +| RabbitMQ instance name Needed to request a transportURL that is created and used in Telemetry +| string +| false + +| passwordSelector +| PasswordSelectors - Selectors to identify the service from the Secret +| <> +| false + +| serviceUser +| ServiceUser - optional username used for this service to register in keystone +| string +| true + +| secret +| Secret containing OpenStack password information for ceilometer +| string +| true + +| customServiceConfig +| CustomServiceConfig - customize the service config using this parameter to change service defaults, or overwrite rendered information using raw OpenStack config format. 
The content gets added to /etc/++++++/++++++.conf.d directory as custom.conf file.++++++++++++ +| string +| false + +| defaultConfigOverwrite +| ConfigOverwrite - interface to overwrite default config files like e.g. logging.conf or policy.json. But can also be used to add additional files. Those get added to the service config dir in /etc/++++++.++++++ +| map[string]string +| false + +| networkAttachmentDefinitions +| NetworkAttachmentDefinitions list of network attachment definitions the service pod gets attached to +| []string +| false + +| tls +| TLS - Parameters related to the TLS +| tls.SimpleService +| false +|=== + +<> + +[#ceilometerstatus] +==== CeilometerStatus + +CeilometerStatus defines the observed state of Ceilometer + +|=== +| Field | Description | Scheme | Required + +| readyCount +| ReadyCount of ceilometer instances +| int32 +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| transportURLSecret +| TransportURLSecret - Secret containing RabbitMQ transportURL +| string +| false + +| networks +| Networks in addition to the cluster network, the service is attached to +| []string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#logging] +==== Logging + +Logging is the Schema for the loggings API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#logginglist] +==== LoggingList + +LoggingList contains a list of Logging + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#loggingspec] +==== LoggingSpec + +LoggingSpec defines the desired state of Logging + +|=== +| Field | Description | Scheme | Required + +| ipaddr +| IPAddr is the address where the service will listen on +| string +| true + +| port +| Port is the port where the service will listen on +| int32 +| true + +| targetPort +| TargetPort is the port where the logging syslog receiver is listening +| int +| true + +| cloNamespace +| CLONamespace points to the namespace where the cluster-logging-operator is deployed +| string +| true + +| annotations +| Annotations is a way to configure certain LoadBalancers, like MetalLB +| map[string]string +| true + +| rsyslogRetries +| The number of retries rsyslog will attempt before abandoning +| int32 +| true + +| rsyslogQueueType +| The type of the local queue of logs +| string +| true + +| rsyslogQueueSize +| The size of the local queue of logs +| int32 +| true +|=== + +<> + +[#loggingstatus] +==== LoggingStatus + +LoggingStatus defines the observed state of Logging + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. 
If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#metricstorage] +==== MetricStorage + +MetricStorage is the Schema for the metricstorages API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#metricstoragelist] +==== MetricStorageList + +MetricStorageList contains a list of MetricStorage + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#metricstoragespec] +==== MetricStorageSpec + +MetricStorageSpec defines the desired state of MetricStorage + +|=== +| Field | Description | Scheme | Required + +| monitoringStack +| MonitoringStack allows to define a metric storage with options supported by Red Hat +| *<> +| false + +| customMonitoringStack +| CustomMonitoringStack allows to deploy a custom monitoring stack when the options in "MonitoringStack" aren't enough +| *obov1.MonitoringStackSpec +| false + +| prometheusTls +| TLS - Parameters related to the TLS +| tls.SimpleService +| false +|=== + +<> + +[#metricstoragestatus] +==== MetricStorageStatus + +MetricStorageStatus defines the observed state of MetricStorage + +|=== +| Field | Description | Scheme | Required + +| conditions +| +| condition.Conditions +| false + +| prometheusTLSPatched +| +| bool +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#monitoringstack] +==== MonitoringStack + +MonitoringStack defines the options for a Red Hat supported metric storage + +|=== +| Field | Description | Scheme | Required + +| alertingEnabled +| AlertingEnabled allows to enable or disable alertmanager +| bool +| true + +| dashboardsEnabled +| DashboardsEnabled allows to enable or disable dashboards and related artifacts +| bool +| true + +| scrapeInterval +| ScrapeInterval sets the interval between scrapes +| string +| true + +| dataplaneNetwork +| DataplaneNetwork defines the network that will be used to scrape dataplane node_exporter endpoints +| infranetworkv1.NetNameStr +| true + +| storage +| Storage allows to define options for how to store metrics +| <> +| true +|=== + +<> + +[#persistentstorage] +==== PersistentStorage + +PersistentStorage defines storage options used for persistent storage + +|=== +| Field | Description | Scheme | Required + +| pvcStorageRequest +| PvcStorageRequest The amount of storage to request in PVC +| string +| true + +| pvcStorageSelector +| PvcStorageSelector The Label selector to specify in PVCs +| metav1.LabelSelector +| false + +| pvcStorageClass +| PvcStorageClass The storage class to use for storing metrics +| string +| false +|=== + +<> + +[#storage] +==== Storage + +Storage defines the options used for storage of metrics + +|=== +| Field | Description | Scheme | Required + +| strategy +| Strategy to use for storage. 
Can be "persistent" or empty, in which case a COO default is used +| string +| true + +| retention +| Retention time for metrics +| string +| true + +| persistent +| Used to specify the options of persistent storage when strategy = "persistent" +| <> +| true +|=== + +<> + +[#autoscalingsection] +==== AutoscalingSection + +AutoscalingSection defines the desired state of the autoscaling service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OpenStack autoscaling service should be deployed and managed +| bool +| true +|=== + +<> + +[#autoscalingsectioncore] +==== AutoscalingSectionCore + +AutoscalingSectionCore defines the desired state of the autoscaling service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OpenStack autoscaling service should be deployed and managed +| bool +| true +|=== + +<> + +[#ceilometersection] +==== CeilometerSection + +CeilometerSection defines the desired state of the ceilometer service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OpenStack Ceilometer service should be deployed and managed +| bool +| true +|=== + +<> + +[#ceilometersectioncore] +==== CeilometerSectionCore + +CeilometerSectionCore defines the desired state of the ceilometer service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OpenStack Ceilometer service should be deployed and managed +| bool +| true +|=== + +<> + +[#loggingsection] +==== LoggingSection + +LoggingSection defines the desired state of the logging service + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether OpenStack logging service should be deployed and managed +| bool +| true +|=== + +<> + +[#metricstoragesection] +==== MetricStorageSection + +MetricStorageSection defines the desired state of the MetricStorage + +|=== +| Field | Description | Scheme | Required + +| enabled +| Enabled - Whether a MetricStorage should be 
deployed and managed +| bool +| true +|=== + +<> + +[#passwordsselector] +==== PasswordsSelector + +PasswordsSelector to identify the Service password from the Secret + +|=== +| Field | Description | Scheme | Required + +| ceilometerService +| CeilometerService - Selector to get the ceilometer service password from the Secret +| string +| true + +| aodhService +| AodhService - Selector to get the aodh service password from the Secret +| string +| true +|=== + +<> + +[#telemetry] +==== Telemetry + +Telemetry is the Schema for the telemetry API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#telemetrylist] +==== TelemetryList + +TelemetryList contains a list of Telemetry + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#telemetryspec] +==== TelemetrySpec + +TelemetrySpec defines the desired state of Telemetry + +|=== +| Field | Description | Scheme | Required + +| autoscaling +| Autoscaling - Parameters related to the autoscaling service +| <> +| false + +| ceilometer +| Ceilometer - Parameters related to the ceilometer service +| <> +| false +|=== + +<> + +[#telemetryspecbase] +==== TelemetrySpecBase + +TelemetrySpecBase - + +|=== +| Field | Description | Scheme | Required + +| metricStorage +| MetricStorage - Parameters related to the metricStorage +| <> +| false + +| logging +| Logging - Parameters related to the logging +| <> +| false +|=== + +<> + +[#telemetryspeccore] +==== TelemetrySpecCore + +TelemetrySpecCore defines the desired state of Telemetry. 
This version has no image parameters and is used by OpenStackControlplane + +|=== +| Field | Description | Scheme | Required + +| autoscaling +| Autoscaling - Parameters related to the autoscaling service +| <> +| false + +| ceilometer +| Ceilometer - Parameters related to the ceilometer service +| <> +| false +|=== + +<> + +[#telemetrystatus] +==== TelemetryStatus + +TelemetryStatus defines the observed state of Telemetry + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#memcached] +==== Memcached + +Memcached is the Schema for the memcacheds API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#memcachedlist] +==== MemcachedList + +MemcachedList contains a list of Memcached + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#memcachedspec] +==== MemcachedSpec + +MemcachedSpec defines the desired state of Memcached + +|=== +| Field | Description | Scheme | Required + +| containerImage +| Name of the memcached container image to run (will be set to environmental default if empty) +| string +| true +|=== + +<> + +[#memcachedspeccore] +==== MemcachedSpecCore + +MemcachedSpecCore - this version is used by the OpenStackControlplane CR (no container images) + +|=== +| Field | Description | Scheme | Required + +| replicas +| Size of the memcached cluster +| *int32 +| true + +| tls +| TLS 
settings for memcached service +| tls.SimpleService +| false +|=== + +<> + +[#memcachedstatus] +==== MemcachedStatus + +MemcachedStatus defines the observed state of Memcached + +|=== +| Field | Description | Scheme | Required + +| hash +| Map of hashes to track input changes +| map[string]string +| false + +| readyCount +| ReadyCount of Memcached instances +| int32 +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| serverList +| ServerList - List of memcached endpoints without inet(6) prefix +| []string +| false + +| serverListWithInet +| ServerListWithInet - List of memcached endpoints with inet(6) prefix +| []string +| false + +| tlsSupport +| Whether TLS is supported by the memcached instance +| bool +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#dnsdata] +==== DNSData + +DNSData is the Schema for the dnsdata API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#dnsdatalist] +==== DNSDataList + +DNSDataList contains a list of DNSData + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#dnsdataspec] +==== DNSDataSpec + +DNSDataSpec defines the desired state of DNSData + +|=== +| Field | Description | Scheme | Required + +| hosts +| +| []<> +| false + +| dnsDataLabelSelectorValue +| Value of the DNSDataLabelSelector to set on the created configmaps containing hosts information +| string +| true +|=== + +<> + +[#dnsdatastatus] +==== DNSDataStatus + +DNSDataStatus defines the observed state of DNSData + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of the dns data configmap +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#dnshost] +==== DNSHost + +DNSHost holds the mapping between IP and hostnames that will be added to dnsmasq hosts file. + +|=== +| Field | Description | Scheme | Required + +| ip +| IP address of the host file entry. +| string +| true + +| hostnames +| Hostnames for the IP address. 
+| []string +| true +|=== + +<> + +[#dnsmasq] +==== DNSMasq + +DNSMasq is the Schema for the dnsmasqs API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#dnsmasqlist] +==== DNSMasqList + +DNSMasqList contains a list of DNSMasq + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#dnsmasqoption] +==== DNSMasqOption + +DNSMasqOption defines allowed options for dnsmasq + +|=== +| Field | Description | Scheme | Required + +| key +| +| string +| true + +| values +| +| []string +| true +|=== + +<> + +[#dnsmasqoverridespec] +==== DNSMasqOverrideSpec + +DNSMasqOverrideSpec to override the generated manifest of several child resources. + +|=== +| Field | Description | Scheme | Required + +| service +| Override configuration for the Service created to serve traffic to the cluster. +| *service.OverrideSpec +| false +|=== + +<> + +[#dnsmasqspec] +==== DNSMasqSpec + +DNSMasqSpec defines the desired state of DNSMasq + +|=== +| Field | Description | Scheme | Required + +| containerImage +| DNSMasq Container Image URL +| string +| true +|=== + +<> + +[#dnsmasqspeccore] +==== DNSMasqSpecCore + +DNSMasqSpecCore - this version is used by the OpenStackControlplane CR (no container images) + +|=== +| Field | Description | Scheme | Required + +| replicas +| Replicas - DNSMasq Replicas +| *int32 +| true + +| options +| Options allows to customize the dnsmasq instance +| []<> +| false + +| nodeSelector +| NodeSelector to target subset of worker nodes running this service. Setting NodeSelector here acts as a default value and can be overridden by service specific NodeSelector Settings. 
+| map[string]string +| false + +| dnsDataLabelSelectorValue +| Value of the DNSDataLabelSelectorKey which was set on the configmaps containing hosts information +| string +| true + +| override +| Override, provides the ability to override the generated manifest of several child resources. +| <> +| false +|=== + +<> + +[#dnsmasqstatus] +==== DNSMasqStatus + +DNSMasqStatus defines the observed state of DNSMasq + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| hash +| Map of hashes to track e.g. job status +| map[string]string +| false + +| readyCount +| ReadyCount of dnsmasq deployment +| int32 +| false + +| dnsAddresses +| DNSServer Addresses +| []string +| false + +| dnsClusterAddresses +| DNSServer Cluster Addresses +| []string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. the ContainerImage) +| int64 +| false +|=== + +<> + +[#ipset] +==== IPSet + +IPSet is the Schema for the ipsets API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#ipsetlist] +==== IPSetList + +IPSetList contains a list of IPSet + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#ipsetnetwork] +==== IPSetNetwork + +IPSetNetwork Type + +|=== +| Field | Description | Scheme | Required + +| name +| Network Name +| NetNameStr +| true + +| subnetName +| Subnet Name +| NetNameStr +| true + +| fixedIP +| Fixed Ip +| *string +| false + +| defaultRoute +| Use gateway from subnet as default route. There can only be one default route defined per IPSet. 
+| *bool +| false +|=== + +<> + +[#ipsetreservation] +==== IPSetReservation + +IPSetReservation defines reservation status per requested network + +|=== +| Field | Description | Scheme | Required + +| network +| Network name +| NetNameStr +| true + +| subnet +| Subnet name +| NetNameStr +| true + +| address +| Address contains the IP address +| string +| true + +| mtu +| MTU of the network +| int +| false + +| cidr +| Cidr the cidr to use for this network +| string +| false + +| vlan +| Vlan ID +| *int +| false + +| gateway +| Gateway optional gateway for the network +| *string +| false + +| routes +| Routes, list of networks that should be routed via network gateway. +| []<> +| false + +| dnsDomain +| DNSDomain of the subnet +| string +| true +|=== + +<> + +[#ipsetspec] +==== IPSetSpec + +IPSetSpec defines the desired state of IPSet + +|=== +| Field | Description | Scheme | Required + +| immutable +| Immutable, if `true` the validation webhook will block any update to the Spec, except of Spec.Immutable. This allows the caller to add safety mechanism to the object. If a change is required to the object, an extra update needs to be done to make updates possible. +| bool +| true + +| networks +| Networks used to request IPs for +| []<> +| true +|=== + +<> + +[#ipsetstatus] +==== IPSetStatus + +IPSetStatus defines the observed state of IPSet + +|=== +| Field | Description | Scheme | Required + +| reservations +| Reservation +| []<> +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#allocationrange] +==== AllocationRange + +AllocationRange definition + +|=== +| Field | Description | Scheme | Required + +| start +| Start IP for the AllocationRange +| string +| true + +| end +| End IP for the AllocationRange +| string +| true +|=== + +<> + +[#netconfig] +==== NetConfig + +NetConfig is the Schema for the netconfigs API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#netconfiglist] +==== NetConfigList + +NetConfigList contains a list of NetConfig + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#netconfigspec] +==== NetConfigSpec + +NetConfigSpec defines the desired state of NetConfig + +|=== +| Field | Description | Scheme | Required + +| networks +| Networks, list of all networks of the deployment +| []<> +| true +|=== + +<> + +[#network] +==== Network + +Network definition + +|=== +| Field | Description | Scheme | Required + +| name +| Name of the network, e.g. External, InternalApi, ... 
+| NetNameStr +| true + +| dnsDomain +| DNSDomain name of the Network +| string +| true + +| mtu +| MTU of the network +| int +| true + +| subnets +| Subnets of the network +| []<> +| true +|=== + +<> + +[#route] +==== Route + +Route definition + +|=== +| Field | Description | Scheme | Required + +| destination +| Destination, network CIDR +| string +| true + +| nexthop +| Nexthop, gateway for the destination +| string +| true +|=== + +<> + +[#subnet] +==== Subnet + +Subnet definition + +|=== +| Field | Description | Scheme | Required + +| name +| Name of the subnet +| NetNameStr +| true + +| cidr +| Cidr the cidr to use for this network +| string +| true + +| dnsDomain +| DNSDomain name of the subnet, allows to overwrite the DNSDomain of the Network +| *string +| false + +| vlan +| Vlan ID +| *int +| false + +| allocationRanges +| AllocationRanges a list of AllocationRange for assignment. Allocation will start from first range, first address. +| []<> +| true + +| excludeAddresses +| ExcludeAddresses a set of IPs that should be excluded from used as reservation, for both dynamic and static via IPSet FixedIP parameter +| []string +| false + +| gateway +| Gateway optional gateway for the network +| *string +| false + +| routes +| Routes, list of networks that should be routed via network gateway. 
+| []<> +| false +|=== + +<> + +[#ipaddress] +==== IPAddress + +IPAddress - + +|=== +| Field | Description | Scheme | Required + +| network +| Network name +| NetNameStr +| true + +| subnet +| Subnet name +| NetNameStr +| true + +| address +| Address contains the IP address +| string +| true +|=== + +<> + +[#reservation] +==== Reservation + +Reservation is the Schema for the reservations API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#reservationlist] +==== ReservationList + +ReservationList contains a list of Reservation + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#reservationspec] +==== ReservationSpec + +ReservationSpec defines the desired state of Reservation + +|=== +| Field | Description | Scheme | Required + +| ipSetRef +| IPSetRef points to the IPSet object the IPs were created for. 
+| corev1.ObjectReference +| true + +| reservation +| Reservation, map (index network name) with reservation +| map[string]<> +| true +|=== + +<> + +[#transporturl] +==== TransportURL + +TransportURL is the Schema for the transporturls API + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#transporturllist] +==== TransportURLList + +TransportURLList contains a list of TransportURL + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#transporturlspec] +==== TransportURLSpec + +TransportURLSpec defines the desired state of TransportURL + +|=== +| Field | Description | Scheme | Required + +| rabbitmqClusterName +| RabbitmqClusterName the name of the Rabbitmq cluster which to configure the transport URL +| string +| true +|=== + +<> + +[#transporturlstatus] +==== TransportURLStatus + +TransportURLStatus defines the observed state of TransportURL + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| secretName +| SecretName - name of the secret containing the rabbitmq transport URL +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes injected by the openstack-operator in the top-level CR (e.g. 
the ContainerImage) +| int64 +| false +|=== + +<> + +[#openstackdataplanedeployment] +==== OpenStackDataPlaneDeployment + +OpenStackDataPlaneDeployment is the Schema for the openstackdataplanedeployments API OpenStackDataPlaneDeployment name must be a valid RFC1123 as it is used in labels + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackdataplanedeploymentlist] +==== OpenStackDataPlaneDeploymentList + +OpenStackDataPlaneDeploymentList contains a list of OpenStackDataPlaneDeployment + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackdataplanedeploymentspec] +==== OpenStackDataPlaneDeploymentSpec + +OpenStackDataPlaneDeploymentSpec defines the desired state of OpenStackDataPlaneDeployment + +|=== +| Field | Description | Scheme | Required + +| nodeSets +| NodeSets is the list of NodeSets deployed +| []string +| true + +| backoffLimit +| BackoffLimit allows to define the maximum number of retried executions (defaults to 6). 
+| *int32 +| false + +| ansibleTags +| AnsibleTags for ansible execution +| string +| false + +| ansibleLimit +| AnsibleLimit for ansible execution +| string +| false + +| ansibleSkipTags +| AnsibleSkipTags for ansible execution +| string +| false + +| ansibleExtraVars +| AnsibleExtraVars for ansible execution +| map[string]json.RawMessage +| false + +| servicesOverride +| ServicesOverride list +| []string +| false + +| deploymentRequeueTime +| Time before the deployment is requeued in seconds +| int +| true +|=== + +<> + +[#openstackdataplanedeploymentstatus] +==== OpenStackDataPlaneDeploymentStatus + +OpenStackDataPlaneDeploymentStatus defines the observed state of OpenStackDataPlaneDeployment + +|=== +| Field | Description | Scheme | Required + +| nodeSetConditions +| NodeSetConditions +| map[string]condition.Conditions +| false + +| configMapHashes +| ConfigMapHashes +| map[string]string +| false + +| secretHashes +| SecretHashes +| map[string]string +| false + +| nodeSetHashes +| NodeSetHashes +| map[string]string +| false + +| containerImages +| ContainerImages +| map[string]string +| false + +| conditions +| Conditions +| condition.Conditions +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this Deployment. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. 
+| int64 +| false + +| deployedVersion +| DeployedVersion +| string +| false + +| deployed +| Deployed +| bool +| false +|=== + +<> + +[#openstackdataplanenodeset] +==== OpenStackDataPlaneNodeSet + +OpenStackDataPlaneNodeSet is the Schema for the openstackdataplanenodesets API OpenStackDataPlaneNodeSet name must be a valid RFC1123 as it is used in labels + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackdataplanenodesetlist] +==== OpenStackDataPlaneNodeSetList + +OpenStackDataPlaneNodeSetList contains a list of OpenStackDataPlaneNodeSets + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackdataplanenodesetspec] +==== OpenStackDataPlaneNodeSetSpec + +OpenStackDataPlaneNodeSetSpec defines the desired state of OpenStackDataPlaneNodeSet + +|=== +| Field | Description | Scheme | Required + +| baremetalSetTemplate +| BaremetalSetTemplate Template for BaremetalSet for the NodeSet +| baremetalv1.OpenStackBaremetalSetSpec +| false + +| nodeTemplate +| NodeTemplate - node attributes specific to nodes defined by this resource. These attributes can be overridden at the individual node level, else take their defaults from values in this section. +| <> +| true + +| nodes +| Nodes - Map of Node Names and node specific data. Values here override defaults in the upper level section. +| map[string]<> +| true + +| env +| Env is a list containing the environment variables to pass to the pod Variables modifying behavior of AnsibleEE can be specified here. 
+| []corev1.EnvVar +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to pass to the ansibleee resource which allows to connect the ansibleee runner to the given network +| []string +| false + +| services +| Services list +| []string +| true + +| tags +| Tags - Additional tags for NodeSet +| []string +| false + +| secretMaxSize +| SecretMaxSize - Maximum size in bytes of a Kubernetes secret. This size is currently situated around 1 MiB (nearly 1 MB). +| int +| true + +| preProvisioned +| \n\nPreProvisioned - Set to true if the nodes have been Pre Provisioned. +| bool +| false + +| tlsEnabled +| TLSEnabled - Whether the node set has TLS enabled. +| bool +| true +|=== + +<> + +[#openstackdataplanenodesetstatus] +==== OpenStackDataPlaneNodeSetStatus + +OpenStackDataPlaneNodeSetStatus defines the observed state of OpenStackDataPlaneNodeSet + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false + +| deploymentStatuses +| DeploymentStatuses +| map[string]condition.Conditions +| false + +| allHostnames +| AllHostnames +| map[string]map[infranetworkv1.NetNameStr]string +| false + +| allIPs +| AllIPs +| map[string]map[infranetworkv1.NetNameStr]string +| false + +| configMapHashes +| ConfigMapHashes +| map[string]string +| false + +| secretHashes +| SecretHashes +| map[string]string +| false + +| dnsClusterAddresses +| DNSClusterAddresses +| []string +| false + +| containerImages +| ContainerImages +| map[string]string +| false + +| ctlplaneSearchDomain +| CtlplaneSearchDomain +| string +| false + +| configHash +| ConfigHash - holds the current hash of the NodeTemplate and Node sections of the struct. This hash is used to determine when new Ansible executions are required to roll out config changes. +| string +| false + +| deployedConfigHash +| DeployedConfigHash - holds the hash of the NodeTemplate and Node sections of the struct that was last deployed. 
This hash is used to determine when new Ansible executions are required to roll out config changes. +| string +| false + +| observedGeneration +| ObservedGeneration - the most recent generation observed for this NodeSet. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. +| int64 +| false + +| deployedVersion +| DeployedVersion +| string +| false +|=== + +<> + +[#openstackdataplaneservice] +==== OpenStackDataPlaneService + +OpenStackDataPlaneService is the Schema for the openstackdataplaneservices API OpenStackDataPlaneService name must be a valid RFC1123 as it is used in labels + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ObjectMeta +| false + +| spec +| +| <> +| false + +| status +| +| <> +| false +|=== + +<> + +[#openstackdataplaneservicelist] +==== OpenStackDataPlaneServiceList + +OpenStackDataPlaneServiceList contains a list of OpenStackDataPlaneService + +|=== +| Field | Description | Scheme | Required + +| metadata +| +| metav1.ListMeta +| false + +| items +| +| []<> +| true +|=== + +<> + +[#openstackdataplaneservicespec] +==== OpenStackDataPlaneServiceSpec + +OpenStackDataPlaneServiceSpec defines the desired state of OpenStackDataPlaneService + +|=== +| Field | Description | Scheme | Required + +| configMaps +| ConfigMaps list of ConfigMap names to mount as ExtraMounts for the OpenStackAnsibleEE +| []string +| false + +| secrets +| Secrets list of Secret names to mount as ExtraMounts for the OpenStackAnsibleEE +| []string +| false + +| dataSources +| DataSources list of DataSource objects to mount as ExtraMounts for the OpenStackAnsibleEE +| []<> +| false + +| tlsCerts +| TLSCerts tls certs to be generated +| map[string]<> +| false + +| playbookContents +| PlaybookContents is an inline playbook contents that ansible will run on execution. 
+| string +| false + +| playbook +| Playbook is a path to the playbook that ansible will run on this execution +| string +| false + +| caCerts +| CACerts - Secret containing the CA certificate chain +| string +| false + +| openStackAnsibleEERunnerImage +| OpenStackAnsibleEERunnerImage image to use as the ansibleEE runner image +| string +| false + +| certsFrom +| CertsFrom - Service name used to obtain TLSCert and CACerts data. If both CertsFrom and either TLSCert or CACerts is set, then those fields take precedence. +| string +| false + +| addCertMounts +| AddCertMounts - Whether to add cert mounts +| bool +| true + +| deployOnAllNodeSets +| DeployOnAllNodeSets - should the service be deployed across all nodesets This will override default target of a service play, setting it to 'all'. +| bool +| false + +| containerImageFields +| ContainerImageFields - list of container image field names that this service deploys. The field names should match the ContainerImages struct field names from github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1 +| []string +| false + +| edpmServiceType +| EDPMServiceType - service type, which typically corresponds to one of the default service names (such as nova, ovn, etc). Also typically corresponds to the ansible role name (without the "edpm_" prefix) used to manage the service. If not set, will default to the OpenStackDataPlaneService name. 
+| string +| false +|=== + +<> + +[#openstackdataplaneservicestatus] +==== OpenStackDataPlaneServiceStatus + +OpenStackDataPlaneServiceStatus defines the observed state of OpenStackDataPlaneService + +|=== +| Field | Description | Scheme | Required + +| conditions +| Conditions +| condition.Conditions +| false +|=== + +<> + +[#openstackdataplaneservicecert] +==== OpenstackDataPlaneServiceCert + +OpenstackDataPlaneServiceCert defines the property of a TLS cert issued for a dataplane service + +|=== +| Field | Description | Scheme | Required + +| contents +| Contents of the certificate This is a list of strings for properties that are needed in the cert +| []string +| true + +| networks +| Networks to include in SNI for the cert +| []infranetworkv1.NetNameStr +| false + +| issuer +| Issuer is the label for the issuer to issue the cert Only one issuer should have this label +| string +| false + +| keyUsages +| KeyUsages to be added to the issued cert +| []certmgrv1.KeyUsage +| false + +| edpmRoleServiceName +| EDPMRoleServiceName is the value of the ++++++_service_name variable from the edpm-ansible role where this certificate is used. For example if the certificate is for edpm_ovn from edpm-ansible, EDPMRoleServiceName must be ovn, which matches the edpm_ovn_service_name variable from the role. If not set, OpenStackDataPlaneService.Spec.EDPMServiceType is used. 
If OpenStackDataPlaneService.Spec.EDPMServiceType is not set, then OpenStackDataPlaneService.Name is used.++++++ +| string +| false +|=== + +<> + +[#ansibleeespec] +==== AnsibleEESpec + +AnsibleEESpec is a specification of the ansible EE attributes + +|=== +| Field | Description | Scheme | Required + +| extraMounts +| ExtraMounts containing files which can be mounted into an Ansible Execution Pod +| []storage.VolMounts +| false + +| env +| Env is a list containing the environment variables to pass to the pod +| []corev1.EnvVar +| false + +| extraVars +| ExtraVars for ansible execution +| map[string]json.RawMessage +| false + +| dnsConfig +| DNSConfig for setting dnsservers +| *corev1.PodDNSConfig +| false + +| networkAttachments +| NetworkAttachments is a list of NetworkAttachment resource names to pass to the ansibleee resource which allows to connect the ansibleee runner to the given network +| []string +| true + +| openStackAnsibleEERunnerImage +| OpenStackAnsibleEERunnerImage image to use as the ansibleEE runner image +| string +| false + +| ansibleTags +| AnsibleTags for ansible execution +| string +| false + +| ansibleLimit +| AnsibleLimit for ansible execution +| string +| false + +| ansibleSkipTags +| AnsibleSkipTags for ansible execution +| string +| false + +| ServiceAccountName +| ServiceAccountName allows to specify what ServiceAccountName do we want the ansible execution run with. Without specifying, it will run with default serviceaccount +| string +| false +|=== + +<> + +[#ansibleopts] +==== AnsibleOpts + +AnsibleOpts defines a logical grouping of Ansible related configuration options. 
+ +|=== +| Field | Description | Scheme | Required + +| ansibleUser +| AnsibleUser SSH user for Ansible connection +| string +| true + +| ansibleHost +| AnsibleHost SSH host for Ansible connection +| string +| false + +| ansibleVars +| AnsibleVars for configuring ansible +| map[string]json.RawMessage +| false + +| ansibleVarsFrom +| AnsibleVarsFrom is a list of sources to populate ansible variables from. Values defined by an AnsibleVars with a duplicate key take precedence. +| []<> +| false + +| ansiblePort +| AnsiblePort SSH port for Ansible connection +| int +| false +|=== + +<> + +[#configmapenvsource] +==== ConfigMapEnvSource + +ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. + +|=== +| Field | Description | Scheme | Required + +| optional +| Specify whether the ConfigMap must be defined +| *bool +| false +|=== + +<> + +[#datasource] +==== DataSource + +DataSource represents the source of a set of ConfigMaps/Secrets + +|=== +| Field | Description | Scheme | Required + +| prefix +| An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +| string +| false + +| configMapRef +| The ConfigMap to select from +| *<> +| false + +| secretRef +| The Secret to select from +| *<> +| false +|=== + +<> + +[#localobjectreference] +==== LocalObjectReference + +LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + +|=== +| Field | Description | Scheme | Required + +| name +| Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +| string +| false +|=== + +<> + +[#nodesection] +==== NodeSection + +NodeSection defines the top level attributes inherited by nodes in the CR. 
+ +|=== +| Field | Description | Scheme | Required + +| extraMounts +| ExtraMounts containing files which can be mounted into an Ansible Execution Pod +| []storage.VolMounts +| false + +| networks +| Networks - Instance networks +| []infranetworkv1.IPSetNetwork +| false + +| userData +| UserData node specific user-data +| *corev1.SecretReference +| false + +| networkData +| NetworkData node specific network-data +| *corev1.SecretReference +| false + +| ansible +| Ansible is the group of Ansible related configuration options. +| <> +| false + +| hostName +| HostName - node name +| string +| false + +| managementNetwork +| ManagementNetwork - Name of network to use for management (SSH/Ansible) +| string +| false + +| preprovisioningNetworkDataName +| PreprovisioningNetworkDataName - NetworkData secret name in the local namespace for pre-provisioing +| string +| false +|=== + +<> + +[#nodetemplate] +==== NodeTemplate + +NodeTemplate is a specification of the node attributes that override top level attributes. + +|=== +| Field | Description | Scheme | Required + +| extraMounts +| ExtraMounts containing files which can be mounted into an Ansible Execution Pod +| []storage.VolMounts +| false + +| networks +| Networks - Instance networks +| []infranetworkv1.IPSetNetwork +| false + +| userData +| UserData node specific user-data +| *corev1.SecretReference +| false + +| networkData +| NetworkData node specific network-data +| *corev1.SecretReference +| false + +| ansibleSSHPrivateKeySecret +| AnsibleSSHPrivateKeySecret Name of a private SSH key secret containing private SSH key for connecting to node. The named secret must be of the form: Secret.data.ssh-privatekey: ++++++++++++ +| string +| true + +| managementNetwork +| ManagementNetwork - Name of network to use for management (SSH/Ansible) +| string +| true + +| ansible +| Ansible is the group of Ansible related configuration options. 
+| <> +| false +|=== + +<> + +[#secretenvsource] +==== SecretEnvSource + +SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables. + +|=== +| Field | Description | Scheme | Required + +| optional +| Specify whether the Secret must be defined +| *bool +| false +|=== + +<> diff --git a/docs/build_docs.sh b/docs/build_docs.sh new file mode 100755 index 000000000..5b9ee99ff --- /dev/null +++ b/docs/build_docs.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +set -ex -o pipefail + +FILES=() +PATHS=( + "apis/client/v1beta1/openstackclient_types.go" + "apis/core/v1beta1/openstackcontrolplane_types.go" + "apis/core/v1beta1/openstackversion_types.go" +) +DATAPLANE_PATHS=( + "apis/dataplane/v1beta1/openstackdataplanedeployment_types.go" + "apis/dataplane/v1beta1/openstackdataplanenodeset_types.go" + "apis/dataplane/v1beta1/openstackdataplaneservice_types.go" + "apis/dataplane/v1beta1/common.go" +) + +# Getting APIs from Services +SERVICE_PATH=($(MODCACHE=$(go env GOMODCACHE) awk '/openstack-k8s-operators/ && ! /lib-common/ && ! /openstack-operator/ && ! /infra/ && !
/replace/ {print ENVIRON["MODCACHE"] "/" $1 "@" $2 "/v1beta1/*_types.go"}' apis/go.mod)) +for SERVICE in ${SERVICE_PATH[@]};do + PATHS+=($(ls ${SERVICE})) +done + +# Getting APIs from Infra +INFRA_PATH=($(MODCACHE=$(go env GOMODCACHE) awk '/openstack-k8s-operators/ && /infra/ {print ENVIRON["MODCACHE"] "/" $1 "@" $2 "/"}' apis/go.mod)) +PATTERNS=("memcached/v1beta1/*_types.go" "network/v1beta1/*_types.go" "rabbitmq/v1beta1/*_types.go") +for INFRA in ${PATTERNS[@]};do + ls ${INFRA_PATH}${INFRA} + PATHS+=($(ls ${INFRA_PATH}${INFRA})) +done + +# Adding -f to all API paths +for API_PATH in ${PATHS[@]};do + FILES+=$(echo " -f $API_PATH") +done +for API_PATH in ${DATAPLANE_PATHS[@]};do + FILES+=$(echo " -f $API_PATH") +done + +# Build docs from APIs +${CRD_MARKDOWN} $FILES -n OpenStackClient -n OpenStackControlPlane -n OpenStackVersion -n OpenStackDataPlaneDeployment -n OpenStackDataPlaneNodeSet -n OpenStackDataPlaneService > docs/assemblies/custom_resources.md +bundle exec kramdoc --auto-ids docs/assemblies/custom_resources.md && rm docs/assemblies/custom_resources.md +sed -i "s/=== Custom/== Custom/g" docs/assemblies/custom_resources.adoc + +# Render HTML +cd docs +${MAKE} html BUILD=upstream +${MAKE} html BUILD=downstream diff --git a/docs/images/.gitkeep b/docs/images/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/docs/user.adoc b/docs/user.adoc new file mode 100644 index 000000000..8af9cac8e --- /dev/null +++ b/docs/user.adoc @@ -0,0 +1,137 @@ += OpenStack Operator +:toc: left +:toclevels: 3 +:icons: font +:compat-mode: +:doctype: book +:context: osp + +This is the primary operator for OpenStack. It is a "meta" operator, meaning it +serves to coordinate the other operators for OpenStack by watching and configuring +their CustomResources (CRs). Additionally installing this operator will automatically +install all required operator dependencies for installing/managing OpenStack. 
+ +== Description + +This project is built, modeled, and maintained with https://github.com/operator-framework/operator-sdk[operator-sdk]. + +== Getting Started + +You'll need a Kubernetes cluster to run against. You can use https://sigs.k8s.io/kind[KIND] to get a local cluster for testing, or run against a remote cluster. +*Note:* Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). + +=== Running on the cluster + +. Install Instances of Custom Resources: + +[,sh] +---- +kubectl apply -f config/samples/ +---- + +. Build and push your image to the location specified by `IMG`: + +[,sh] +---- +make docker-build docker-push IMG=/openstack-operator:tag +---- + +. Deploy the controller to the cluster with the image specified by `IMG`: + +[,sh] +---- +make deploy IMG=/openstack-operator:tag +---- + +=== Uninstall CRDs + +To delete the CRDs from the cluster: + +[,sh] +---- +make uninstall +---- + +=== Undeploy controller + +Undeploy the controller from the cluster: + +[,sh] +---- +make undeploy +---- + +=== Building your own bundle, index images + +The OpenStack operator uses multiple bundles to minimize the number of +deployment artifacts we have in the OLM catalog while also providing enough +space for our CRs (this is a big project). As such the build order for local +bundles is a bit different than normal. + +. Run `make bundle`. This pins down dependencies to the versions used in the go.mod + and also string replaces the URL for any dependent bundles (storage, etc) that + we will build below. Additionally a dependency.yaml is added to the generated bundle + so that we require any dependencies. This sets the stage for everything below. + +[,sh] +---- +make bundle +---- + +. Run dep-bundle-build-push. This creates any _dependency_ bundles required by this project. +It builds and pushes them to a registry as this is required to be able to build the main +bundle.
+ +[,sh] +---- +make dep-bundle-build-push +---- + +. Run bundle-build. This will execute podman to build the custom-bundle.Dockerfile. + +[,sh] +---- +make bundle-build +---- + +. Run bundle-push. This pushes the resulting bundle image to the registry. + +[,sh] +---- +make bundle-push +---- + +. Run catalog-build. At this point you can generate your index image so that it contains both of the above bundle images. Because we use dependencies in the openstack-operator's main bundle it will + automatically install the CSV contained in the dependant (storage, etc) bundle. + +[,sh] +---- +make catalog-build +---- + +. Run catalog-push. Push the catalog to your registry. + +[,sh] +---- +make catalog-push +---- + +=== Uninstall CRDs + +To delete the CRDs from the cluster: + +[,sh] +---- +make uninstall +---- + +=== Undeploy controller + +UnDeploy the controller to the cluster: + +[,sh] +---- +make undeploy +---- + +include::assemblies/custom_resources.adoc[leveloffset=-1] diff --git a/go.mod b/go.mod index 48bcc3138..b2b7dcec0 100644 --- a/go.mod +++ b/go.mod @@ -7,14 +7,15 @@ require ( github.com/cert-manager/cert-manager v1.13.6 github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.4.2 + github.com/go-playground/validator/v10 v10.21.0 github.com/google/uuid v1.6.0 + github.com/iancoleman/strcase v0.3.0 github.com/imdario/mergo v0.3.16 github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.33.1 github.com/openshift/api v3.9.0+incompatible github.com/openstack-k8s-operators/barbican-operator/api v0.0.0-20240603141403-1ad477d065a2 github.com/openstack-k8s-operators/cinder-operator/api v0.3.1-0.20240610101558-1ab6bba3433c - github.com/openstack-k8s-operators/dataplane-operator/api v0.3.1-0.20240608024258-319ec0e5a28b github.com/openstack-k8s-operators/designate-operator/api v0.0.0-20240604124031-77b21b330d86 github.com/openstack-k8s-operators/glance-operator/api v0.3.1-0.20240610071145-9fc8ef9f3c7c github.com/openstack-k8s-operators/heat-operator/api 
v0.3.1-0.20240610020058-4a722aada8e9 @@ -22,8 +23,10 @@ require ( github.com/openstack-k8s-operators/infra-operator/apis v0.3.1-0.20240604144138-996e41d1af19 github.com/openstack-k8s-operators/ironic-operator/api v0.3.1-0.20240607124904-2f2e4f3dd090 github.com/openstack-k8s-operators/keystone-operator/api v0.3.1-0.20240605055850-8ee0ece70906 + github.com/openstack-k8s-operators/lib-common/modules/ansible v0.3.1-0.20240606071226-62abb00585ce github.com/openstack-k8s-operators/lib-common/modules/certmanager v0.0.0-20240606071226-62abb00585ce github.com/openstack-k8s-operators/lib-common/modules/common v0.3.1-0.20240606071226-62abb00585ce + github.com/openstack-k8s-operators/lib-common/modules/storage v0.3.1-0.20240606071226-62abb00585ce github.com/openstack-k8s-operators/lib-common/modules/test v0.3.1-0.20240606071226-62abb00585ce github.com/openstack-k8s-operators/manila-operator/api v0.3.1-0.20240610185533-3683da06b8eb github.com/openstack-k8s-operators/mariadb-operator/api v0.3.1-0.20240604125710-954ab886bb52 @@ -42,6 +45,7 @@ require ( github.com/rabbitmq/cluster-operator/v2 v2.6.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 + gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.28.10 k8s.io/apimachinery v0.28.10 k8s.io/client-go v0.28.10 @@ -63,7 +67,6 @@ require ( github.com/go-openapi/swag v0.22.9 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.21.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -84,7 +87,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/openstack-k8s-operators/lib-common/modules/openstack v0.3.1-0.20240606071226-62abb00585ce // indirect - 
github.com/openstack-k8s-operators/lib-common/modules/storage v0.3.1-0.20240606071226-62abb00585ce // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -110,7 +112,6 @@ require ( google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.28.10 // indirect k8s.io/component-base v0.28.10 // indirect k8s.io/klog/v2 v2.120.1 // indirect diff --git a/go.sum b/go.sum index ada94b45d..ef6c27c15 100644 --- a/go.sum +++ b/go.sum @@ -63,6 +63,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -100,8 +102,6 @@ github.com/openstack-k8s-operators/barbican-operator/api v0.0.0-20240603141403-1 github.com/openstack-k8s-operators/barbican-operator/api v0.0.0-20240603141403-1ad477d065a2/go.mod h1:FBMttmFnsXleh/Gohxvgaz5c5HdF7Gsc6/ySC9TU770= github.com/openstack-k8s-operators/cinder-operator/api v0.3.1-0.20240610101558-1ab6bba3433c h1:SaPzqQ3alJfoQunjFZrxJWFgfO9v3MWqSkn/2NoSp+M= github.com/openstack-k8s-operators/cinder-operator/api v0.3.1-0.20240610101558-1ab6bba3433c/go.mod 
h1:8Wn6ZAPaJshxozJVPI7uq4qrcUXZmECGAPJK7Ed+uGQ= -github.com/openstack-k8s-operators/dataplane-operator/api v0.3.1-0.20240608024258-319ec0e5a28b h1:VRAXcHzieo0mYoXLwkZD1nrfqXyk0a/ok085KKiJDo0= -github.com/openstack-k8s-operators/dataplane-operator/api v0.3.1-0.20240608024258-319ec0e5a28b/go.mod h1:ESTcEmtCGEvlrMPzdbNDeIUozxfKA4zEqhYt/rxJxmE= github.com/openstack-k8s-operators/designate-operator/api v0.0.0-20240604124031-77b21b330d86 h1:pE/BD9Qg5A2CFQHiKJfqZ8Os7obIoTsy8UYNI3sDblc= github.com/openstack-k8s-operators/designate-operator/api v0.0.0-20240604124031-77b21b330d86/go.mod h1:u53p2KRT083miTWA5oQlU4zITB4FXJOzI/eao51CkbE= github.com/openstack-k8s-operators/glance-operator/api v0.3.1-0.20240610071145-9fc8ef9f3c7c h1:MvciJWQenAmUjAL5p3TkSlgZ45VUc0nzCWJpW+2VLME= @@ -116,6 +116,8 @@ github.com/openstack-k8s-operators/ironic-operator/api v0.3.1-0.20240607124904-2 github.com/openstack-k8s-operators/ironic-operator/api v0.3.1-0.20240607124904-2f2e4f3dd090/go.mod h1:Xq0M+QuVsTmoyQFcSn0Kyp2Wj/7nNJfB2jydUt/Q/LU= github.com/openstack-k8s-operators/keystone-operator/api v0.3.1-0.20240605055850-8ee0ece70906 h1:sq7CD7w44uAs+5Yd5wX1TgZR2vyMKY3YN6iddCJwz8M= github.com/openstack-k8s-operators/keystone-operator/api v0.3.1-0.20240605055850-8ee0ece70906/go.mod h1:GWJdAtjPUEwNFbRKwlXkeTHMiGCqySE4qJo0fKZSayo= +github.com/openstack-k8s-operators/lib-common/modules/ansible v0.3.1-0.20240606071226-62abb00585ce h1:WPkI7OrKcojZQiWB2vflbG6wPTJ4dKSg9u7XEBK3nFs= +github.com/openstack-k8s-operators/lib-common/modules/ansible v0.3.1-0.20240606071226-62abb00585ce/go.mod h1:tP+nxk95PisCKJaXE/an2igG9lluxuOVhdmV9WtkR2s= github.com/openstack-k8s-operators/lib-common/modules/certmanager v0.0.0-20240606071226-62abb00585ce h1:3ufbd5r8M1B2d94cHml3g+0gjFa41QQG7Ra5yuO67eI= github.com/openstack-k8s-operators/lib-common/modules/certmanager v0.0.0-20240606071226-62abb00585ce/go.mod h1:f7idGLPDLGerFTLB1W8R+eb68GjI3105VoqtzKH77LI= github.com/openstack-k8s-operators/lib-common/modules/common 
v0.3.1-0.20240606071226-62abb00585ce h1:3+BwULpV9ooBYtZ3CVnOXsO40gu/w76a8tqvM45qLYk= diff --git a/hack/bundle-cache-data.sh b/hack/bundle-cache-data.sh index fc4495110..ec06440c9 100755 --- a/hack/bundle-cache-data.sh +++ b/hack/bundle-cache-data.sh @@ -5,14 +5,6 @@ # -dataplane-operator bundle is cached (in order to merge at build time) set -ex -function extract_bundle { - local IN_DIR=$1 - local OUT_DIR=$2 - for X in $(file ${IN_DIR}/* | grep gzip | cut -f 1 -d ':'); do - tar xvf $X -C ${OUT_DIR}/; - done -} - function extract_csv { local IN_DIR=$1 local OUT_DIR=$2 @@ -34,11 +26,7 @@ mkdir -p "$OUT_BUNDLE" for BUNDLE in $(hack/pin-bundle-images.sh | tr "," " "); do skopeo copy "docker://$BUNDLE" dir:${EXTRACT_DIR}/tmp; - if echo $BUNDLE | grep dataplane-operator &> /dev/null; then - extract_bundle "${EXTRACT_DIR}/tmp" "${OUT_BUNDLE}/" - else - extract_csv "${EXTRACT_DIR}/tmp" "${EXTRACT_DIR}/csvs" - fi + extract_csv "${EXTRACT_DIR}/tmp" "${EXTRACT_DIR}/csvs" done # Extract the ENV vars from all the CSVs diff --git a/hack/clean_local_webhook.sh b/hack/clean_local_webhook.sh index 90d1a69a4..c083a42a7 100755 --- a/hack/clean_local_webhook.sh +++ b/hack/clean_local_webhook.sh @@ -5,3 +5,9 @@ oc delete validatingwebhookconfiguration vopenstackcontrolplane.kb.io --ignore-n oc delete mutatingwebhookconfiguration mopenstackcontrolplane.kb.io --ignore-not-found oc delete validatingwebhookconfiguration/vopenstackclient.kb.io --ignore-not-found oc delete mutatingwebhookconfiguration/mopenstackclient.kb.io --ignore-not-found +oc delete validatingwebhookconfiguration/vopenstackdataplanenodeset.kb.io --ignore-not-found +oc delete validatingwebhookconfiguration/vopenstackdataplanedeployment.kb.io --ignore-not-found +oc delete validatingwebhookconfiguration/vopenstackdataplaneservice.kb.io --ignore-not-found +oc delete mutatingwebhookconfiguration/mopenstackdataplanenodeset.kb.io --ignore-not-found +oc delete mutatingwebhookconfiguration/mopenstackdataplaneservice.kb.io 
--ignore-not-found +oc delete mutatingwebhookconfiguration/mopenstackdataplanedeployment.kb.io --ignore-not-found diff --git a/hack/configure_local_webhook.sh b/hack/configure_local_webhook.sh index 1ec23d127..cd23bd8e2 100755 --- a/hack/configure_local_webhook.sh +++ b/hack/configure_local_webhook.sh @@ -140,6 +140,174 @@ webhooks: scope: '*' sideEffects: None timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: vopenstackdataplanenodeset.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: https://${CRC_IP}:9443/validate-dataplane-openstack-org-v1beta1-openstackdataplanenodeset + failurePolicy: Fail + matchPolicy: Equivalent + name: vopenstackdataplanenodeset.kb.io + objectSelector: {} + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanenodesets + scope: '*' + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mopenstackdataplanenodeset.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: https://${CRC_IP}:9443/mutate-dataplane-openstack-org-v1beta1-openstackdataplanenodeset + failurePolicy: Fail + matchPolicy: Equivalent + name: mopenstackdataplanenodeset.kb.io + objectSelector: {} + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanenodesets + scope: '*' + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: vopenstackdataplanedeployment.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: 
https://${CRC_IP}:9443/validate-dataplane-openstack-org-v1beta1-openstackdataplanedeployment + failurePolicy: Fail + matchPolicy: Equivalent + name: vopenstackdataplanedeployment.kb.io + objectSelector: {} + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanedeployments + scope: '*' + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mopenstackdataplanedeployment.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: https://${CRC_IP}:9443/mutate-dataplane-openstack-org-v1beta1-openstackdataplanedeployment + failurePolicy: Fail + matchPolicy: Equivalent + name: mopenstackdataplanedeployment.kb.io + objectSelector: {} + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplanedeployments + scope: '*' + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: vopenstackdataplaneservice.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: https://${CRC_IP}:9443/validate-dataplane-openstack-org-v1beta1-openstackdataplaneservice + failurePolicy: Fail + matchPolicy: Equivalent + name: vopenstackdataplaneservice.kb.io + objectSelector: {} + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplaneservices + scope: '*' + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mopenstackdataplaneservice.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: 
https://${CRC_IP}:9443/mutate-dataplane-openstack-org-v1beta1-openstackdataplaneservice + failurePolicy: Fail + matchPolicy: Equivalent + name: mopenstackdataplaneservice.kb.io + objectSelector: {} + rules: + - apiGroups: + - dataplane.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - openstackdataplaneservices + scope: '*' + sideEffects: None + timeoutSeconds: 10 EOF_CAT oc apply -n openstack -f ${TMPDIR}/patch_webhook_configurations.yaml diff --git a/kuttl-test.yaml b/kuttl-test.yaml index 771ee66a7..7a42f84a7 100644 --- a/kuttl-test.yaml +++ b/kuttl-test.yaml @@ -16,6 +16,22 @@ apiVersion: kuttl.dev/v1beta1 kind: TestSuite +commands: + - command: oc apply -n openstack-kuttl-tests -f https://raw.githubusercontent.com/openstack-k8s-operators/infra-operator/main/config/samples/network_v1beta1_netconfig.yaml + - command: oc apply -n openstack-kuttl-tests -f https://raw.githubusercontent.com/openstack-k8s-operators/infra-operator/main/config/samples/network_v1beta1_dnsmasq.yaml + - script: | + if [ ! 
-f ansibleee-ssh-key-id_rsa ]; then + ssh-keygen -f ansibleee-ssh-key-id_rsa -N "" -t rsa -b 4096 + fi + oc create secret generic dataplane-ansible-ssh-private-key-secret \ + --save-config \ + --dry-run=client \ + --from-file=authorized_keys=ansibleee-ssh-key-id_rsa.pub \ + --from-file=ssh-privatekey=ansibleee-ssh-key-id_rsa \ + --from-file=ssh-publickey=ansibleee-ssh-key-id_rsa.pub \ + -n openstack-kuttl-tests \ + -o yaml | \ + oc apply -f - reportFormat: JSON reportName: kuttl-test-openstack namespace: openstack-kuttl-tests diff --git a/main.go b/main.go index 859c0f812..a3baada80 100644 --- a/main.go +++ b/main.go @@ -32,7 +32,6 @@ import ( certmgrv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" barbicanv1 "github.com/openstack-k8s-operators/barbican-operator/api/v1beta1" cinderv1 "github.com/openstack-k8s-operators/cinder-operator/api/v1beta1" - dataplanev1beta1 "github.com/openstack-k8s-operators/dataplane-operator/api/v1beta1" designatev1 "github.com/openstack-k8s-operators/designate-operator/api/v1beta1" glancev1 "github.com/openstack-k8s-operators/glance-operator/api/v1beta1" heatv1 "github.com/openstack-k8s-operators/heat-operator/api/v1beta1" @@ -74,9 +73,11 @@ import ( clientv1 "github.com/openstack-k8s-operators/openstack-operator/apis/client/v1beta1" corev1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" clientcontrollers "github.com/openstack-k8s-operators/openstack-operator/controllers/client" corecontrollers "github.com/openstack-k8s-operators/openstack-operator/controllers/core" + dataplanecontrollers "github.com/openstack-k8s-operators/openstack-operator/controllers/dataplane" "github.com/openstack-k8s-operators/openstack-operator/pkg/openstack" //+kubebuilder:scaffold:imports ) @@ -89,6 +90,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(corev1.AddToScheme(scheme)) + 
utilruntime.Must(dataplanev1.AddToScheme(scheme)) utilruntime.Must(keystonev1.AddToScheme(scheme)) utilruntime.Must(mariadbv1.AddToScheme(scheme)) utilruntime.Must(memcachedv1.AddToScheme(scheme)) @@ -104,7 +106,6 @@ func init() { utilruntime.Must(neutronv1.AddToScheme(scheme)) utilruntime.Must(octaviav1.AddToScheme(scheme)) utilruntime.Must(designatev1.AddToScheme(scheme)) - utilruntime.Must(dataplanev1beta1.AddToScheme(scheme)) utilruntime.Must(ansibleeev1.AddToScheme(scheme)) utilruntime.Must(rabbitmqv1.AddToScheme(scheme)) utilruntime.Must(manilav1.AddToScheme(scheme)) @@ -217,6 +218,24 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "OpenStackVersion") os.Exit(1) } + + if err = (&dataplanecontrollers.OpenStackDataPlaneNodeSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Kclient: kclient, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OpenStackDataPlaneNodeSet") + os.Exit(1) + } + + if err = (&dataplanecontrollers.OpenStackDataPlaneDeploymentReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Kclient: kclient, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OpenStackDataPlaneDeployment") + os.Exit(1) + } corecontrollers.SetupVersionDefaults() // Defaults for service operators @@ -225,6 +244,9 @@ func main() { // Defaults for OpenStackClient clientv1.SetupDefaults() + // Defaults for Dataplane + dataplanev1.SetupDefaults() + // Defaults for anything else that was not covered by OpenStackClient nor service operator defaults corev1.SetupDefaults() corev1.SetupVersionDefaults() @@ -245,6 +267,18 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackVersion") os.Exit(1) } + if err = (&dataplanev1.OpenStackDataPlaneNodeSet{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", 
"OpenStackDataPlaneNodeSet") + os.Exit(1) + } + if err = (&dataplanev1.OpenStackDataPlaneDeployment{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackDataPlaneDeployment") + os.Exit(1) + } + if err = (&dataplanev1.OpenStackDataPlaneService{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackDataPlaneService") + os.Exit(1) + } checker = mgr.GetWebhookServer().StartedChecker() } diff --git a/pkg/dataplane/baremetal.go b/pkg/dataplane/baremetal.go new file mode 100644 index 000000000..87ce2a465 --- /dev/null +++ b/pkg/dataplane/baremetal.go @@ -0,0 +1,109 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package deployment + +import ( + "context" + "fmt" + "net" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + utils "github.com/openstack-k8s-operators/lib-common/modules/common/util" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" +) + +// DeployBaremetalSet Deploy OpenStackBaremetalSet +func DeployBaremetalSet( + ctx context.Context, helper *helper.Helper, instance *dataplanev1.OpenStackDataPlaneNodeSet, + ipSets map[string]infranetworkv1.IPSet, + dnsAddresses []string, +) (bool, error) { + baremetalSet := &baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + Namespace: instance.Namespace, + }, + } + + if instance.Spec.BaremetalSetTemplate.BaremetalHosts == nil { + return false, fmt.Errorf("no baremetal hosts set in baremetalSetTemplate") + } + utils.LogForObject(helper, "Reconciling BaremetalSet", instance) + _, err := controllerutil.CreateOrPatch(ctx, helper.GetClient(), baremetalSet, func() error { + instance.Spec.BaremetalSetTemplate.DeepCopyInto(&baremetalSet.Spec) + for _, node := range instance.Spec.Nodes { + hostName := node.HostName + ipSet, ok := ipSets[hostName] + instanceSpec := baremetalSet.Spec.BaremetalHosts[hostName] + if !ok { + // TODO: Change this to raise an error instead. + // NOTE(hjensas): Hardcode /24 here, this used to rely on + // baremetalSet.Spec.CtlplaneNetmask's default value ("255.255.255.0"). 
+ utils.LogForObject(helper, "IPAM Not configured for use, skipping", instance) + instanceSpec.CtlPlaneIP = fmt.Sprintf("%s/24", node.Ansible.AnsibleHost) + } else { + for _, res := range ipSet.Status.Reservation { + if strings.ToLower(string(res.Network)) == CtlPlaneNetwork { + _, ipNet, err := net.ParseCIDR(res.Cidr) + if err != nil { + return err + } + ipPrefix, _ := ipNet.Mask.Size() + instanceSpec.CtlPlaneIP = fmt.Sprintf("%s/%d", res.Address, ipPrefix) + baremetalSet.Spec.CtlplaneGateway = *res.Gateway + baremetalSet.Spec.BootstrapDNS = dnsAddresses + baremetalSet.Spec.DNSSearchDomains = []string{res.DNSDomain} + } + } + } + baremetalSet.Spec.BaremetalHosts[hostName] = instanceSpec + + } + err := controllerutil.SetControllerReference( + helper.GetBeforeObject(), baremetalSet, helper.GetScheme()) + return err + }) + + if err != nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetBareMetalProvisionReadyCondition, + condition.ErrorReason, condition.SeverityError, + dataplanev1.NodeSetBaremetalProvisionErrorMessage) + return false, err + } + + // Check if baremetalSet is ready + if !baremetalSet.IsReady() { + utils.LogForObject(helper, "BaremetalSet not ready, waiting...", instance) + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetBareMetalProvisionReadyCondition, + condition.RequestedReason, condition.SeverityInfo, + dataplanev1.NodeSetBaremetalProvisionReadyWaitingMessage) + return false, nil + } + instance.Status.Conditions.MarkTrue( + dataplanev1.NodeSetBareMetalProvisionReadyCondition, + dataplanev1.NodeSetBaremetalProvisionReadyMessage) + return true, nil +} diff --git a/pkg/dataplane/cert.go b/pkg/dataplane/cert.go new file mode 100644 index 000000000..a13db62ca --- /dev/null +++ b/pkg/dataplane/cert.go @@ -0,0 +1,286 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "golang.org/x/exp/slices" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + certmgrv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/certmanager" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" +) + +// Generates an organized data structure that is leveraged to create the secrets. +func createSecretsDataStructure(secretMaxSize int, + certsData map[string][]byte, +) []map[string][]byte { + ci := []map[string][]byte{} + + keys := []string{} + for k := range certsData { + keys = append(keys, k) + } + sort.Strings(keys) + + totalSize := secretMaxSize + var cur *map[string][]byte + // Going 3 by 3 to include CA, crt and key, in the same secret. 
+ for k := 0; k < len(keys)-1; k += 3 { + szCa := len(certsData[keys[k]]) + len(keys[k]) + szCrt := len(certsData[keys[k+1]]) + len(keys[k+1]) + szKey := len(certsData[keys[k+2]]) + len(keys[k+2]) + sz := szCa + szCrt + szKey + if (totalSize + sz) > secretMaxSize { + i := len(ci) + ci = append(ci, make(map[string][]byte)) + cur = &ci[i] + totalSize = 0 + } + totalSize += sz + (*cur)[keys[k]] = certsData[keys[k]] + (*cur)[keys[k+1]] = certsData[keys[k+1]] + (*cur)[keys[k+2]] = certsData[keys[k+2]] + } + + return ci +} + +// EnsureTLSCerts generates secrets containing all the certificates for the relevant service +// These secrets will be mounted by the ansibleEE pod as an extra mount when the service is deployed. +func EnsureTLSCerts(ctx context.Context, helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, + allHostnames map[string]map[infranetworkv1.NetNameStr]string, + allIPs map[string]map[infranetworkv1.NetNameStr]string, + service dataplanev1.OpenStackDataPlaneService, + certKey string, +) (*ctrl.Result, error) { + certsData := map[string][]byte{} + secretMaxSize := instance.Spec.SecretMaxSize + + // for each node in the nodeset, issue all the TLS certs needed based on the + // ips or DNS Names + for nodeName, node := range instance.Spec.Nodes { + var dnsNames map[infranetworkv1.NetNameStr]string + var ipsMap map[infranetworkv1.NetNameStr]string + var hosts []string + var ips []string + var issuer *certmgrv1.Issuer + var issuerLabelSelector map[string]string + var certName string + var certSecret *corev1.Secret + var err error + var result ctrl.Result + + // TODO(alee) decide if we want to use other labels + // For now we just add the hostname so we can select all the certs on one node + hostName := node.HostName + labels := map[string]string{ + HostnameLabel: hostName, + ServiceLabel: service.Name, + ServiceKeyLabel: certKey, + NodeSetLabel: instance.Name, + } + certName = service.Name + "-" + certKey + "-" + hostName + + dnsNames = 
allHostnames[hostName] + ipsMap = allIPs[hostName] + + dnsNamesInCert := slices.Contains(service.Spec.TLSCerts[certKey].Contents, DNSNamesStr) + ipValuesInCert := slices.Contains(service.Spec.TLSCerts[certKey].Contents, IPValuesStr) + + // Create the hosts and ips lists + if dnsNamesInCert { + if len(service.Spec.TLSCerts[certKey].Networks) == 0 { + hosts = make([]string, 0, len(dnsNames)) + for _, host := range dnsNames { + hosts = append(hosts, host) + } + } else { + hosts = make([]string, 0, len(service.Spec.TLSCerts[certKey].Networks)) + for _, network := range service.Spec.TLSCerts[certKey].Networks { + certNetwork := strings.ToLower(string(network)) + hosts = append(hosts, dnsNames[infranetworkv1.NetNameStr(certNetwork)]) + } + } + } + if ipValuesInCert { + if len(service.Spec.TLSCerts[certKey].Networks) == 0 { + ips = make([]string, 0, len(ipsMap)) + for _, ip := range ipsMap { + ips = append(ips, ip) + } + } else { + ips = make([]string, 0, len(service.Spec.TLSCerts[certKey].Networks)) + for _, network := range service.Spec.TLSCerts[certKey].Networks { + certNetwork := strings.ToLower(string(network)) + ips = append(ips, ipsMap[infranetworkv1.NetNameStr(certNetwork)]) + } + } + } + + if service.Spec.TLSCerts[certKey].Issuer == "" { + // by default, use the internal root CA + issuerLabelSelector = map[string]string{certmanager.RootCAIssuerInternalLabel: ""} + } else { + issuerLabelSelector = map[string]string{service.Spec.TLSCerts[certKey].Issuer: ""} + } + + issuer, err = certmanager.GetIssuerByLabels(ctx, helper, instance.Namespace, issuerLabelSelector) + if err != nil { + helper.GetLogger().Info("Error retrieving issuer by label", "issuerLabelSelector", issuerLabelSelector) + return &result, err + } + + // NOTE: we are assuming that there will always be a ctlplane network + // that means if you are not using network isolation with multiple networks + // you should still need to have a ctlplane network at a minimum to use tls-e + baseName, ok := 
dnsNames[CtlPlaneNetwork] + if !ok { + return &result, fmt.Errorf( + "control plane network not found for node %s , tls-e requires a control plane network to be present", + nodeName) + } + + certSecret, result, err = GetTLSNodeCert(ctx, helper, instance, certName, + issuer, labels, baseName, hosts, ips, service.Spec.TLSCerts[certKey].KeyUsages) + + // handle cert request errors + if (err != nil) || (result != ctrl.Result{}) { + return &result, err + } + + // TODO(alee) Add an owner reference to the secret so it can be monitored + // We'll do this once stuggi adds a function to do this in libcommon + + // To use this cert, add it to the relevant service data + certsData[baseName+"-tls.key"] = certSecret.Data["tls.key"] + certsData[baseName+"-tls.crt"] = certSecret.Data["tls.crt"] + certsData[baseName+"-ca.crt"] = certSecret.Data["ca.crt"] + } + + // Calculate number of secrets to create + ci := createSecretsDataStructure(secretMaxSize, certsData) + + labels := map[string]string{ + "numberOfSecrets": strconv.Itoa(len(ci)), + } + // create secrets to hold the certs for the services + for i := range ci { + labels["secretNumber"] = strconv.Itoa(i) + serviceCertsSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetServiceCertsSecretName(instance, service.Name, certKey, i), + Namespace: instance.Namespace, + Labels: labels, + }, + Data: ci[i], + } + _, result, err := secret.CreateOrPatchSecret(ctx, helper, instance, serviceCertsSecret) + if err != nil { + err = fmt.Errorf("error creating certs secret for %s - %w", service.Name, err) + return &ctrl.Result{}, err + } else if result != controllerutil.OperationResultNone { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + } + + return &ctrl.Result{}, nil +} + +// GetTLSNodeCert creates or retrieves the cert for a node for a given service +func GetTLSNodeCert(ctx context.Context, helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, + certName string, issuer *certmgrv1.Issuer, 
+ labels map[string]string, + commonName string, + hostnames []string, ips []string, usages []certmgrv1.KeyUsage, +) (*corev1.Secret, ctrl.Result, error) { + // use cert duration and renewBefore from annotations set on issuer + // - if no duration annotation is set, use the default from certmanager lib-common module, + // - if no renewBefore annotation is set, the cert-manager default is used. + durationString := certmanager.CertDefaultDuration + if d, ok := issuer.Annotations[certmanager.CertDurationAnnotation]; ok && d != "" { + durationString = d + } + duration, err := time.ParseDuration(durationString) + if err != nil { + err = fmt.Errorf("error parsing duration annotation %s - %w", certmanager.CertDurationAnnotation, err) + return nil, ctrl.Result{}, err + } + + var renewBefore *time.Duration + if r, ok := issuer.Annotations[certmanager.CertRenewBeforeAnnotation]; ok && r != "" { + rb, err := time.ParseDuration(r) + if err != nil { + err = fmt.Errorf("error parsing renewBefore annotation %s - %w", certmanager.CertRenewBeforeAnnotation, err) + return nil, ctrl.Result{}, err + } + + renewBefore = &rb + } + + request := certmanager.CertificateRequest{ + CommonName: &commonName, + IssuerName: issuer.Name, + CertName: certName, + Duration: &duration, + RenewBefore: renewBefore, + Hostnames: hostnames, + Ips: ips, + Annotations: nil, + Labels: labels, + Usages: usages, + Subject: &certmgrv1.X509Subject{ + // NOTE(owalsh): For libvirt/QEMU this should match issuer CN + Organizations: []string{issuer.Name}, + }, + } + + certSecret, result, err := certmanager.EnsureCert(ctx, helper, request, instance) + if err != nil { + return nil, ctrl.Result{}, err + } else if (result != ctrl.Result{}) { + return nil, result, nil + } + + return certSecret, ctrl.Result{}, nil +} + +// GetServiceCertsSecretName - return name of secret to be mounted in ansibleEE which contains +// all the TLS certs that fit in a secret for the relevant service. 
The index variable is used +// to make the secret name unique. +// The convention we use here is "---certs-", for example, +// openstack-epdm-nova-default-certs-0. +func GetServiceCertsSecretName(instance *dataplanev1.OpenStackDataPlaneNodeSet, serviceName string, + certKey string, index int) string { + return fmt.Sprintf("%s-%s-%s-certs-%s", instance.Name, serviceName, certKey, strconv.Itoa(index)) +} diff --git a/pkg/dataplane/const.go b/pkg/dataplane/const.go new file mode 100644 index 000000000..79f24c64a --- /dev/null +++ b/pkg/dataplane/const.go @@ -0,0 +1,74 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package deployment + +const ( + + // CtlPlaneNetwork - default ctlplane Network Name in NetConfig + CtlPlaneNetwork = "ctlplane" + + // ValidateNetworkLabel for ValidateNetwork OpenStackAnsibleEE + ValidateNetworkLabel = "validate-network" + + // InstallOSLabel for InstallOS OpenStackAnsibleEE + InstallOSLabel = "install-os" + + // ConfigureOSLabel for ConfigureOS OpenStackAnsibleEE + ConfigureOSLabel = "configure-os" + + // RunOSLabel for RunOS OpenStackAnsibleEE + RunOSLabel = "run-os" + + // InstallOpenStackLabel for InstallOpenStack OpenStackAnsibleEE + InstallOpenStackLabel = "install-openstack" + + // ConfigureOpenStackLabel for ConfigureOpenStack OpenStackAnsibleEE + ConfigureOpenStackLabel = "configure-openstack" + + // RunOpenStackLabel for RunOpenStack OpenStackAnsibleEE + RunOpenStackLabel = "run-openstack" + + // NicConfigTemplateFile is the custom nic config file we use when user provided network config templates are provided. + NicConfigTemplateFile = "/runner/network/nic-config-template" + + // ConfigPaths base path for volume mounts in OpenStackAnsibleEE pod + ConfigPaths = "/var/lib/openstack/configs" + + // CertPaths base path for cert volume mount in OpenStackAnsibleEE pod + CertPaths = "/var/lib/openstack/certs" + + // CACertPaths base path for CA cert volume mount in OpenStackAnsibleEE pod + CACertPaths = "/var/lib/openstack/cacerts" + + // DNSNamesStr value for setting dns values in a cert + DNSNamesStr = "dnsnames" + + // IPValuesStr value for setting ip addresses in a cert + IPValuesStr = "ips" + + // NodeSetLabel label for marking secrets to be watched for changes + NodeSetLabel = "osdpns" + + //ServiceLabel label for marking secrets to be watched for changes + ServiceLabel = "osdp-service" + + //ServiceKeyLabel label for marking secrets to be watched for changes + ServiceKeyLabel = "osdp-service-cert-key" + + //HostnameLabel label for marking secrets to be watched for changes + HostnameLabel = "hostname" +) diff --git 
a/pkg/dataplane/deployment.go b/pkg/dataplane/deployment.go new file mode 100644 index 000000000..bf515eee7 --- /dev/null +++ b/pkg/dataplane/deployment.go @@ -0,0 +1,496 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "fmt" + "path" + "reflect" + "sort" + "strconv" + + slices "golang.org/x/exp/slices" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/iancoleman/strcase" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + "github.com/openstack-k8s-operators/lib-common/modules/storage" + ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" + corev1 "k8s.io/api/core/v1" +) + +// Deployer defines a data structure with all of the relevant objects required for a full deployment. 
+type Deployer struct { + Ctx context.Context + Helper *helper.Helper + NodeSet *dataplanev1.OpenStackDataPlaneNodeSet + Deployment *dataplanev1.OpenStackDataPlaneDeployment + Status *dataplanev1.OpenStackDataPlaneDeploymentStatus + AeeSpec *dataplanev1.AnsibleEESpec + InventorySecrets map[string]string + AnsibleSSHPrivateKeySecrets map[string]string + Version *openstackv1.OpenStackVersion +} + +// Deploy function encapsulating primary deloyment handling +func (d *Deployer) Deploy(services []string) (*ctrl.Result, error) { + log := d.Helper.GetLogger() + + var readyCondition condition.Type + var readyMessage string + var readyWaitingMessage string + var readyErrorMessage string + var deployName string + + // Save a copy of the original ExtraMounts so it can be reset after each + // service deployment + aeeSpecMounts := make([]storage.VolMounts, len(d.AeeSpec.ExtraMounts)) + copy(aeeSpecMounts, d.AeeSpec.ExtraMounts) + // Deploy the composable services + for _, service := range services { + deployName = service + readyCondition = condition.Type(fmt.Sprintf("Service%sDeploymentReady", strcase.ToCamel(service))) + readyWaitingMessage = fmt.Sprintf(dataplanev1.NodeSetServiceDeploymentReadyWaitingMessage, deployName) + readyMessage = fmt.Sprintf(dataplanev1.NodeSetServiceDeploymentReadyMessage, deployName) + readyErrorMessage = fmt.Sprintf(dataplanev1.NodeSetServiceDeploymentErrorMessage, deployName) + " error %s" + + nsConditions := d.Status.NodeSetConditions[d.NodeSet.Name] + log.Info("Deploying service", "service", service) + foundService, err := GetService(d.Ctx, d.Helper, service) + if err != nil { + nsConditions.Set(condition.FalseCondition( + readyCondition, + condition.ErrorReason, + condition.SeverityError, + readyErrorMessage, + err.Error())) + d.Status.NodeSetConditions[d.NodeSet.Name] = nsConditions + return &ctrl.Result{}, err + } + + containerImages := dataplaneutil.GetContainerImages(d.Version) + if containerImages.AnsibleeeImage != nil { + 
d.AeeSpec.OpenStackAnsibleEERunnerImage = *containerImages.AnsibleeeImage + } + if len(foundService.Spec.OpenStackAnsibleEERunnerImage) > 0 { + d.AeeSpec.OpenStackAnsibleEERunnerImage = foundService.Spec.OpenStackAnsibleEERunnerImage + } + + // Reset ExtraMounts to its original value, and then add in service + // specific mounts. + d.AeeSpec.ExtraMounts = make([]storage.VolMounts, len(aeeSpecMounts)) + copy(d.AeeSpec.ExtraMounts, aeeSpecMounts) + d.AeeSpec, err = d.addServiceExtraMounts(foundService) + if err != nil { + nsConditions.Set(condition.FalseCondition( + readyCondition, + condition.ErrorReason, + condition.SeverityError, + readyErrorMessage, + err.Error())) + d.Status.NodeSetConditions[d.NodeSet.Name] = nsConditions + return &ctrl.Result{}, err + } + + // Add certMounts if TLS is enabled + if d.NodeSet.Spec.TLSEnabled { + if foundService.Spec.AddCertMounts { + d.AeeSpec, err = d.addCertMounts(services) + } + if err != nil { + nsConditions.Set(condition.FalseCondition( + readyCondition, + condition.ErrorReason, + condition.SeverityError, + readyErrorMessage, + err.Error())) + d.Status.NodeSetConditions[d.NodeSet.Name] = nsConditions + return &ctrl.Result{}, err + } + } + + err = d.ConditionalDeploy( + readyCondition, + readyMessage, + readyWaitingMessage, + readyErrorMessage, + deployName, + foundService, + ) + + nsConditions = d.Status.NodeSetConditions[d.NodeSet.Name] + if err != nil || !nsConditions.IsTrue(readyCondition) { + log.Info(fmt.Sprintf("Condition %s not ready", readyCondition)) + return &ctrl.Result{}, err + } + + log.Info(fmt.Sprintf("Condition %s ready", readyCondition)) + + // (TODO) Only considers the container image values from the Version + // for the time being. Can be expanded later to look at the actual + // values used from the inventory, etc. 
+ if d.Version != nil { + vContainerImages := reflect.ValueOf(d.Version.Status.ContainerImages) + for _, cif := range foundService.Spec.ContainerImageFields { + d.Deployment.Status.ContainerImages[cif] = reflect.Indirect(vContainerImages.FieldByName(cif)).String() + } + } + + } + + return nil, nil +} + +// ConditionalDeploy function encapsulating primary deloyment handling with +// conditions. +func (d *Deployer) ConditionalDeploy( + readyCondition condition.Type, + readyMessage string, + readyWaitingMessage string, + readyErrorMessage string, + deployName string, + foundService dataplanev1.OpenStackDataPlaneService, +) error { + var err error + log := d.Helper.GetLogger() + + nsConditions := d.Status.NodeSetConditions[d.NodeSet.Name] + if nsConditions.IsUnknown(readyCondition) { + log.Info(fmt.Sprintf("%s Unknown, starting %s", readyCondition, deployName)) + err = d.DeployService( + foundService) + if err != nil { + util.LogErrorForObject(d.Helper, err, fmt.Sprintf("Unable to %s for %s", deployName, d.NodeSet.Name), d.NodeSet) + return err + } + nsConditions.Set(condition.FalseCondition( + readyCondition, + condition.RequestedReason, + condition.SeverityInfo, + readyWaitingMessage)) + + } + + if nsConditions.IsFalse(readyCondition) { + var ansibleEE *ansibleeev1.OpenStackAnsibleEE + _, labelSelector := dataplaneutil.GetAnsibleExecutionNameAndLabels(&foundService, d.Deployment.Name, d.NodeSet.Name) + ansibleEE, err = dataplaneutil.GetAnsibleExecution(d.Ctx, d.Helper, d.Deployment, labelSelector) + if err != nil { + // Return nil if we don't have AnsibleEE available yet + if k8s_errors.IsNotFound(err) { + log.Info(fmt.Sprintf("%s OpenStackAnsibleEE not yet found", readyCondition)) + return nil + } + log.Error(err, fmt.Sprintf("Error getting ansibleEE job for %s", deployName)) + nsConditions.Set(condition.FalseCondition( + readyCondition, + condition.ErrorReason, + condition.SeverityError, + readyErrorMessage, + err.Error())) + } + + if ansibleEE.Status.JobStatus == 
ansibleeev1.JobStatusSucceeded { + log.Info(fmt.Sprintf("Condition %s ready", readyCondition)) + nsConditions.Set(condition.TrueCondition( + readyCondition, + readyMessage)) + } + + if ansibleEE.Status.JobStatus == ansibleeev1.JobStatusRunning || ansibleEE.Status.JobStatus == ansibleeev1.JobStatusPending { + log.Info(fmt.Sprintf("AnsibleEE job is not yet completed: Execution: %s, Status: %s", ansibleEE.Name, ansibleEE.Status.JobStatus)) + nsConditions.Set(condition.FalseCondition( + readyCondition, + condition.RequestedReason, + condition.SeverityInfo, + readyWaitingMessage)) + } + + if ansibleEE.Status.JobStatus == ansibleeev1.JobStatusFailed { + errorMsg := fmt.Sprintf("execution.name %s execution.namespace %s execution.status.jobstatus: %s", ansibleEE.Name, ansibleEE.Namespace, ansibleEE.Status.JobStatus) + ansibleCondition := ansibleEE.Status.Conditions.Get(condition.ReadyCondition) + if ansibleCondition.Reason == condition.JobReasonBackoffLimitExceeded { + errorMsg = fmt.Sprintf("backoff limit reached for execution.name %s execution.namespace %s execution.status.jobstatus: %s", ansibleEE.Name, ansibleEE.Namespace, ansibleEE.Status.JobStatus) + } + log.Info(fmt.Sprintf("Condition %s error", readyCondition)) + err = fmt.Errorf(errorMsg) + nsConditions.Set(condition.FalseCondition( + readyCondition, + ansibleCondition.Reason, + ansibleCondition.Severity, + readyErrorMessage, + err.Error())) + } + } + d.Status.NodeSetConditions[d.NodeSet.Name] = nsConditions + + return err +} + +// addCertMounts adds the cert mounts to the aeeSpec for the install-certs service +func (d *Deployer) addCertMounts( + services []string, +) (*dataplanev1.AnsibleEESpec, error) { + log := d.Helper.GetLogger() + client := d.Helper.GetClient() + for _, svc := range services { + service, err := GetService(d.Ctx, d.Helper, svc) + if err != nil { + return nil, err + } + + if service.Spec.CertsFrom != "" && service.Spec.TLSCerts == nil && service.Spec.CACerts == "" { + if 
slices.Contains(services, service.Spec.CertsFrom) { + continue + } + service, err = GetService(d.Ctx, d.Helper, service.Spec.CertsFrom) + if err != nil { + return nil, err + } + } + + if service.Spec.TLSCerts != nil { + // sort cert list to ensure mount list is consistent + certKeyList := make([]string, 0, len(service.Spec.TLSCerts)) + for ckey := range service.Spec.TLSCerts { + certKeyList = append(certKeyList, ckey) + } + sort.Strings(certKeyList) + + for _, certKey := range certKeyList { + log.Info("Mounting TLS cert for service", "service", svc) + volMounts := storage.VolMounts{} + + // add mount for certs and keys + secretName := GetServiceCertsSecretName(d.NodeSet, service.Name, certKey, 0) // Need to get the number of secrets + certSecret := &corev1.Secret{} + err := client.Get(d.Ctx, types.NamespacedName{Name: secretName, Namespace: service.Namespace}, certSecret) + if err != nil { + return d.AeeSpec, err + } + numberOfSecrets, _ := strconv.Atoi(certSecret.Labels["numberOfSecrets"]) + projectedVolumeSource := corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{}, + } + for i := 0; i < numberOfSecrets; i++ { + secretName := GetServiceCertsSecretName(d.NodeSet, service.Name, certKey, i) + certSecret := &corev1.Secret{} + err := client.Get(d.Ctx, types.NamespacedName{Name: secretName, Namespace: service.Namespace}, certSecret) + if err != nil { + return d.AeeSpec, err + } + volumeProjection := corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretName, + }, + }, + } + projectedVolumeSource.Sources = append(projectedVolumeSource.Sources, volumeProjection) + } + certVolume := corev1.Volume{ + Name: GetServiceCertsSecretName(d.NodeSet, service.Name, certKey, 0), + VolumeSource: corev1.VolumeSource{ + Projected: &projectedVolumeSource, + }, + } + + certMountDir := service.Spec.TLSCerts[certKey].EDPMRoleServiceName + if certMountDir == "" { + certMountDir = 
service.Spec.EDPMServiceType + } + + certVolumeMount := corev1.VolumeMount{ + Name: GetServiceCertsSecretName(d.NodeSet, service.Name, certKey, 0), + MountPath: path.Join(CertPaths, certMountDir, certKey), + } + volMounts.Volumes = append(volMounts.Volumes, certVolume) + volMounts.Mounts = append(volMounts.Mounts, certVolumeMount) + d.AeeSpec.ExtraMounts = append(d.AeeSpec.ExtraMounts, volMounts) + } + } + + // add mount for cacert bundle + if len(service.Spec.CACerts) > 0 { + log.Info("Mounting CA cert bundle for service", "service", svc) + volMounts := storage.VolMounts{} + cacertSecret := &corev1.Secret{} + err := client.Get(d.Ctx, types.NamespacedName{Name: service.Spec.CACerts, Namespace: service.Namespace}, cacertSecret) + if err != nil { + return d.AeeSpec, err + } + cacertVolume := corev1.Volume{ + Name: fmt.Sprintf("%s-%s", service.Name, service.Spec.CACerts), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: service.Spec.CACerts, + }, + }, + } + + cacertVolumeMount := corev1.VolumeMount{ + Name: fmt.Sprintf("%s-%s", service.Name, service.Spec.CACerts), + MountPath: path.Join(CACertPaths, service.Spec.EDPMServiceType), + } + + volMounts.Volumes = append(volMounts.Volumes, cacertVolume) + volMounts.Mounts = append(volMounts.Mounts, cacertVolumeMount) + d.AeeSpec.ExtraMounts = append(d.AeeSpec.ExtraMounts, volMounts) + } + } + + return d.AeeSpec, nil +} + +// addServiceExtraMounts adds the service configs as ExtraMounts to aeeSpec +func (d *Deployer) addServiceExtraMounts( + service dataplanev1.OpenStackDataPlaneService, +) (*dataplanev1.AnsibleEESpec, error) { + client := d.Helper.GetClient() + baseMountPath := path.Join(ConfigPaths, service.Name) + + var configMaps []*corev1.ConfigMap + var secrets []*corev1.Secret + + for _, dataSource := range service.Spec.DataSources { + _cm, _secret, err := dataplaneutil.GetDataSourceCmSecret(d.Ctx, d.Helper, service.Namespace, dataSource) + if err != nil { + return nil, err + } + 
+ if _cm != nil { + configMaps = append(configMaps, _cm) + } + if _secret != nil { + secrets = append(secrets, _secret) + } + } + + for _, cmName := range service.Spec.ConfigMaps { + cm := &corev1.ConfigMap{} + err := client.Get(d.Ctx, types.NamespacedName{Name: cmName, Namespace: service.Namespace}, cm) + if err != nil { + return d.AeeSpec, err + } + configMaps = append(configMaps, cm) + } + + for _, secretName := range service.Spec.Secrets { + sec := &corev1.Secret{} + err := client.Get(d.Ctx, types.NamespacedName{Name: secretName, Namespace: service.Namespace}, sec) + if err != nil { + return d.AeeSpec, err + } + secrets = append(secrets, sec) + } + + for _, cm := range configMaps { + + volMounts := storage.VolMounts{} + + keys := []string{} + for key := range cm.Data { + keys = append(keys, key) + } + for key := range cm.BinaryData { + keys = append(keys, key) + } + sort.Strings(keys) + + for idx, key := range keys { + name := fmt.Sprintf("%s-%s", cm.Name, strconv.Itoa(idx)) + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cm.Name, + }, + Items: []corev1.KeyToPath{ + { + Key: key, + Path: key, + }, + }, + }, + }, + } + + volumeMount := corev1.VolumeMount{ + Name: name, + MountPath: path.Join(baseMountPath, key), + SubPath: key, + } + + volMounts.Volumes = append(volMounts.Volumes, volume) + volMounts.Mounts = append(volMounts.Mounts, volumeMount) + + } + + d.AeeSpec.ExtraMounts = append(d.AeeSpec.ExtraMounts, volMounts) + } + + for _, sec := range secrets { + + volMounts := storage.VolMounts{} + keys := []string{} + for key := range sec.Data { + keys = append(keys, key) + } + sort.Strings(keys) + + for idx, key := range keys { + name := fmt.Sprintf("%s-%s", sec.Name, strconv.Itoa(idx)) + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sec.Name, + 
Items: []corev1.KeyToPath{ + { + Key: key, + Path: key, + }, + }, + }, + }, + } + + volumeMount := corev1.VolumeMount{ + Name: name, + MountPath: path.Join(baseMountPath, key), + SubPath: key, + } + + volMounts.Volumes = append(volMounts.Volumes, volume) + volMounts.Mounts = append(volMounts.Mounts, volumeMount) + + } + + d.AeeSpec.ExtraMounts = append(d.AeeSpec.ExtraMounts, volMounts) + } + + return d.AeeSpec, nil +} diff --git a/pkg/dataplane/hashes.go b/pkg/dataplane/hashes.go new file mode 100644 index 000000000..8f57dc498 --- /dev/null +++ b/pkg/dataplane/hashes.go @@ -0,0 +1,136 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package deployment + +import ( + "context" + + "github.com/openstack-k8s-operators/lib-common/modules/common/configmap" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +// GetDeploymentHashesForService - Hash the ConfigMaps and Secrets for the provided service +func GetDeploymentHashesForService( + ctx context.Context, + helper *helper.Helper, + namespace string, + serviceName string, + configMapHashes map[string]string, + secretHashes map[string]string, + nodeSets dataplanev1.OpenStackDataPlaneNodeSetList, +) error { + + namespacedName := types.NamespacedName{ + Name: serviceName, + Namespace: namespace, + } + service := &dataplanev1.OpenStackDataPlaneService{} + err := helper.GetClient().Get(context.Background(), namespacedName, service) + if err != nil { + helper.GetLogger().Error(err, "Unable to retrieve OpenStackDataPlaneService %v") + return err + } + + for _, dataSource := range service.Spec.DataSources { + cm, sec, err := dataplaneutil.GetDataSourceCmSecret(ctx, helper, namespace, dataSource) + if err != nil { + return err + } + + if cm != nil { + configMapHashes[cm.Name], err = configmap.Hash(cm) + if err != nil { + helper.GetLogger().Error(err, "Unable to hash ConfigMap %v") + } + } + if sec != nil { + secretHashes[sec.Name], err = secret.Hash(sec) + if err != nil { + helper.GetLogger().Error(err, "Unable to hash Secret %v") + } + } + } + + for _, cmName := range service.Spec.ConfigMaps { + namespacedName := types.NamespacedName{ + Name: cmName, + Namespace: namespace, + } + cm := &corev1.ConfigMap{} + err := helper.GetClient().Get(context.Background(), namespacedName, cm) + if err != nil { + 
helper.GetLogger().Error(err, "Unable to retrieve ConfigMap %v") + return err + } + configMapHashes[cmName], err = configmap.Hash(cm) + if err != nil { + helper.GetLogger().Error(err, "Unable to hash ConfigMap %v") + } + + } + for _, secretName := range service.Spec.Secrets { + namespacedName := types.NamespacedName{ + Name: secretName, + Namespace: namespace, + } + sec := &corev1.Secret{} + err := helper.GetClient().Get(ctx, namespacedName, sec) + if err != nil { + helper.GetLogger().Error(err, "Unable to retrieve Secret %v") + return err + } + secretHashes[secretName], err = secret.Hash(sec) + if err != nil { + helper.GetLogger().Error(err, "Unable to hash Secret %v") + } + } + + if service.Spec.TLSCerts != nil { + for certKey := range service.Spec.TLSCerts { + var secrets *corev1.SecretList + for _, nodeSet := range nodeSets.Items { + labelSelectorMap := map[string]string{ + NodeSetLabel: nodeSet.Name, + ServiceLabel: serviceName, + ServiceKeyLabel: certKey, + } + secrets, err = secret.GetSecrets(ctx, helper, "", labelSelectorMap) + if err != nil { + helper.GetLogger().Error(err, "Unable to search for cert secrets %v") + return err + } + for _, sec := range secrets.Items { + // get secret? or is it already there + secretHashes[sec.Name], err = secret.Hash(&sec) + if err != nil { + helper.GetLogger().Error(err, "Unable to search for hash cert secrets %v") + return err + } + + } + + } + } + } + + return nil +} diff --git a/pkg/dataplane/inventory.go b/pkg/dataplane/inventory.go new file mode 100644 index 000000000..a7b092aa6 --- /dev/null +++ b/pkg/dataplane/inventory.go @@ -0,0 +1,336 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + + yaml "gopkg.in/yaml.v3" + + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/ansible" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + utils "github.com/openstack-k8s-operators/lib-common/modules/common/util" + openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" +) + +// getAnsibleVarsFrom gets ansible vars from ConfigMap/Secret +func getAnsibleVarsFrom(ctx context.Context, helper *helper.Helper, namespace string, ansible *dataplanev1.AnsibleOpts) (map[string]string, error) { + + var result = make(map[string]string) + + for _, dataSource := range ansible.AnsibleVarsFrom { + configMap, secret, err := util.GetDataSourceCmSecret(ctx, helper, namespace, dataSource) + if err != nil { + return result, err + } + + // AnsibleVars will override AnsibleVarsFrom variables. + // Process AnsibleVarsFrom first then allow AnsibleVars to replace existing values. 
+ if configMap != nil { + for k, v := range configMap.Data { + if len(dataSource.Prefix) > 0 { + k = dataSource.Prefix + k + } + + result[k] = v + } + } + + if secret != nil { + for k, v := range secret.Data { + if len(dataSource.Prefix) > 0 { + k = dataSource.Prefix + k + } + result[k] = string(v) + } + } + + } + return result, nil +} + +// GenerateNodeSetInventory yields a parsed Inventory for role +func GenerateNodeSetInventory(ctx context.Context, helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, + allIPSets map[string]infranetworkv1.IPSet, dnsAddresses []string, + containerImages openstackv1.ContainerImages) (string, error) { + inventory := ansible.MakeInventory() + nodeSetGroup := inventory.AddGroup(instance.Name) + groupVars, err := getAnsibleVarsFrom(ctx, helper, instance.Namespace, &instance.Spec.NodeTemplate.Ansible) + if err != nil { + utils.LogErrorForObject(helper, err, "could not get ansible group vars from configMap/secret", instance) + return "", err + } + for k, v := range groupVars { + nodeSetGroup.Vars[k] = v + } + err = resolveGroupAnsibleVars(&instance.Spec.NodeTemplate, &nodeSetGroup, containerImages) + if err != nil { + utils.LogErrorForObject(helper, err, "Could not resolve ansible group vars", instance) + return "", err + } + + // add the NodeSet name variable + nodeSetGroup.Vars["edpm_nodeset_name"] = instance.Name + + // add TLS ansible variable + nodeSetGroup.Vars["edpm_tls_certs_enabled"] = instance.Spec.TLSEnabled + if instance.Spec.Tags != nil { + nodeSetGroup.Vars["nodeset_tags"] = instance.Spec.Tags + } + + // add services list + nodeSetGroup.Vars["edpm_services"] = instance.Spec.Services + + nodeSetGroup.Vars["ansible_ssh_private_key_file"] = fmt.Sprintf("/runner/env/ssh_key/ssh_key_%s", instance.Name) + + for _, node := range instance.Spec.Nodes { + host := nodeSetGroup.AddHost(strings.Split(node.HostName, ".")[0]) + hostVars, err := getAnsibleVarsFrom(ctx, helper, instance.Namespace, &node.Ansible) + if 
err != nil { + utils.LogErrorForObject(helper, err, "could not get ansible host vars from configMap/secret", instance) + return "", err + } + for k, v := range hostVars { + host.Vars[k] = v + } + // Use ansible_host if provided else use hostname. Fall back to + // nodeName if all else fails. + if node.Ansible.AnsibleHost != "" { + host.Vars["ansible_host"] = node.Ansible.AnsibleHost + } else { + host.Vars["ansible_host"] = node.HostName + } + + err = resolveHostAnsibleVars(&node, &host) + if err != nil { + utils.LogErrorForObject(helper, err, "Could not resolve ansible host vars", instance) + return "", err + } + + ipSet, ok := allIPSets[node.HostName] + if ok { + populateInventoryFromIPAM(&ipSet, host, dnsAddresses, node.HostName) + } + + } + + invData, err := inventory.MarshalYAML() + if err != nil { + utils.LogErrorForObject(helper, err, "Could not parse NodeSet inventory", instance) + return "", err + } + secretData := map[string]string{ + "inventory": string(invData), + } + secretName := fmt.Sprintf("dataplanenodeset-%s", instance.Name) + labels := map[string]string{ + "openstack.org/operator-name": "dataplane", + "openstackdataplanenodeset": instance.Name, + "inventory": "true", + } + for key, val := range instance.ObjectMeta.Labels { + labels[key] = val + } + template := []utils.Template{ + // Secret + { + Name: secretName, + Namespace: instance.Namespace, + Type: utils.TemplateTypeNone, + InstanceType: instance.Kind, + CustomData: secretData, + Labels: labels, + }, + } + err = secret.EnsureSecrets(ctx, helper, instance, template, nil) + return secretName, err +} + +// populateInventoryFromIPAM populates inventory from IPAM +func populateInventoryFromIPAM( + ipSet *infranetworkv1.IPSet, host ansible.Host, + dnsAddresses []string, hostName string) { + var dnsSearchDomains []string + for _, res := range ipSet.Status.Reservation { + // Build the vars for ips/routes etc + entry := strings.ToLower(string(res.Network)) + host.Vars[entry+"_ip"] = res.Address + _, 
ipnet, err := net.ParseCIDR(res.Cidr) + if err == nil { + netCidr, _ := ipnet.Mask.Size() + host.Vars[entry+"_cidr"] = netCidr + } + if res.Vlan != nil || entry != CtlPlaneNetwork { + host.Vars[entry+"_vlan_id"] = res.Vlan + } + host.Vars[entry+"_mtu"] = res.MTU + host.Vars[entry+"_gateway_ip"] = res.Gateway + host.Vars[entry+"_host_routes"] = res.Routes + + if entry == CtlPlaneNetwork { + host.Vars[entry+"_dns_nameservers"] = dnsAddresses + if dataplanev1.NodeHostNameIsFQDN(hostName) { + host.Vars["canonical_hostname"] = hostName + domain := strings.SplitN(hostName, ".", 2)[1] + if domain != res.DNSDomain { + dnsSearchDomains = append(dnsSearchDomains, domain) + } + } else { + host.Vars["canonical_hostname"] = strings.Join([]string{hostName, res.DNSDomain}, ".") + } + } + dnsSearchDomains = append(dnsSearchDomains, res.DNSDomain) + } + host.Vars["dns_search_domains"] = dnsSearchDomains +} + +// set group ansible vars from NodeTemplate +func resolveGroupAnsibleVars(template *dataplanev1.NodeTemplate, group *ansible.Group, + containerImages openstackv1.ContainerImages) error { + + if template.Ansible.AnsibleUser != "" { + group.Vars["ansible_user"] = template.Ansible.AnsibleUser + } + if template.Ansible.AnsiblePort > 0 { + group.Vars["ansible_port"] = strconv.Itoa(template.Ansible.AnsiblePort) + } + if template.ManagementNetwork != "" { + group.Vars["management_network"] = template.ManagementNetwork + } + + // Set the ansible variables for the container images if they are not + // provided by the user in the spec. 
+ if template.Ansible.AnsibleVars["edpm_frr_image"] == nil { + group.Vars["edpm_frr_image"] = containerImages.EdpmFrrImage + } + if template.Ansible.AnsibleVars["edpm_iscsid_image"] == nil { + group.Vars["edpm_iscsid_image"] = containerImages.EdpmIscsidImage + } + if template.Ansible.AnsibleVars["edpm_logrotate_crond_image"] == nil { + group.Vars["edpm_logrotate_crond_image"] = containerImages.EdpmLogrotateCrondImage + } + if template.Ansible.AnsibleVars["edpm_multipathd_image"] == nil { + group.Vars["edpm_multipathd_image"] = containerImages.EdpmMultipathdImage + } + if template.Ansible.AnsibleVars["edpm_neutron_dhcp_image"] == nil { + group.Vars["edpm_neutron_dhcp_image"] = containerImages.EdpmNeutronDhcpAgentImage + } + if template.Ansible.AnsibleVars["edpm_neutron_metadata_agent_image"] == nil { + group.Vars["edpm_neutron_metadata_agent_image"] = containerImages.EdpmNeutronMetadataAgentImage + } + if template.Ansible.AnsibleVars["edpm_neutron_ovn_agent_image"] == nil { + group.Vars["edpm_neutron_ovn_agent_image"] = containerImages.EdpmNeutronOvnAgentImage + } + if template.Ansible.AnsibleVars["edpm_neutron_sriov_agent_image"] == nil { + group.Vars["edpm_neutron_sriov_image"] = containerImages.EdpmNeutronSriovAgentImage + } + if template.Ansible.AnsibleVars["edpm_nova_compute_image"] == nil { + group.Vars["edpm_nova_compute_image"] = containerImages.NovaComputeImage + } + if template.Ansible.AnsibleVars["edpm_ovn_controller_agent_image"] == nil { + group.Vars["edpm_ovn_controller_agent_image"] = containerImages.OvnControllerImage + } + if template.Ansible.AnsibleVars["edpm_ovn_bgp_agent_image"] == nil { + group.Vars["edpm_ovn_bgp_agent_image"] = containerImages.EdpmOvnBgpAgentImage + } + if template.Ansible.AnsibleVars["edpm_telemetry_ceilometer_compute_image"] == nil { + group.Vars["edpm_telemetry_ceilometer_compute_image"] = containerImages.CeilometerComputeImage + } + if template.Ansible.AnsibleVars["edpm_telemetry_ceilometer_ipmi_image"] == nil { + 
group.Vars["edpm_telemetry_ceilometer_ipmi_image"] = containerImages.CeilometerIpmiImage + } + if template.Ansible.AnsibleVars["edpm_telemetry_node_exporter_image"] == nil { + group.Vars["edpm_telemetry_node_exporter_image"] = containerImages.EdpmNodeExporterImage + } + + err := unmarshalAnsibleVars(template.Ansible.AnsibleVars, group.Vars) + if err != nil { + return err + } + if len(template.Networks) != 0 { + nets, netsLower := buildNetworkVars(template.Networks) + group.Vars["nodeset_networks"] = nets + group.Vars["networks_lower"] = netsLower + } + + return nil +} + +// set host ansible vars from NodeSection +func resolveHostAnsibleVars(node *dataplanev1.NodeSection, host *ansible.Host) error { + + if node.Ansible.AnsibleUser != "" { + host.Vars["ansible_user"] = node.Ansible.AnsibleUser + } + if node.Ansible.AnsiblePort > 0 { + host.Vars["ansible_port"] = strconv.Itoa(node.Ansible.AnsiblePort) + } + if node.ManagementNetwork != "" { + host.Vars["management_network"] = node.ManagementNetwork + } + + err := unmarshalAnsibleVars(node.Ansible.AnsibleVars, host.Vars) + if err != nil { + return err + } + if len(node.Networks) != 0 { + nets, netsLower := buildNetworkVars(node.Networks) + host.Vars["nodeset_networks"] = nets + host.Vars["networks_lower"] = netsLower + } + return nil + +} + +// unmarshal raw strings into an ansible vars dictionary +func unmarshalAnsibleVars(ansibleVars map[string]json.RawMessage, + parsedVars map[string]interface{}) error { + + for key, val := range ansibleVars { + var v interface{} + err := yaml.Unmarshal(val, &v) + if err != nil { + return err + } + parsedVars[key] = v + } + return nil +} + +func buildNetworkVars(networks []infranetworkv1.IPSetNetwork) ([]string, map[string]string) { + netsLower := make(map[string]string) + var nets []string + for _, network := range networks { + netName := string(network.Name) + if strings.EqualFold(netName, CtlPlaneNetwork) { + continue + } + nets = append(nets, netName) + netsLower[netName] = 
strings.ToLower(netName) + } + return nets, netsLower +} diff --git a/pkg/dataplane/ipam.go b/pkg/dataplane/ipam.go new file mode 100644 index 000000000..1428118f1 --- /dev/null +++ b/pkg/dataplane/ipam.go @@ -0,0 +1,318 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "errors" + "fmt" + "sort" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infranetworkv1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" +) + +// DNSDetails struct for IPAM and DNS details of NodeSet +type DNSDetails struct { + // IsReady has DNSData been reconciled + IsReady bool + // ServerAddresses holds a slice of DNS servers in the environment + ServerAddresses []string + // ClusterAddresses holds a slice of Kubernetes service ClusterIPs for the DNSMasq services + ClusterAddresses []string + // CtlplaneSearchDomain is the search domain provided by IPAM + CtlplaneSearchDomain string + // Hostnames is a map of hostnames provided by the NodeSet to the FQDNs + Hostnames 
map[string]map[infranetworkv1.NetNameStr]string + // AllIPs holds a map of all IP addresses per hostname. + AllIPs map[string]map[infranetworkv1.NetNameStr]string +} + +// checkDNSService checks if DNS is configured and ready +func checkDNSService(ctx context.Context, helper *helper.Helper, + instance client.Object, dnsDetails *DNSDetails, +) error { + dnsmasqList := &infranetworkv1.DNSMasqList{} + listOpts := []client.ListOption{ + client.InNamespace(instance.GetNamespace()), + } + err := helper.GetClient().List(ctx, dnsmasqList, listOpts...) + if err != nil { + util.LogErrorForObject(helper, err, "Error listing dnsmasqs", instance) + return err + } + if len(dnsmasqList.Items) > 1 { + util.LogForObject(helper, "Only one DNS control plane service can exist", instance) + err = errors.New(dataplanev1.NodeSetDNSDataMultipleDNSMasqErrorMessage) + return err + } + if len(dnsmasqList.Items) == 0 { + util.LogForObject(helper, "No DNS control plane service exists yet", instance) + return nil + } + if !dnsmasqList.Items[0].IsReady() { + util.LogForObject(helper, "DNS control plane service exists, but not ready yet ", instance) + return nil + } + dnsDetails.ClusterAddresses = dnsmasqList.Items[0].Status.DNSClusterAddresses + dnsDetails.ServerAddresses = dnsmasqList.Items[0].Status.DNSAddresses + return nil +} + +// createOrPatchDNSData builds the DNSData +func createOrPatchDNSData(ctx context.Context, helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, + allIPSets map[string]infranetworkv1.IPSet, dnsDetails *DNSDetails, +) error { + var allDNSRecords []infranetworkv1.DNSHost + var ctlplaneSearchDomain string + dnsDetails.Hostnames = map[string]map[infranetworkv1.NetNameStr]string{} + dnsDetails.AllIPs = map[string]map[infranetworkv1.NetNameStr]string{} + + // Build DNSData CR + // We need to sort the nodes here, else DNSData.Spec.Hosts would change + // For every reconcile and it could create reconcile loops. 
+ nodes := instance.Spec.Nodes + sortedNodeNames := make([]string, 0) + for name := range instance.Spec.Nodes { + sortedNodeNames = append(sortedNodeNames, name) + } + sort.Strings(sortedNodeNames) + + for _, nodeName := range sortedNodeNames { + node := nodes[nodeName] + var shortName string + nets := node.Networks + hostName := node.HostName + + dnsDetails.Hostnames[hostName] = map[infranetworkv1.NetNameStr]string{} + dnsDetails.AllIPs[hostName] = map[infranetworkv1.NetNameStr]string{} + + shortName = strings.Split(hostName, ".")[0] + if len(nets) == 0 { + nets = instance.Spec.NodeTemplate.Networks + } + if len(nets) > 0 { + // Get IPSet + ipSet, ok := allIPSets[hostName] + if ok { + for _, res := range ipSet.Status.Reservation { + var fqdnNames []string + dnsRecord := infranetworkv1.DNSHost{} + dnsRecord.IP = res.Address + netLower := strings.ToLower(string(res.Network)) + fqdnName := strings.Join([]string{shortName, res.DNSDomain}, ".") + if fqdnName != hostName { + fqdnNames = append(fqdnNames, fqdnName) + dnsDetails.Hostnames[hostName][infranetworkv1.NetNameStr(netLower)] = fqdnName + } + if dataplanev1.NodeHostNameIsFQDN(hostName) && netLower == CtlPlaneNetwork { + fqdnNames = append(fqdnNames, hostName) + dnsDetails.Hostnames[hostName][infranetworkv1.NetNameStr(netLower)] = hostName + } + dnsDetails.AllIPs[hostName][infranetworkv1.NetNameStr(netLower)] = res.Address + dnsRecord.Hostnames = fqdnNames + allDNSRecords = append(allDNSRecords, dnsRecord) + // Adding only ctlplane domain for ansibleee. + // TODO (rabi) This is not very efficient. 
+ if netLower == CtlPlaneNetwork && ctlplaneSearchDomain == "" { + dnsDetails.CtlplaneSearchDomain = res.DNSDomain + } + } + } + } + } + dnsData := &infranetworkv1.DNSData{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: instance.Namespace, + Name: instance.Name, + }, + } + _, err := controllerutil.CreateOrPatch(ctx, helper.GetClient(), dnsData, func() error { + dnsData.Spec.Hosts = allDNSRecords + // TODO (rabi) DNSDataLabelSelectorValue can probably be + // used from dnsmasq(?) + dnsData.Spec.DNSDataLabelSelectorValue = "dnsdata" + // Set controller reference to the DataPlaneNode object + err := controllerutil.SetControllerReference( + helper.GetBeforeObject(), dnsData, helper.GetScheme()) + return err + }) + if err != nil { + return err + } + return nil +} + +// EnsureDNSData Ensures DNSData is created +func EnsureDNSData(ctx context.Context, helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, + allIPSets map[string]infranetworkv1.IPSet, +) (dnsDetails *DNSDetails, err error) { + dnsDetails = &DNSDetails{} + // Verify dnsmasq CR exists + err = checkDNSService( + ctx, helper, instance, dnsDetails) + + if err != nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetDNSDataReadyCondition, + condition.ErrorReason, condition.SeverityError, + err.Error()) + return dnsDetails, err + } + if dnsDetails.ClusterAddresses == nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetDNSDataReadyCondition, + condition.RequestedReason, condition.SeverityInfo, + dataplanev1.NodeSetDNSDataReadyWaitingMessage) + return dnsDetails, nil + } + + // Create or Patch DNSData + err = createOrPatchDNSData( + ctx, helper, instance, allIPSets, dnsDetails) + if err != nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetDNSDataReadyCondition, + condition.ErrorReason, condition.SeverityError, + dataplanev1.NodeSetDNSDataReadyErrorMessage) + return dnsDetails, err + } + + dnsData := &infranetworkv1.DNSData{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: instance.Name, + Namespace: instance.Namespace, + }, + } + key := client.ObjectKeyFromObject(dnsData) + err = helper.GetClient().Get(ctx, key, dnsData) + if err != nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetDNSDataReadyCondition, + condition.ErrorReason, condition.SeverityError, + dataplanev1.NodeSetDNSDataReadyErrorMessage) + return dnsDetails, err + } + if !dnsData.IsReady() { + util.LogForObject(helper, "DNSData not ready yet waiting", instance) + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetDNSDataReadyCondition, + condition.RequestedReason, condition.SeverityInfo, + dataplanev1.NodeSetDNSDataReadyWaitingMessage) + return dnsDetails, nil + } + + instance.Status.Conditions.MarkTrue( + dataplanev1.NodeSetDNSDataReadyCondition, + dataplanev1.NodeSetDNSDataReadyMessage) + dnsDetails.IsReady = true + return dnsDetails, nil +} + +// EnsureIPSets Creates the IPSets +func EnsureIPSets(ctx context.Context, helper *helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, +) (map[string]infranetworkv1.IPSet, bool, error) { + allIPSets, err := reserveIPs(ctx, helper, instance) + if err != nil { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetIPReservationReadyCondition, + condition.ErrorReason, condition.SeverityError, + dataplanev1.NodeSetIPReservationReadyErrorMessage) + return nil, false, err + } + + for _, s := range allIPSets { + if s.Status.Conditions.IsFalse(condition.ReadyCondition) { + instance.Status.Conditions.MarkFalse( + dataplanev1.NodeSetIPReservationReadyCondition, + condition.RequestedReason, condition.SeverityInfo, + dataplanev1.NodeSetIPReservationReadyWaitingMessage) + return nil, false, nil + } + } + instance.Status.Conditions.MarkTrue( + dataplanev1.NodeSetIPReservationReadyCondition, + dataplanev1.NodeSetIPReservationReadyMessage) + return allIPSets, true, nil +} + +// reserveIPs Reserves IPs by creating IPSets +func reserveIPs(ctx context.Context, helper 
*helper.Helper, + instance *dataplanev1.OpenStackDataPlaneNodeSet, +) (map[string]infranetworkv1.IPSet, error) { + // Verify NetConfig CRs exist + netConfigList := &infranetworkv1.NetConfigList{} + listOpts := []client.ListOption{ + client.InNamespace(instance.GetNamespace()), + } + err := helper.GetClient().List(ctx, netConfigList, listOpts...) + if err != nil { + return nil, err + } + if len(netConfigList.Items) == 0 { + errMsg := "No NetConfig CR exists yet" + util.LogForObject(helper, errMsg, instance) + return nil, fmt.Errorf(errMsg) + } + + allIPSets := make(map[string]infranetworkv1.IPSet) + // CreateOrPatch IPSets + for nodeName, node := range instance.Spec.Nodes { + nets := node.Networks + hostName := node.HostName + if len(nets) == 0 { + nets = instance.Spec.NodeTemplate.Networks + } + + if len(nets) > 0 { + ipSet := &infranetworkv1.IPSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: instance.Namespace, + Name: hostName, + }, + } + _, err := controllerutil.CreateOrPatch(ctx, helper.GetClient(), ipSet, func() error { + ipSet.Spec.Networks = nets + // Set controller reference to the DataPlaneNode object + err := controllerutil.SetControllerReference( + helper.GetBeforeObject(), ipSet, helper.GetScheme()) + return err + }) + if err != nil { + return nil, err + } + allIPSets[hostName] = *ipSet + } else { + msg := fmt.Sprintf("No Networks defined for node %s or template", nodeName) + util.LogForObject(helper, msg, instance) + return nil, fmt.Errorf(msg) + } + } + + return allIPSets, nil +} diff --git a/pkg/dataplane/service.go b/pkg/dataplane/service.go new file mode 100644 index 000000000..e0b7b98a9 --- /dev/null +++ b/pkg/dataplane/service.go @@ -0,0 +1,197 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "fmt" + "os" + "path" + "strings" + + yaml "gopkg.in/yaml.v3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/go-playground/validator/v10" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" +) + +// ServiceYAML struct for service YAML unmarshalling +type ServiceYAML struct { + Kind string + Metadata yaml.Node + Spec yaml.Node +} + +// DeployService service deployment +func (d *Deployer) DeployService(foundService dataplanev1.OpenStackDataPlaneService) error { + err := dataplaneutil.AnsibleExecution( + d.Ctx, + d.Helper, + d.Deployment, + &foundService, + d.AnsibleSSHPrivateKeySecrets, + d.InventorySecrets, + d.AeeSpec, + d.NodeSet) + + if err != nil { + d.Helper.GetLogger().Error(err, fmt.Sprintf("Unable to execute Ansible for %s", foundService.Name)) + return err + } + + return nil +} + +// GetService return service +func GetService(ctx context.Context, helper *helper.Helper, service string) (dataplanev1.OpenStackDataPlaneService, error) { + client := helper.GetClient() + beforeObj := helper.GetBeforeObject() + namespace := beforeObj.GetNamespace() + foundService := &dataplanev1.OpenStackDataPlaneService{} + err := client.Get(ctx, 
types.NamespacedName{Name: service, Namespace: namespace}, foundService) + return *foundService, err +} + +// EnsureServices - ensure the OpenStackDataPlaneServices exist +func EnsureServices(ctx context.Context, helper *helper.Helper, instance *dataplanev1.OpenStackDataPlaneNodeSet, validation *validator.Validate) error { + servicesPath, found := os.LookupEnv("OPERATOR_SERVICES") + if !found { + servicesPath = "config/services" + os.Setenv("OPERATOR_SERVICES", servicesPath) + util.LogForObject( + helper, "OPERATOR_SERVICES not set in env when reconciling ", instance, + "defaulting to ", servicesPath) + } + + helper.GetLogger().Info("Ensuring services", "servicesPath", servicesPath) + services, err := os.ReadDir(servicesPath) + if err != nil { + return err + } + + for _, service := range services { + + servicePath := path.Join(servicesPath, service.Name()) + + if !strings.HasSuffix(service.Name(), ".yaml") { + helper.GetLogger().Info("Skipping ensuring service from file without .yaml suffix", "file", service.Name()) + continue + } + + data, _ := os.ReadFile(servicePath) + var serviceObj ServiceYAML + err = yaml.Unmarshal(data, &serviceObj) + if err != nil { + helper.GetLogger().Info("Service YAML file Unmarshal error", "service YAML file", servicePath) + return err + } + + if serviceObj.Kind != "OpenStackDataPlaneService" { + helper.GetLogger().Info("Skipping ensuring service since kind is not OpenStackDataPlaneService", "file", servicePath, "Kind", serviceObj.Kind) + continue + } + + serviceObjMeta := &metav1.ObjectMeta{} + err = serviceObj.Metadata.Decode(serviceObjMeta) + if err != nil { + helper.GetLogger().Info("Service Metadata decode error") + return err + } + // Check if service name matches RFC1123 for use in labels + if err = validation.Var(serviceObjMeta.Name, "hostname_rfc1123"); err != nil { + helper.GetLogger().Info("service name must follow RFC1123") + return err + } + nodeSetContainsService := false + for _, roleServiceName := range 
instance.Spec.Services { + if roleServiceName == serviceObjMeta.Name { + nodeSetContainsService = true + break + } + } + if !nodeSetContainsService { + helper.GetLogger().Info("Skipping ensure service since it is not a service on this nodeset", "service", serviceObjMeta.Name) + continue + } + + serviceObjSpec := &dataplanev1.OpenStackDataPlaneServiceSpec{} + err = serviceObj.Spec.Decode(serviceObjSpec) + if err != nil { + helper.GetLogger().Info("Service Spec decode error") + return err + } + + ensureService := &dataplanev1.OpenStackDataPlaneService{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceObjMeta.Name, + Namespace: instance.Namespace, + }, + } + _, err = controllerutil.CreateOrPatch(ctx, helper.GetClient(), ensureService, func() error { + serviceObjSpec.DeepCopyInto(&ensureService.Spec) + return nil + }) + if err != nil { + return fmt.Errorf("error ensuring service: %w", err) + } + + } + + return nil +} + +// CheckGlobalServiceExecutionConsistency - Check that global services are defined only once in all nodesets, report and fail if there are duplicates +func CheckGlobalServiceExecutionConsistency(ctx context.Context, helper *helper.Helper, nodesets []dataplanev1.OpenStackDataPlaneNodeSet) error { + var globalServices []string + var allServices []string + + for _, nodeset := range nodesets { + allServices = append(allServices, nodeset.Spec.Services...) 
+ } + for _, svc := range allServices { + service, err := GetService(ctx, helper, svc) + if err != nil { + helper.GetLogger().Error(err, fmt.Sprintf("error getting service %s for consistency check", svc)) + return err + } + + if service.Spec.DeployOnAllNodeSets { + if serviceInList(service.Name, globalServices) { + return fmt.Errorf("global service %s defined multiple times", service.Name) + } + globalServices = append(globalServices, service.Name) + } + } + + return nil +} + +// Check if service name is already in a list +func serviceInList(service string, services []string) bool { + for _, svc := range services { + if svc == service { + return true + } + } + return false +} diff --git a/pkg/dataplane/util/ansible_execution.go b/pkg/dataplane/util/ansible_execution.go new file mode 100644 index 000000000..e2a89ef3d --- /dev/null +++ b/pkg/dataplane/util/ansible_execution.go @@ -0,0 +1,308 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + "github.com/openstack-k8s-operators/lib-common/modules/storage" + ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" +) + +// AnsibleExecution creates a OpenStackAnsiblEE CR +func AnsibleExecution( + ctx context.Context, + helper *helper.Helper, + deployment *dataplanev1.OpenStackDataPlaneDeployment, + service *dataplanev1.OpenStackDataPlaneService, + sshKeySecrets map[string]string, + inventorySecrets map[string]string, + aeeSpec *dataplanev1.AnsibleEESpec, + nodeSet client.Object, +) error { + var err error + var cmdLineArguments strings.Builder + var inventoryVolume corev1.Volume + var inventoryName string + var inventoryMountPath string + var sshKeyName string + var sshKeyMountPath string + var sshKeyMountSubPath string + + ansibleEEMounts := storage.VolMounts{} + + executionName, labels := GetAnsibleExecutionNameAndLabels(service, deployment.GetName(), nodeSet.GetName()) + ansibleEE, err := GetAnsibleExecution(ctx, helper, deployment, labels) + if err != nil && !k8serrors.IsNotFound(err) { + return err + } + if ansibleEE == nil { + ansibleEE = &ansibleeev1.OpenStackAnsibleEE{ + ObjectMeta: metav1.ObjectMeta{ + Name: executionName, + Namespace: deployment.GetNamespace(), + Labels: labels, + }, + } + } + + _, err = controllerutil.CreateOrPatch(ctx, helper.GetClient(), ansibleEE, func() error { + ansibleEE.Spec.NetworkAttachments = 
aeeSpec.NetworkAttachments + if aeeSpec.DNSConfig != nil { + ansibleEE.Spec.DNSConfig = aeeSpec.DNSConfig + } + if len(aeeSpec.OpenStackAnsibleEERunnerImage) > 0 { + ansibleEE.Spec.Image = aeeSpec.OpenStackAnsibleEERunnerImage + } + if len(aeeSpec.ExtraVars) > 0 { + ansibleEE.Spec.ExtraVars = aeeSpec.ExtraVars + } + if len(aeeSpec.AnsibleTags) > 0 { + fmt.Fprintf(&cmdLineArguments, "--tags %s ", aeeSpec.AnsibleTags) + } + if len(aeeSpec.AnsibleLimit) > 0 { + fmt.Fprintf(&cmdLineArguments, "--limit %s ", aeeSpec.AnsibleLimit) + } + if len(aeeSpec.AnsibleSkipTags) > 0 { + fmt.Fprintf(&cmdLineArguments, "--skip-tags %s ", aeeSpec.AnsibleSkipTags) + } + if len(aeeSpec.ServiceAccountName) > 0 { + ansibleEE.Spec.ServiceAccountName = aeeSpec.ServiceAccountName + } + if cmdLineArguments.Len() > 0 { + ansibleEE.Spec.CmdLine = strings.TrimSpace(cmdLineArguments.String()) + } + + if len(service.Spec.PlaybookContents) > 0 { + ansibleEE.Spec.Play = service.Spec.PlaybookContents + } + if len(service.Spec.Playbook) > 0 { + ansibleEE.Spec.Playbook = service.Spec.Playbook + } + ansibleEE.Spec.BackoffLimit = deployment.Spec.BackoffLimit + + // If we have a service that ought to be deployed everywhere + // substitute the existing play target with 'all' + // Check if we have ExtraVars before accessing it + if ansibleEE.Spec.ExtraVars == nil { + ansibleEE.Spec.ExtraVars = make(map[string]json.RawMessage) + } + if service.Spec.DeployOnAllNodeSets { + ansibleEE.Spec.ExtraVars["edpm_override_hosts"] = json.RawMessage([]byte("\"all\"")) + util.LogForObject(helper, fmt.Sprintf("for service %s, substituting existing ansible play host with 'all'.", service.Name), ansibleEE) + } else { + ansibleEE.Spec.ExtraVars["edpm_override_hosts"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", nodeSet.GetName()))) + util.LogForObject(helper, + fmt.Sprintf("for service %s, substituting existing ansible play host with '%s'.", service.Name, nodeSet.GetName()), ansibleEE) + } + if service.Spec.EDPMServiceType 
!= "" { + ansibleEE.Spec.ExtraVars["edpm_service_type"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + } else { + ansibleEE.Spec.ExtraVars["edpm_service_type"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", service.Name))) + } + + if len(deployment.Spec.ServicesOverride) > 0 { + ansibleEE.Spec.ExtraVars["edpm_services_override"] = json.RawMessage([]byte(fmt.Sprintf("\"%s\"", deployment.Spec.ServicesOverride))) + } + + // Sort keys of the ssh secret map + sshKeys := make([]string, 0) + for k := range sshKeySecrets { + sshKeys = append(sshKeys, k) + } + sort.Strings(sshKeys) + + for _, sshKeyNodeName := range sshKeys { + sshKeySecret := sshKeySecrets[sshKeyNodeName] + if service.Spec.DeployOnAllNodeSets { + sshKeyName = fmt.Sprintf("ssh-key-%s", sshKeyNodeName) + sshKeyMountSubPath = fmt.Sprintf("ssh_key_%s", sshKeyNodeName) + sshKeyMountPath = fmt.Sprintf("/runner/env/ssh_key/%s", sshKeyMountSubPath) + } else { + if sshKeyNodeName != nodeSet.GetName() { + continue + } + sshKeyName = "ssh-key" + sshKeyMountSubPath = "ssh_key" + sshKeyMountPath = "/runner/env/ssh_key" + } + sshKeyVolume := corev1.Volume{ + Name: sshKeyName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sshKeySecret, + Items: []corev1.KeyToPath{ + { + Key: "ssh-privatekey", + Path: sshKeyMountSubPath, + }, + }, + }, + }, + } + sshKeyMount := corev1.VolumeMount{ + Name: sshKeyName, + MountPath: sshKeyMountPath, + SubPath: sshKeyMountSubPath, + } + // Mount ssh secrets + ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, sshKeyMount) + ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, sshKeyVolume) + } + + // order the inventory keys otherwise it could lead to changing order and mount order changing + invKeys := make([]string, 0) + for k := range inventorySecrets { + invKeys = append(invKeys, k) + } + sort.Strings(invKeys) + + // Mounting inventory and secrets + for inventoryIndex, nodeName := range invKeys { + if 
service.Spec.DeployOnAllNodeSets { + inventoryName = fmt.Sprintf("inventory-%d", inventoryIndex) + inventoryMountPath = fmt.Sprintf("/runner/inventory/%s", inventoryName) + } else { + if nodeName != nodeSet.GetName() { + continue + } + inventoryName = "inventory" + inventoryMountPath = "/runner/inventory/hosts" + } + + inventoryVolume = corev1.Volume{ + Name: inventoryName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: inventorySecrets[nodeName], + Items: []corev1.KeyToPath{ + { + Key: "inventory", + Path: inventoryName, + }, + }, + }, + }, + } + inventoryMount := corev1.VolumeMount{ + Name: inventoryName, + MountPath: inventoryMountPath, + SubPath: inventoryName, + } + // Inventory mount + ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, inventoryMount) + ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, inventoryVolume) + } + + ansibleEE.Spec.ExtraMounts = append(aeeSpec.ExtraMounts, []storage.VolMounts{ansibleEEMounts}...) + ansibleEE.Spec.Env = aeeSpec.Env + + err := controllerutil.SetControllerReference(deployment, ansibleEE, helper.GetScheme()) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + util.LogErrorForObject(helper, err, fmt.Sprintf("Unable to create AnsibleEE %s", ansibleEE.Name), ansibleEE) + return err + } + + return nil +} + +// GetAnsibleExecution gets and returns an OpenStackAnsibleEE with the given +// labels where +// "openstackdataplaneservice": , +// "openstackdataplanedeployment": , +// "openstackdataplanenodeset": , +// If none or more than one is found, return nil and error +func GetAnsibleExecution(ctx context.Context, + helper *helper.Helper, obj client.Object, labelSelector map[string]string) (*ansibleeev1.OpenStackAnsibleEE, error) { + var err error + ansibleEEs := &ansibleeev1.OpenStackAnsibleEEList{} + + listOpts := []client.ListOption{ + client.InNamespace(obj.GetNamespace()), + } + if len(labelSelector) > 0 { + labels := 
client.MatchingLabels(labelSelector) + listOpts = append(listOpts, labels) + } + err = helper.GetClient().List(ctx, ansibleEEs, listOpts...) + if err != nil { + return nil, err + } + + var ansibleEE *ansibleeev1.OpenStackAnsibleEE + if len(ansibleEEs.Items) == 0 { + return nil, k8serrors.NewNotFound(appsv1.Resource("OpenStackAnsibleEE"), fmt.Sprintf("with label %s", labelSelector)) + } else if len(ansibleEEs.Items) == 1 { + ansibleEE = &ansibleEEs.Items[0] + } else { + return nil, fmt.Errorf("multiple OpenStackAnsibleEE's found with label %s", labelSelector) + } + + return ansibleEE, nil +} + +// getAnsibleExecutionNamePrefix compute the name of the AnsibleEE +func getAnsibleExecutionNamePrefix(serviceName string) string { + var executionNamePrefix string + if len(serviceName) > AnsibleExecutionServiceNameLen { + executionNamePrefix = serviceName[:AnsibleExecutionServiceNameLen] + } else { + executionNamePrefix = serviceName + } + return executionNamePrefix +} + +// GetAnsibleExecutionNameAndLabels Name and Labels of AnsibleEE +func GetAnsibleExecutionNameAndLabels(service *dataplanev1.OpenStackDataPlaneService, + deploymentName string, + nodeSetName string) (string, map[string]string) { + executionName := fmt.Sprintf("%s-%s", getAnsibleExecutionNamePrefix(service.Name), deploymentName) + if !service.Spec.DeployOnAllNodeSets { + executionName = fmt.Sprintf("%s-%s", executionName, nodeSetName) + } + if len(executionName) > AnsibleExcecutionNameLabelLen { + executionName = executionName[:AnsibleExcecutionNameLabelLen] + } + + labels := map[string]string{ + "openstackdataplaneservice": service.Name, + "openstackdataplanedeployment": deploymentName, + "openstackdataplanenodeset": nodeSetName, + } + return executionName, labels +} diff --git a/pkg/dataplane/util/const.go b/pkg/dataplane/util/const.go new file mode 100644 index 000000000..e16b44ce1 --- /dev/null +++ b/pkg/dataplane/util/const.go @@ -0,0 +1,24 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +const ( + // AnsibleExecutionServiceNameLen max length for the ansibleEE service name prefix + AnsibleExecutionServiceNameLen = 53 + // AnsibleExcecutionNameLabelLen max length for the ansibleEE execution name + AnsibleExcecutionNameLabelLen = 63 +) diff --git a/pkg/dataplane/util/datasource.go b/pkg/dataplane/util/datasource.go new file mode 100644 index 000000000..402c0f31e --- /dev/null +++ b/pkg/dataplane/util/datasource.go @@ -0,0 +1,72 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "context" + + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + utils "github.com/openstack-k8s-operators/lib-common/modules/common/util" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + v1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" +) + +// GetDataSourceCmSecret gets the ConfigMap and Secret from a DataSource +func GetDataSourceCmSecret(ctx context.Context, helper *helper.Helper, namespace string, dataSource dataplanev1.DataSource) (*v1.ConfigMap, *v1.Secret, error) { + + var configMap *v1.ConfigMap + var secret *v1.Secret + + client := helper.GetClient() + + switch { + case dataSource.ConfigMapRef != nil: + cm := dataSource.ConfigMapRef + optional := cm.Optional != nil && *cm.Optional + configMap = &v1.ConfigMap{} + err := client.Get(ctx, types.NamespacedName{Name: cm.Name, Namespace: namespace}, configMap) + if err != nil { + if k8s_errors.IsNotFound(err) && optional { + // ignore error when marked optional + utils.LogForObject(helper, "Optional ConfigMap not found", configMap) + return nil, nil, nil + } + utils.LogErrorForObject(helper, err, "Required ConfigMap not found", configMap) + return configMap, secret, err + } + + case dataSource.SecretRef != nil: + s := dataSource.SecretRef + optional := s.Optional != nil && *s.Optional + secret = &v1.Secret{} + err := client.Get(ctx, types.NamespacedName{Name: s.Name, Namespace: namespace}, secret) + if err != nil { + if k8s_errors.IsNotFound(err) && optional { + // ignore error when marked optional + utils.LogForObject(helper, "Optional Secret not found", secret) + return nil, nil, nil + } + utils.LogErrorForObject(helper, err, "Required Secret not found", secret) + return configMap, secret, err + } + + } + + return configMap, secret, nil +} diff --git a/pkg/dataplane/util/version.go b/pkg/dataplane/util/version.go new file mode 100644 index 
000000000..93e53199b --- /dev/null +++ b/pkg/dataplane/util/version.go @@ -0,0 +1,105 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "errors" + + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetVersion - Get OpenStackVersion and assume at most 1 should exist +func GetVersion(ctx context.Context, helper *helper.Helper, namespace string) (*openstackv1.OpenStackVersion, error) { + log := helper.GetLogger() + var version *openstackv1.OpenStackVersion + versions := &openstackv1.OpenStackVersionList{} + opts := []client.ListOption{ + client.InNamespace(namespace), + } + if err := helper.GetClient().List(ctx, versions, opts...); err != nil { + log.Error(err, "Unable to retrieve OpenStackVersions %w") + return nil, err + } + if len(versions.Items) > 1 { + errorMsg := "Found multiple OpenStackVersions when at most 1 should exist" + err := errors.New(errorMsg) + log.Error(err, errorMsg) + return nil, err + } else if len(versions.Items) == 1 { + version = &versions.Items[0] + } else { + version = nil + } + + return version, nil +} + +// GetContainerImages - get the container image values considering either the +// OpenStackVersion or the defaults +func 
GetContainerImages(version *openstackv1.OpenStackVersion) openstackv1.ContainerImages { + + var containerImages openstackv1.ContainerImages + + // Set the containerImages variable for the container images If there is an + // OpenStackVersion, use the value from there, else use the default value. + if version != nil { + containerImages.AnsibleeeImage = version.Status.ContainerImages.AnsibleeeImage + containerImages.CeilometerComputeImage = version.Status.ContainerImages.CeilometerComputeImage + containerImages.CeilometerIpmiImage = version.Status.ContainerImages.CeilometerIpmiImage + containerImages.EdpmFrrImage = version.Status.ContainerImages.EdpmFrrImage + containerImages.EdpmIscsidImage = version.Status.ContainerImages.EdpmIscsidImage + containerImages.EdpmLogrotateCrondImage = version.Status.ContainerImages.EdpmLogrotateCrondImage + containerImages.EdpmMultipathdImage = version.Status.ContainerImages.EdpmMultipathdImage + containerImages.EdpmNeutronDhcpAgentImage = version.Status.ContainerImages.EdpmNeutronDhcpAgentImage + containerImages.EdpmNeutronMetadataAgentImage = version.Status.ContainerImages.EdpmNeutronMetadataAgentImage + containerImages.EdpmNeutronOvnAgentImage = version.Status.ContainerImages.EdpmNeutronOvnAgentImage + containerImages.EdpmNeutronSriovAgentImage = version.Status.ContainerImages.EdpmNeutronSriovAgentImage + containerImages.EdpmNodeExporterImage = version.Status.ContainerImages.EdpmNodeExporterImage + containerImages.EdpmOvnBgpAgentImage = version.Status.ContainerImages.EdpmOvnBgpAgentImage + containerImages.NovaComputeImage = version.Status.ContainerImages.NovaComputeImage + containerImages.OvnControllerImage = version.Status.ContainerImages.OvnControllerImage + containerImages.OsContainerImage = version.Status.ContainerImages.OsContainerImage + containerImages.AgentImage = version.Status.ContainerImages.AgentImage + containerImages.ApacheImage = version.Status.ContainerImages.ApacheImage + } else { + containerImages.AnsibleeeImage = 
dataplanev1.ContainerImages.AnsibleeeImage + containerImages.CeilometerComputeImage = dataplanev1.ContainerImages.CeilometerComputeImage + containerImages.CeilometerIpmiImage = dataplanev1.ContainerImages.CeilometerIpmiImage + containerImages.EdpmFrrImage = dataplanev1.ContainerImages.EdpmFrrImage + containerImages.EdpmIscsidImage = dataplanev1.ContainerImages.EdpmIscsidImage + containerImages.EdpmLogrotateCrondImage = dataplanev1.ContainerImages.EdpmLogrotateCrondImage + containerImages.EdpmMultipathdImage = dataplanev1.ContainerImages.EdpmMultipathdImage + containerImages.EdpmNeutronDhcpAgentImage = dataplanev1.ContainerImages.EdpmNeutronDhcpAgentImage + containerImages.EdpmNeutronMetadataAgentImage = dataplanev1.ContainerImages.EdpmNeutronMetadataAgentImage + containerImages.EdpmNeutronOvnAgentImage = dataplanev1.ContainerImages.EdpmNeutronOvnAgentImage + containerImages.EdpmNeutronSriovAgentImage = dataplanev1.ContainerImages.EdpmNeutronSriovAgentImage + containerImages.EdpmNodeExporterImage = dataplanev1.ContainerImages.EdpmNodeExporterImage + containerImages.EdpmOvnBgpAgentImage = dataplanev1.ContainerImages.EdpmOvnBgpAgentImage + containerImages.NovaComputeImage = dataplanev1.ContainerImages.NovaComputeImage + containerImages.OvnControllerImage = dataplanev1.ContainerImages.OvnControllerImage + containerImages.OsContainerImage = dataplanev1.ContainerImages.OsContainerImage + containerImages.AgentImage = dataplanev1.ContainerImages.AgentImage + containerImages.ApacheImage = dataplanev1.ContainerImages.ApacheImage + + } + + return containerImages +} diff --git a/pkg/openstack/dataplane.go b/pkg/openstack/dataplane.go index 5758e3e31..c5ed1d9c8 100644 --- a/pkg/openstack/dataplane.go +++ b/pkg/openstack/dataplane.go @@ -6,7 +6,7 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/helper" corev1beta1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" - dataplanev1 
"github.com/openstack-k8s-operators/dataplane-operator/api/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/tests/functional/base_test.go b/tests/functional/ctlplane/base_test.go similarity index 100% rename from tests/functional/base_test.go rename to tests/functional/ctlplane/base_test.go diff --git a/tests/functional/openstackclient_webhook_test.go b/tests/functional/ctlplane/openstackclient_webhook_test.go similarity index 100% rename from tests/functional/openstackclient_webhook_test.go rename to tests/functional/ctlplane/openstackclient_webhook_test.go diff --git a/tests/functional/openstackoperator_controller_test.go b/tests/functional/ctlplane/openstackoperator_controller_test.go similarity index 100% rename from tests/functional/openstackoperator_controller_test.go rename to tests/functional/ctlplane/openstackoperator_controller_test.go diff --git a/tests/functional/openstackversion_controller_test.go b/tests/functional/ctlplane/openstackversion_controller_test.go similarity index 100% rename from tests/functional/openstackversion_controller_test.go rename to tests/functional/ctlplane/openstackversion_controller_test.go diff --git a/tests/functional/suite_test.go b/tests/functional/ctlplane/suite_test.go similarity index 85% rename from tests/functional/suite_test.go rename to tests/functional/ctlplane/suite_test.go index e87fe27d1..0cf71bf40 100644 --- a/tests/functional/suite_test.go +++ b/tests/functional/ctlplane/suite_test.go @@ -34,7 +34,6 @@ import ( barbicanv1 "github.com/openstack-k8s-operators/barbican-operator/api/v1beta1" cinderv1 "github.com/openstack-k8s-operators/cinder-operator/api/v1beta1" - dataplanev1beta1 "github.com/openstack-k8s-operators/dataplane-operator/api/v1beta1" designatev1 "github.com/openstack-k8s-operators/designate-operator/api/v1beta1" glancev1 "github.com/openstack-k8s-operators/glance-operator/api/v1beta1" 
heatv1 "github.com/openstack-k8s-operators/heat-operator/api/v1beta1" @@ -50,6 +49,7 @@ import ( octaviav1 "github.com/openstack-k8s-operators/octavia-operator/api/v1beta1" openstackclientv1 "github.com/openstack-k8s-operators/openstack-operator/apis/client/v1beta1" corev1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1beta1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" "github.com/openstack-k8s-operators/openstack-operator/pkg/openstack" ovnv1 "github.com/openstack-k8s-operators/ovn-operator/api/v1beta1" placementv1 "github.com/openstack-k8s-operators/placement-operator/api/v1beta1" @@ -100,7 +100,7 @@ const ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") + RunSpecs(t, "CtlPlane Controller Suite") } var _ = BeforeSuite(func() { @@ -110,76 +110,74 @@ var _ = BeforeSuite(func() { })) ctx, cancel = context.WithCancel(context.TODO()) + const gomod = "../../../go.mod" - routev1CRDs, err := test.GetOpenShiftCRDDir("route/v1", "../../go.mod") + routev1CRDs, err := test.GetOpenShiftCRDDir("route/v1", gomod) Expect(err).ShouldNot(HaveOccurred()) mariaDBCRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/mariadb-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/mariadb-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) infraCRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/infra-operator/apis", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/infra-operator/apis", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) cinderv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/cinder-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/cinder-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) glancev1CRDs, err := test.GetCRDDirFromModule( - 
"github.com/openstack-k8s-operators/glance-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/glance-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) heatv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/heat-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/heat-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) horizonv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/horizon-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/horizon-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) ironicv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/ironic-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/ironic-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) keystonev1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/keystone-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/keystone-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) manilav1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/manila-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/manila-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) neutronv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/neutron-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/neutron-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) novav1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/nova-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/nova-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) octaviav1CRDs, err := test.GetCRDDirFromModule( - 
"github.com/openstack-k8s-operators/octavia-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/octavia-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) ovnv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/ovn-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/ovn-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) placementv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/placement-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/placement-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) swiftv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/swift-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/swift-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) telemetryv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/telemetry-operator/api", "../../go.mod", "bases") - Expect(err).ShouldNot(HaveOccurred()) - dataplanev1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/dataplane-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/telemetry-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) designatev1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/designate-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/designate-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) barbicanv1CRDs, err := test.GetCRDDirFromModule( - "github.com/openstack-k8s-operators/barbican-operator/api", "../../go.mod", "bases") + "github.com/openstack-k8s-operators/barbican-operator/api", gomod, "bases") Expect(err).ShouldNot(HaveOccurred()) rabbitmqv2CRDs, err := test.GetCRDDirFromModule( - "github.com/rabbitmq/cluster-operator/v2", "../../go.mod", 
"config/crd/bases") + "github.com/rabbitmq/cluster-operator/v2", gomod, "config/crd/bases") Expect(err).ShouldNot(HaveOccurred()) - certmgrv1CRDs, err := test.GetOpenShiftCRDDir("cert-manager/v1", "../../go.mod") + certmgrv1CRDs, err := test.GetOpenShiftCRDDir("cert-manager/v1", gomod) Expect(err).ShouldNot(HaveOccurred()) By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "..", "config", "crd", "bases"), routev1CRDs, mariaDBCRDs, infraCRDs, @@ -197,7 +195,6 @@ var _ = BeforeSuite(func() { placementv1CRDs, swiftv1CRDs, telemetryv1CRDs, - dataplanev1CRDs, designatev1CRDs, barbicanv1CRDs, rabbitmqv2CRDs, @@ -205,7 +202,7 @@ var _ = BeforeSuite(func() { }, ErrorIfCRDPathMissing: true, WebhookInstallOptions: envtest.WebhookInstallOptions{ - Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, // NOTE(gibi): if localhost is resolved to ::1 (ipv6) then starting // the webhook fails as it try to parse the address as ipv4 and // failing on the colons in ::1 @@ -271,7 +268,7 @@ var _ = BeforeSuite(func() { //+kubebuilder:scaffold:scheme - logger = ctrl.Log.WithName("---Test---") + logger = ctrl.Log.WithName("---CtlPlane Test---") k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) diff --git a/tests/functional/dataplane/base_test.go b/tests/functional/dataplane/base_test.go new file mode 100644 index 000000000..509a9a23f --- /dev/null +++ b/tests/functional/dataplane/base_test.go @@ -0,0 +1,420 @@ +package functional + +import ( + "fmt" + + . 
"github.com/onsi/gomega" //revive:disable:dot-imports + "gopkg.in/yaml.v3" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + + infrav1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" +) + +var DefaultEdpmServiceAnsibleVarList = []string{ + "edpm_frr_image", + "edpm_iscsid_image", + "edpm_logrotate_crond_image", + "edpm_neutron_metadata_agent_image", + "edpm_nova_compute_image", + "edpm_ovn_controller_agent_image", + "edpm_ovn_bgp_agent_image", +} + +var CustomEdpmServiceDomainTag = "test-image:latest" +var DefaultBackoffLimit = int32(6) + +// Create OpenstackDataPlaneNodeSet in k8s and test that no errors occur +func CreateDataplaneNodeSet(name types.NamespacedName, spec map[string]interface{}) *unstructured.Unstructured { + instance := DefaultDataplaneNodeSetTemplate(name, spec) + return th.CreateUnstructured(instance) +} + +// Create OpenStackDataPlaneDeployment in k8s and test that no errors occur +func CreateDataplaneDeployment(name types.NamespacedName, spec map[string]interface{}) *unstructured.Unstructured { + instance := DefaultDataplaneDeploymentTemplate(name, spec) + return th.CreateUnstructured(instance) +} + +// Create an OpenStackDataPlaneService with a given NamespacedName, assert on success +func CreateDataplaneService(name types.NamespacedName, globalService bool) *unstructured.Unstructured { + var raw map[string]interface{} + if globalService { + raw = DefaultDataplaneGlobalService(name) + } else { + raw = DefaultDataplaneService(name) + } + return th.CreateUnstructured(raw) +} + +// Create an OpenStackDataPlaneService with a given NamespacedName, and a given 
unstructured spec +func CreateDataPlaneServiceFromSpec(name types.NamespacedName, spec map[string]interface{}) *unstructured.Unstructured { + raw := map[string]interface{}{ + + "apiVersion": "dataplane.openstack.org/v1beta1", + "kind": "OpenStackDataPlaneService", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": spec, + } + return th.CreateUnstructured(raw) +} + +// Build CustomServiceImageSpec struct with empty `Nodes` list +func CustomServiceImageSpec() map[string]interface{} { + + var ansibleServiceVars = make(map[string]interface{}) + for _, svcName := range DefaultEdpmServiceAnsibleVarList { + imageAddress := fmt.Sprintf(`"%s.%s"`, svcName, CustomEdpmServiceDomainTag) + ansibleServiceVars[svcName] = imageAddress + } + + return map[string]interface{}{ + "preProvisioned": true, + "nodeTemplate": map[string]interface{}{ + "networks": []infrav1.IPSetNetwork{ + {Name: "ctlplane", SubnetName: "subnet1"}, + }, + "ansibleSSHPrivateKeySecret": "dataplane-ansible-ssh-private-key-secret", + "ansible": map[string]interface{}{ + "ansibleVars": ansibleServiceVars, + }, + }, + "nodes": map[string]dataplanev1.NodeSection{"edpm-compute-node-1": {}}, + } +} + +func CreateNetConfig(name types.NamespacedName, spec map[string]interface{}) *unstructured.Unstructured { + raw := DefaultNetConfig(name, spec) + return th.CreateUnstructured(raw) +} + +func CreateDNSMasq(name types.NamespacedName, spec map[string]interface{}) *unstructured.Unstructured { + raw := DefaultDNSMasq(name, spec) + return th.CreateUnstructured(raw) +} + +// Create SSHSecret +func CreateSSHSecret(name types.NamespacedName) *corev1.Secret { + return th.CreateSecret( + types.NamespacedName{Namespace: name.Namespace, Name: name.Name}, + map[string][]byte{ + "ssh-privatekey": []byte("blah"), + "authorized_keys": []byte("blih"), + }, + ) +} + +// Struct initialization + +// Build OpenStackDataPlaneNodeSetSpec struct and fill it with preset values +func 
DefaultDataPlaneNodeSetSpec(nodeSetName string) map[string]interface{} { + + return map[string]interface{}{ + "services": []string{ + "foo-service", + "foo-update-service", + "global-service", + }, + "nodeTemplate": map[string]interface{}{ + "ansibleSSHPrivateKeySecret": "dataplane-ansible-ssh-private-key-secret", + "ansible": map[string]interface{}{ + "ansibleUser": "cloud-user", + }, + }, + "nodes": map[string]interface{}{ + fmt.Sprintf("%s-node-1", nodeSetName): map[string]interface{}{ + "hostName": "edpm-compute-node-1", + "networks": []infrav1.IPSetNetwork{ + {Name: "ctlplane", SubnetName: "subnet1"}, + }, + }, + }, + "baremetalSetTemplate": map[string]interface{}{ + "baremetalHosts": map[string]interface{}{ + "ctlPlaneIP": map[string]interface{}{}, + }, + "deploymentSSHSecret": "dataplane-ansible-ssh-private-key-secret", + "ctlplaneInterface": "172.20.12.1", + }, + "secretMaxSize": 1048576, + "tlsEnabled": true, + } +} + +// Build OpenStackDataPlaneNodeSetSpec struct with empty `Nodes` list +func DefaultDataPlaneNoNodeSetSpec(tlsEnabled bool) map[string]interface{} { + spec := map[string]interface{}{ + "preProvisioned": true, + "nodeTemplate": map[string]interface{}{ + "networks": []infrav1.IPSetNetwork{ + {Name: "ctlplane", SubnetName: "subnet1"}, + }, + "ansibleSSHPrivateKeySecret": "dataplane-ansible-ssh-private-key-secret", + }, + "nodes": map[string]interface{}{}, + "servicesOverride": []string{}, + } + if tlsEnabled { + spec["tlsEnabled"] = true + } + spec["nodes"] = map[string]dataplanev1.NodeSection{"edpm-compute-node-1": {}} + return spec +} + +// Build OpenStackDataPlaneDeploymentSpec and fill it with preset values +func DefaultDataPlaneDeploymentSpec() map[string]interface{} { + + return map[string]interface{}{ + "nodeSets": []string{ + "edpm-compute-nodeset", + }, + "servicesOverride": []string{}, + } +} + +func DefaultNetConfigSpec() map[string]interface{} { + return map[string]interface{}{ + "networks": []map[string]interface{}{{ + "dnsDomain": 
"test-domain.test", + "mtu": 1500, + "name": "CtlPLane", + "subnets": []map[string]interface{}{{ + "allocationRanges": []map[string]interface{}{{ + "end": "172.20.12.120", + "start": "172.20.12.0", + }, + }, + "name": "subnet1", + "cidr": "172.20.12.0/16", + "gateway": "172.20.12.1", + }, + }, + }, + }, + } +} + +func DefaultDNSMasqSpec() map[string]interface{} { + return map[string]interface{}{ + "replicas": 1, + } +} + +func SimulateDNSMasqComplete(name types.NamespacedName) { + Eventually(func(g Gomega) { + dnsMasq := &infrav1.DNSMasq{} + g.Expect(th.K8sClient.Get(th.Ctx, name, dnsMasq)).Should(Succeed()) + dnsMasq.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) + dnsMasq.Status.DNSClusterAddresses = []string{"192.168.122.80"} + dnsMasq.Status.DNSAddresses = []string{"192.168.122.80"} + g.Expect(th.K8sClient.Status().Update(th.Ctx, dnsMasq)).To(Succeed()) + }, th.Timeout, th.Interval).Should(Succeed()) + th.Logger.Info("Simulated DNS creation completed", "on", name) +} + +// SimulateDNSDataComplete - Simulates the result of the DNSData status +func SimulateDNSDataComplete(name types.NamespacedName) { + Eventually(func(g Gomega) { + dnsData := &infrav1.DNSData{} + + g.Expect(th.K8sClient.Get(th.Ctx, name, dnsData)).Should(Succeed()) + dnsData.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) + // This can return conflict so we have the gomega.Eventually block to retry + g.Expect(th.K8sClient.Status().Update(th.Ctx, dnsData)).To(Succeed()) + + }, th.Timeout, th.Interval).Should(Succeed()) + + th.Logger.Info("Simulated dnsData creation completed", "on", name) +} + +// SimulateIPSetComplete - Simulates the result of the IPSet status +func SimulateIPSetComplete(name types.NamespacedName) { + Eventually(func(g Gomega) { + IPSet := &infrav1.IPSet{} + g.Expect(th.K8sClient.Get(th.Ctx, name, IPSet)).Should(Succeed()) + gateway := "172.20.12.1" + IPSet.Status.Reservation = []infrav1.IPSetReservation{ + { + Address: 
"172.20.12.76", + Cidr: "172.20.12.0/16", + MTU: 1500, + Network: "CtlPlane", + Subnet: "subnet1", + Gateway: &gateway, + }, + } + // This can return conflict so we have the gomega.Eventually block to retry + g.Expect(th.K8sClient.Status().Update(th.Ctx, IPSet)).To(Succeed()) + + }, th.Timeout, th.Interval).Should(Succeed()) + + th.Logger.Info("Simulated IPSet creation completed", "on", name) +} + +// Build OpenStackDataPlaneNodeSet struct and fill it with preset values +func DefaultDataplaneNodeSetTemplate(name types.NamespacedName, spec map[string]interface{}) map[string]interface{} { + return map[string]interface{}{ + + "apiVersion": "dataplane.openstack.org/v1beta1", + "kind": "OpenStackDataPlaneNodeSet", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": spec, + } +} + +// Build OpenStackDataPlaneDeployment struct and fill it with preset values +func DefaultDataplaneDeploymentTemplate(name types.NamespacedName, spec map[string]interface{}) map[string]interface{} { + return map[string]interface{}{ + + "apiVersion": "dataplane.openstack.org/v1beta1", + "kind": "OpenStackDataPlaneDeployment", + + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": spec, + } +} + +func DefaultNetConfig(name types.NamespacedName, spec map[string]interface{}) map[string]interface{} { + return map[string]interface{}{ + "apiVersion": "network.openstack.org/v1beta1", + "kind": "NetConfig", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": spec, + } +} + +func DefaultDNSMasq(name types.NamespacedName, spec map[string]interface{}) map[string]interface{} { + return map[string]interface{}{ + "apiVersion": "network.openstack.org/v1beta1", + "kind": "DNSMasq", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": spec, + } +} + +// Create an empty OpenStackDataPlaneService struct +// 
containing only given NamespacedName as metadata +func DefaultDataplaneService(name types.NamespacedName) map[string]interface{} { + + return map[string]interface{}{ + + "apiVersion": "dataplane.openstack.org/v1beta1", + "kind": "OpenStackDataPlaneService", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }} +} + +// Create an empty OpenStackDataPlaneService struct +// containing only given NamespacedName as metadata +func DefaultDataplaneGlobalService(name types.NamespacedName) map[string]interface{} { + + return map[string]interface{}{ + + "apiVersion": "dataplane.openstack.org/v1beta1", + "kind": "OpenStackDataPlaneService", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": map[string]interface{}{ + "deployOnAllNodeSets": true, + }, + } +} + +// Get resources + +// Retrieve OpenStackDataPlaneDeployment and check for errors +func GetDataplaneDeployment(name types.NamespacedName) *dataplanev1.OpenStackDataPlaneDeployment { + instance := &dataplanev1.OpenStackDataPlaneDeployment{} + Eventually(func(g Gomega) error { + g.Expect(k8sClient.Get(ctx, name, instance)).Should(Succeed()) + return nil + }, timeout, interval).Should(Succeed()) + return instance +} + +// Retrieve OpenStackDataPlaneDeployment and check for errors +func GetDataplaneNodeSet(name types.NamespacedName) *dataplanev1.OpenStackDataPlaneNodeSet { + instance := &dataplanev1.OpenStackDataPlaneNodeSet{} + Eventually(func(g Gomega) error { + g.Expect(k8sClient.Get(ctx, name, instance)).Should(Succeed()) + return nil + }, timeout, interval).Should(Succeed()) + return instance +} + +// Get service with given NamespacedName, assert on successful retrieval +func GetService(name types.NamespacedName) *dataplanev1.OpenStackDataPlaneService { + foundService := &dataplanev1.OpenStackDataPlaneService{} + Eventually(func(g Gomega) error { + g.Expect(k8sClient.Get(ctx, name, foundService)).Should(Succeed()) + return nil 
+ }, timeout, interval).Should(Succeed()) + return foundService +} + +// Get OpenStackDataPlaneNodeSet conditions +func DataplaneConditionGetter(name types.NamespacedName) condition.Conditions { + instance := GetDataplaneNodeSet(name) + return instance.Status.Conditions +} + +// Get OpenStackDataPlaneDeployment conditions +func DataplaneDeploymentConditionGetter(name types.NamespacedName) condition.Conditions { + instance := GetDataplaneDeployment(name) + return instance.Status.Conditions +} + +func GetAnsibleee(name types.NamespacedName) *v1beta1.OpenStackAnsibleEE { + instance := &v1beta1.OpenStackAnsibleEE{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(ctx, name, instance)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + return instance +} + +// Delete resources + +// Delete namespace from k8s, check for errors +func DeleteNamespace(name string) { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + Expect(k8sClient.Delete(ctx, ns)).Should(Succeed()) +} + +func getCtlPlaneIP(secret *corev1.Secret) string { + secretData := secret.Data["inventory"] + + var inv AnsibleInventory + err := yaml.Unmarshal(secretData, &inv) + if err != nil { + fmt.Printf("Error unmarshalling secretData: %v", err) + } + return inv.EdpmComputeNodeset.Hosts.Node.CtlPlaneIP +} diff --git a/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go b/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go new file mode 100644 index 000000000..556851ccf --- /dev/null +++ b/tests/functional/dataplane/openstackdataplanedeployment_controller_test.go @@ -0,0 +1,722 @@ +package functional + +import ( + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . 
"github.com/onsi/gomega" //revive:disable:dot-imports + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + dataplaneutil "github.com/openstack-k8s-operators/openstack-operator/pkg/dataplane/util" + + //revive:disable-next-line:dot-imports + . "github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" + ansibleeev1 "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("Dataplane Deployment Test", func() { + var dataplaneDeploymentName types.NamespacedName + var dataplaneNodeSetName types.NamespacedName + var dataplaneSSHSecretName types.NamespacedName + var neutronOvnMetadataSecretName types.NamespacedName + var novaNeutronMetadataSecretName types.NamespacedName + var novaCellComputeConfigSecretName types.NamespacedName + var novaMigrationSSHKey types.NamespacedName + var ceilometerConfigSecretName types.NamespacedName + var dataplaneNetConfigName types.NamespacedName + var dnsMasqName types.NamespacedName + var dataplaneNodeName types.NamespacedName + var dataplaneMultiNodesetDeploymentName types.NamespacedName + var dataplaneServiceName types.NamespacedName + var dataplaneUpdateServiceName types.NamespacedName + var dataplaneGlobalServiceName types.NamespacedName + + BeforeEach(func() { + dnsMasqName = types.NamespacedName{ + Name: "dnsmasq", + Namespace: namespace, + } + dataplaneDeploymentName = types.NamespacedName{ + Name: "edpm-deployment", + Namespace: namespace, + } + dataplaneNodeSetName = types.NamespacedName{ + Name: "edpm-compute-nodeset", + Namespace: namespace, + } + dataplaneNodeName = types.NamespacedName{ + Namespace: namespace, + Name: "edpm-compute-node-1", + } + 
dataplaneSSHSecretName = types.NamespacedName{ + Namespace: namespace, + Name: "dataplane-ansible-ssh-private-key-secret", + } + neutronOvnMetadataSecretName = types.NamespacedName{ + Namespace: namespace, + Name: "neutron-ovn-metadata-agent-neutron-config", + } + novaNeutronMetadataSecretName = types.NamespacedName{ + Namespace: namespace, + Name: "nova-metadata-neutron-config", + } + novaCellComputeConfigSecretName = types.NamespacedName{ + Namespace: namespace, + Name: "nova-cell1-compute-config", + } + novaMigrationSSHKey = types.NamespacedName{ + Namespace: namespace, + Name: "nova-migration-ssh-key", + } + ceilometerConfigSecretName = types.NamespacedName{ + Namespace: namespace, + Name: "ceilometer-compute-config-data", + } + dataplaneNetConfigName = types.NamespacedName{ + Namespace: namespace, + Name: "dataplane-netconfig", + } + dataplaneMultiNodesetDeploymentName = types.NamespacedName{ + Namespace: namespace, + Name: "edpm-compute-nodeset-global", + } + dataplaneServiceName = types.NamespacedName{ + Namespace: namespace, + Name: "foo-service", + } + dataplaneUpdateServiceName = types.NamespacedName{ + Namespace: namespace, + Name: "foo-update-service", + } + dataplaneGlobalServiceName = types.NamespacedName{ + Name: "global-service", + Namespace: namespace, + } + err := os.Setenv("OPERATOR_SERVICES", "../../../config/services") + Expect(err).NotTo(HaveOccurred()) + }) + + When("A dataplaneDeployment is created with matching NodeSet", func() { + BeforeEach(func() { + CreateSSHSecret(dataplaneSSHSecretName) + DeferCleanup(th.DeleteInstance, th.CreateSecret(neutronOvnMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaNeutronMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaCellComputeConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, 
th.CreateSecret(novaMigrationSSHKey, map[string][]byte{ + "ssh-privatekey": []byte("fake-ssh-private-key"), + "ssh-publickey": []byte("fake-ssh-public-key"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(ceilometerConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + // DefaultDataPlanenodeSetSpec comes with three mock services + // default service + CreateDataplaneService(dataplaneServiceName, false) + // marked for deployment on all nodesets + CreateDataplaneService(dataplaneGlobalServiceName, true) + // with EDPMServiceType set + CreateDataPlaneServiceFromSpec(dataplaneUpdateServiceName, map[string]interface{}{ + "EDPMServiceType": "foo-service"}) + + DeferCleanup(th.DeleteService, dataplaneServiceName) + DeferCleanup(th.DeleteService, dataplaneGlobalServiceName) + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNodeSetSpec(dataplaneNodeSetName.Name))) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneDeploymentName, DefaultDataPlaneDeploymentSpec())) + }) + + It("Should have Spec fields initialized", func() { + dataplaneDeploymentInstance := GetDataplaneDeployment(dataplaneDeploymentName) + expectedSpec := dataplanev1.OpenStackDataPlaneDeploymentSpec{ + NodeSets: []string{"edpm-compute-nodeset"}, + AnsibleTags: "", + AnsibleLimit: "", + AnsibleSkipTags: "", + BackoffLimit: &DefaultBackoffLimit, + DeploymentRequeueTime: 15, + ServicesOverride: nil, + } + Expect(dataplaneDeploymentInstance.Spec).Should(Equal(expectedSpec)) + }) + + It("should have conditions set", func() { + + nodeSet := dataplanev1.OpenStackDataPlaneNodeSet{} + baremetal := 
baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeSet.Name, + Namespace: nodeSet.Namespace, + }, + } + // Create config map for OVN service + ovnConfigMapName := types.NamespacedName{ + Namespace: namespace, + Name: "ovncontroller-config", + } + mapData := map[string]interface{}{ + "ovsdb-config": "test-ovn-config", + } + th.CreateConfigMap(ovnConfigMapName, mapData) + + nodeSet = *GetDataplaneNodeSet(dataplaneNodeSetName) + + // Set baremetal provisioning conditions to True + Eventually(func(g Gomega) { + // OpenStackBaremetalSet has the same name as OpenStackDataPlaneNodeSet + g.Expect(th.K8sClient.Get(th.Ctx, dataplaneNodeSetName, &baremetal)).To(Succeed()) + baremetal.Status.Conditions.MarkTrue( + condition.ReadyCondition, + condition.ReadyMessage) + g.Expect(th.K8sClient.Status().Update(th.Ctx, &baremetal)).To(Succeed()) + + }, th.Timeout, th.Interval).Should(Succeed()) + + // Create all services necessary for deployment + for _, serviceName := range nodeSet.Spec.Services { + dataplaneServiceName := types.NamespacedName{ + Name: serviceName, + Namespace: namespace, + } + service := GetService(dataplaneServiceName) + deployment := GetDataplaneDeployment(dataplaneDeploymentName) + //Retrieve service AnsibleEE and set JobStatus to Successful + aeeName, _ := dataplaneutil.GetAnsibleExecutionNameAndLabels( + service, deployment.GetName(), nodeSet.GetName()) + Eventually(func(g Gomega) { + // Make an AnsibleEE name for each service + ansibleeeName := types.NamespacedName{ + Name: aeeName, + Namespace: dataplaneDeploymentName.Namespace, + } + ansibleEE := &ansibleeev1.OpenStackAnsibleEE{ + ObjectMeta: metav1.ObjectMeta{ + Name: ansibleeeName.Name, + Namespace: ansibleeeName.Namespace, + }} + g.Expect(th.K8sClient.Get(th.Ctx, ansibleeeName, ansibleEE)).To(Succeed()) + ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + + g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) + 
g.Expect(ansibleEE.Spec.ExtraVars).To(HaveKey("edpm_override_hosts")) + if service.Spec.EDPMServiceType != "" { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + } else { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", serviceName))) + } + if service.Spec.DeployOnAllNodeSets { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_override_hosts"])).To(Equal("\"all\"")) + } else { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_override_hosts"])).To(Equal(fmt.Sprintf("\"%s\"", dataplaneNodeSetName.Name))) + } + }, th.Timeout, th.Interval).Should(Succeed()) + } + + th.ExpectCondition( + dataplaneDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + dataplaneDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.InputReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("A dataplaneDeployment is created with two NodeSets", func() { + BeforeEach(func() { + CreateSSHSecret(dataplaneSSHSecretName) + DeferCleanup(th.DeleteInstance, th.CreateSecret(neutronOvnMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaNeutronMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaCellComputeConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaMigrationSSHKey, map[string][]byte{ + "ssh-privatekey": []byte("fake-ssh-private-key"), + "ssh-publickey": []byte("fake-ssh-public-key"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(ceilometerConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + + alphaNodeSetName := types.NamespacedName{ + Name: "alpha-nodeset", + 
Namespace: namespace, + } + betaNodeSetName := types.NamespacedName{ + Name: "beta-nodeset", + Namespace: namespace, + } + + // Three services on both nodesets + CreateDataplaneService(dataplaneServiceName, false) + CreateDataplaneService(dataplaneGlobalServiceName, true) + CreateDataPlaneServiceFromSpec(dataplaneUpdateServiceName, map[string]interface{}{ + "EDPMServiceType": "foo-service"}) + + DeferCleanup(th.DeleteService, dataplaneServiceName) + DeferCleanup(th.DeleteService, dataplaneGlobalServiceName) + + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + // Create both nodesets + + betaNodeName := fmt.Sprintf("%s-node-1", betaNodeSetName.Name) + betaNodeSetSpec := map[string]interface{}{ + "preProvisioned": false, + "services": []string{ + "foo-service", + }, + "nodeTemplate": map[string]interface{}{ + "ansibleSSHPrivateKeySecret": "dataplane-ansible-ssh-private-key-secret", + "ansible": map[string]interface{}{ + "ansibleUser": "cloud-user", + }, + }, + "nodes": map[string]interface{}{ + betaNodeName: map[string]interface{}{ + "hostname": betaNodeName, + "networks": []map[string]interface{}{{ + "name": "CtlPlane", + "subnetName": "subnet1", + }, + }, + }, + }, + "baremetalSetTemplate": map[string]interface{}{ + "baremetalHosts": map[string]interface{}{ + "ctlPlaneIP": map[string]interface{}{}, + }, + "deploymentSSHSecret": "dataplane-ansible-ssh-private-key-secret", + "ctlplaneInterface": "172.20.12.1", + }, + "tlsEnabled": true, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(alphaNodeSetName, DefaultDataPlaneNodeSetSpec(alphaNodeSetName.Name))) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(betaNodeSetName, betaNodeSetSpec)) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(alphaNodeSetName) + SimulateIPSetComplete(types.NamespacedName{Name: 
betaNodeName, Namespace: namespace}) + SimulateDNSDataComplete(betaNodeSetName) + + deploymentSpec := map[string]interface{}{ + "nodeSets": []string{ + "alpha-nodeset", + "beta-nodeset", + }, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneMultiNodesetDeploymentName, deploymentSpec)) + }) + + It("Should have Spec fields initialized", func() { + dataplaneDeploymentInstance := GetDataplaneDeployment(dataplaneMultiNodesetDeploymentName) + nodeSetsNames := []string{ + "alpha-nodeset", + "beta-nodeset", + } + + expectedSpec := dataplanev1.OpenStackDataPlaneDeploymentSpec{ + NodeSets: nodeSetsNames, + AnsibleTags: "", + AnsibleLimit: "", + AnsibleSkipTags: "", + BackoffLimit: &DefaultBackoffLimit, + DeploymentRequeueTime: 15, + ServicesOverride: nil, + } + Expect(dataplaneDeploymentInstance.Spec).Should(Equal(expectedSpec)) + }) + + It("should have conditions set", func() { + alphaNodeSetName := types.NamespacedName{ + Name: "alpha-nodeset", + Namespace: namespace, + } + betaNodeSetName := types.NamespacedName{ + Name: "beta-nodeset", + Namespace: namespace, + } + + baremetalAlpha := baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: alphaNodeSetName.Name, + Namespace: alphaNodeSetName.Namespace, + }, + } + + baremetalBeta := baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: betaNodeSetName.Name, + Namespace: betaNodeSetName.Namespace, + }, + } + + // Create config map for OVN service + ovnConfigMapName := types.NamespacedName{ + Namespace: namespace, + Name: "ovncontroller-config", + } + mapData := map[string]interface{}{ + "ovsdb-config": "test-ovn-config", + } + th.CreateConfigMap(ovnConfigMapName, mapData) + + nodeSetAlpha := *GetDataplaneNodeSet(alphaNodeSetName) + nodeSetBeta := *GetDataplaneNodeSet(betaNodeSetName) + + // Set baremetal provisioning conditions to True + Eventually(func(g Gomega) { + // OpenStackBaremetalSet has the same name as OpenStackDataPlaneNodeSet + 
g.Expect(th.K8sClient.Get(th.Ctx, alphaNodeSetName, &baremetalAlpha)).To(Succeed()) + baremetalAlpha.Status.Conditions.MarkTrue( + condition.ReadyCondition, + condition.ReadyMessage) + g.Expect(th.K8sClient.Status().Update(th.Ctx, &baremetalAlpha)).To(Succeed()) + // OpenStackBaremetalSet has the same name as OpenStackDataPlaneNodeSet + g.Expect(th.K8sClient.Get(th.Ctx, betaNodeSetName, &baremetalBeta)).To(Succeed()) + baremetalBeta.Status.Conditions.MarkTrue( + condition.ReadyCondition, + condition.ReadyMessage) + g.Expect(th.K8sClient.Status().Update(th.Ctx, &baremetalBeta)).To(Succeed()) + + }, th.Timeout, th.Interval).Should(Succeed()) + + // Create all services necessary for deployment + for _, serviceName := range nodeSetAlpha.Spec.Services { + dataplaneServiceName := types.NamespacedName{ + Name: serviceName, + Namespace: namespace, + } + service := GetService(dataplaneServiceName) + deployment := GetDataplaneDeployment(dataplaneMultiNodesetDeploymentName) + aeeName, _ := dataplaneutil.GetAnsibleExecutionNameAndLabels( + service, deployment.GetName(), nodeSetAlpha.GetName()) + //Retrieve service AnsibleEE and set JobStatus to Successful + Eventually(func(g Gomega) { + // Make an AnsibleEE name for each service + ansibleeeName := types.NamespacedName{ + Name: aeeName, + Namespace: dataplaneMultiNodesetDeploymentName.Namespace, + } + ansibleEE := GetAnsibleee(ansibleeeName) + if service.Spec.DeployOnAllNodeSets { + g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(4)) + } else { + g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(2)) + } + ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) + if service.Spec.EDPMServiceType != "" { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + } else { + 
g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", serviceName))) + } + if service.Spec.DeployOnAllNodeSets { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_override_hosts"])).To(Equal("\"all\"")) + } + }, th.Timeout, th.Interval).Should(Succeed()) + } + + // Create all services necessary for deployment + for _, serviceName := range nodeSetBeta.Spec.Services { + dataplaneServiceName := types.NamespacedName{ + Name: serviceName, + Namespace: namespace, + } + service := GetService(dataplaneServiceName) + deployment := GetDataplaneDeployment(dataplaneMultiNodesetDeploymentName) + aeeName, _ := dataplaneutil.GetAnsibleExecutionNameAndLabels( + service, deployment.GetName(), nodeSetBeta.GetName()) + + //Retrieve service AnsibleEE and set JobStatus to Successful + Eventually(func(g Gomega) { + // Make an AnsibleEE name for each service + ansibleeeName := types.NamespacedName{ + Name: aeeName, + Namespace: dataplaneMultiNodesetDeploymentName.Namespace, + } + ansibleEE := GetAnsibleee(ansibleeeName) + if service.Spec.DeployOnAllNodeSets { + g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(4)) + } else { + g.Expect(ansibleEE.Spec.ExtraMounts[0].Volumes).Should(HaveLen(2)) + } + ansibleEE.Status.JobStatus = ansibleeev1.JobStatusSucceeded + g.Expect(th.K8sClient.Status().Update(th.Ctx, ansibleEE)).To(Succeed()) + if service.Spec.EDPMServiceType != "" { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", service.Spec.EDPMServiceType))) + } else { + g.Expect(string(ansibleEE.Spec.ExtraVars["edpm_service_type"])).To(Equal(fmt.Sprintf("\"%s\"", serviceName))) + } + }, th.Timeout, th.Interval).Should(Succeed()) + } + + th.ExpectCondition( + dataplaneMultiNodesetDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + dataplaneMultiNodesetDeploymentName, + 
ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.InputReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("A dataplaneDeployment is created with a missing nodeset", func() { + BeforeEach(func() { + CreateSSHSecret(dataplaneSSHSecretName) + DeferCleanup(th.DeleteInstance, th.CreateSecret(neutronOvnMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaNeutronMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaCellComputeConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaMigrationSSHKey, map[string][]byte{ + "ssh-privatekey": []byte("fake-ssh-private-key"), + "ssh-publickey": []byte("fake-ssh-public-key"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(ceilometerConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + + alphaNodeSetName := types.NamespacedName{ + Name: "alpha-nodeset", + Namespace: namespace, + } + + // Two services on both nodesets + CreateDataplaneService(dataplaneServiceName, false) + + DeferCleanup(th.DeleteService, dataplaneServiceName) + + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + + // Create only one nodeset + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(alphaNodeSetName, DefaultDataPlaneNodeSetSpec(alphaNodeSetName.Name))) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(alphaNodeSetName) + + deploymentSpec := map[string]interface{}{ + "nodeSets": []string{ + "alpha-nodeset", + "beta-nodeset", + }, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneMultiNodesetDeploymentName, deploymentSpec)) + }) + + It("Should have Spec fields 
initialized", func() { + dataplaneDeploymentInstance := GetDataplaneDeployment(dataplaneMultiNodesetDeploymentName) + nodeSetsNames := []string{ + "alpha-nodeset", + "beta-nodeset", + } + + expectedSpec := dataplanev1.OpenStackDataPlaneDeploymentSpec{ + NodeSets: nodeSetsNames, + AnsibleTags: "", + AnsibleLimit: "", + AnsibleSkipTags: "", + BackoffLimit: &DefaultBackoffLimit, + DeploymentRequeueTime: 15, + ServicesOverride: nil, + } + Expect(dataplaneDeploymentInstance.Spec).Should(Equal(expectedSpec)) + }) + + It("should have conditions set to unknown", func() { + alphaNodeSetName := types.NamespacedName{ + Name: "alpha-nodeset", + Namespace: namespace, + } + betaNodeSetName := types.NamespacedName{ + Name: "beta-nodeset", + Namespace: namespace, + } + + baremetalAlpha := baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: alphaNodeSetName.Name, + Namespace: alphaNodeSetName.Namespace, + }, + } + + baremetalBeta := baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: betaNodeSetName.Name, + Namespace: betaNodeSetName.Namespace, + }, + } + + // Create config map for OVN service + ovnConfigMapName := types.NamespacedName{ + Namespace: namespace, + Name: "ovncontroller-config", + } + mapData := map[string]interface{}{ + "ovsdb-config": "test-ovn-config", + } + th.CreateConfigMap(ovnConfigMapName, mapData) + + // Set baremetal provisioning conditions to True + // This must succeed, as the "alpha-nodeset" exists + Eventually(func(g Gomega) { + // OpenStackBaremetalSet has the same name as OpenStackDataPlaneNodeSet + g.Expect(th.K8sClient.Get(th.Ctx, alphaNodeSetName, &baremetalAlpha)).To(Succeed()) + baremetalAlpha.Status.Conditions.MarkTrue( + condition.ReadyCondition, + condition.ReadyMessage) + g.Expect(th.K8sClient.Status().Update(th.Ctx, &baremetalAlpha)).To(Succeed()) + + }, th.Timeout, th.Interval).Should(Succeed()) + + // These must fail, as there is no "beta-nodeset" + Expect(th.K8sClient.Get(th.Ctx, 
betaNodeSetName, &baremetalBeta)).NotTo(Succeed()) + baremetalBeta.Status.Conditions.MarkTrue( + condition.ReadyCondition, + condition.ReadyMessage) + Expect(th.K8sClient.Status().Update(th.Ctx, &baremetalBeta)).NotTo(Succeed()) + + // These conditions must remain unknown + th.ExpectCondition( + dataplaneMultiNodesetDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.ReadyCondition, + corev1.ConditionUnknown, + ) + th.ExpectCondition( + dataplaneMultiNodesetDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.InputReadyCondition, + corev1.ConditionUnknown, + ) + }) + }) + + When("A dataplaneDeployment is created with non-existent service in nodeset", func() { + BeforeEach(func() { + CreateSSHSecret(dataplaneSSHSecretName) + DeferCleanup(th.DeleteInstance, th.CreateSecret(neutronOvnMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaNeutronMetadataSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaCellComputeConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(novaMigrationSSHKey, map[string][]byte{ + "ssh-privatekey": []byte("fake-ssh-private-key"), + "ssh-publickey": []byte("fake-ssh-public-key"), + })) + DeferCleanup(th.DeleteInstance, th.CreateSecret(ceilometerConfigSecretName, map[string][]byte{ + "fake_keys": []byte("blih"), + })) + // DefaultDataPlanenodeSetSpec comes with two mock services, one marked for deployment on all nodesets + // But we will not create them to test this scenario + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + DeferCleanup(th.DeleteInstance, 
CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNodeSetSpec(dataplaneNodeSetName.Name))) + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneDeploymentName, DefaultDataPlaneDeploymentSpec())) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + + It("Should have Spec fields initialized", func() { + dataplaneDeploymentInstance := GetDataplaneDeployment(dataplaneDeploymentName) + expectedSpec := dataplanev1.OpenStackDataPlaneDeploymentSpec{ + NodeSets: []string{"edpm-compute-nodeset"}, + AnsibleTags: "", + AnsibleLimit: "", + AnsibleSkipTags: "", + BackoffLimit: &DefaultBackoffLimit, + DeploymentRequeueTime: 15, + ServicesOverride: nil, + } + Expect(dataplaneDeploymentInstance.Spec).Should(Equal(expectedSpec)) + }) + + It("should have conditions set to false", func() { + + nodeSet := dataplanev1.OpenStackDataPlaneNodeSet{} + baremetal := baremetalv1.OpenStackBaremetalSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeSet.Name, + Namespace: nodeSet.Namespace, + }, + } + // Create config map for OVN service + ovnConfigMapName := types.NamespacedName{ + Namespace: namespace, + Name: "ovncontroller-config", + } + mapData := map[string]interface{}{ + "ovsdb-config": "test-ovn-config", + } + th.CreateConfigMap(ovnConfigMapName, mapData) + + nodeSet = *GetDataplaneNodeSet(dataplaneNodeSetName) + + // Set baremetal provisioning conditions to True + Eventually(func(g Gomega) { + // OpenStackBaremetalSet has the same name as OpenStackDataPlaneNodeSet + g.Expect(th.K8sClient.Get(th.Ctx, dataplaneNodeSetName, &baremetal)).To(Succeed()) + baremetal.Status.Conditions.MarkTrue( + condition.ReadyCondition, + condition.ReadyMessage) + g.Expect(th.K8sClient.Status().Update(th.Ctx, &baremetal)).To(Succeed()) + + }, th.Timeout, th.Interval).Should(Succeed()) + // Attempt to get the service ... 
fail + foundService := &dataplanev1.OpenStackDataPlaneService{} + Expect(k8sClient.Get(ctx, dataplaneServiceName, foundService)).ShouldNot(Succeed()) + + th.ExpectCondition( + dataplaneDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneDeploymentName, + ConditionGetterFunc(DataplaneDeploymentConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + }) + }) +}) diff --git a/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go b/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go new file mode 100644 index 000000000..8dd1384c6 --- /dev/null +++ b/tests/functional/dataplane/openstackdataplanenodeset_controller_test.go @@ -0,0 +1,1293 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package functional + +import ( + "encoding/json" + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . "github.com/onsi/gomega" //revive:disable:dot-imports + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + + //revive:disable-next-line:dot-imports + infrav1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + . 
"github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + "gopkg.in/yaml.v3" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +// Ansible Inventory Structs for testing specific values +type AnsibleInventory struct { + EdpmComputeNodeset struct { + Vars struct { + AnsibleUser string `yaml:"ansible_user"` + } `yaml:"vars"` + Hosts struct { + Node struct { + AnsibleHost string `yaml:"ansible_host"` + AnsiblePort string `yaml:"ansible_port"` + AnsibleUser string `yaml:"ansible_user"` + CtlPlaneIP string `yaml:"ctlplane_ip"` + DNSSearchDomains []interface{} `yaml:"dns_search_domains"` + ManagementNetwork string `yaml:"management_network"` + Networks []interface{} `yaml:"networks"` + } `yaml:"edpm-compute-node-1"` + } `yaml:"hosts"` + } `yaml:"edpm-compute-nodeset"` +} + +var _ = Describe("Dataplane NodeSet Test", func() { + var dataplaneNodeSetName types.NamespacedName + var dataplaneSecretName types.NamespacedName + var dataplaneSSHSecretName types.NamespacedName + var dataplaneNetConfigName types.NamespacedName + var dnsMasqName types.NamespacedName + var dataplaneNodeName types.NamespacedName + var dataplaneDeploymentName types.NamespacedName + var dataplaneConfigHash string + var dataplaneGlobalServiceName types.NamespacedName + + defaultEdpmServiceList := []string{ + "edpm_frr_image", + "edpm_iscsid_image", + "edpm_logrotate_crond_image", + "edpm_neutron_metadata_agent_image", + "edpm_nova_compute_image", + "edpm_ovn_controller_agent_image", + "edpm_ovn_bgp_agent_image", + } + + BeforeEach(func() { + dnsMasqName = types.NamespacedName{ + Name: "dnsmasq", + Namespace: namespace, + } + dataplaneNodeSetName = types.NamespacedName{ + Name: "edpm-compute-nodeset", + Namespace: namespace, + } + dataplaneSecretName = types.NamespacedName{ + Namespace: namespace, + Name: "dataplanenodeset-edpm-compute-nodeset", + } + dataplaneSSHSecretName = 
types.NamespacedName{ + Namespace: namespace, + Name: "dataplane-ansible-ssh-private-key-secret", + } + dataplaneNetConfigName = types.NamespacedName{ + Namespace: namespace, + Name: "dataplane-netconfig", + } + dataplaneNodeName = types.NamespacedName{ + Namespace: namespace, + Name: "edpm-compute-node-1", + } + dataplaneDeploymentName = types.NamespacedName{ + Name: "edpm-deployment", + Namespace: namespace, + } + dataplaneGlobalServiceName = types.NamespacedName{ + Name: "global-service", + Namespace: namespace, + } + err := os.Setenv("OPERATOR_SERVICES", "../../../config/services") + Expect(err).NotTo(HaveOccurred()) + }) + When("A Dataplane nodeset is created and no netconfig", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, + CreateDataplaneNodeSet(dataplaneNodeSetName, + DefaultDataPlaneNoNodeSetSpec(false))) + }) + It("should have ip reservation not ready and unknown Conditions initialized", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionUnknown, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.NodeSetIPReservationReadyCondition, + corev1.ConditionFalse, + ) + }) + }) + + When("A Dataplane nodeset is created and no dnsmasq", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, + CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, + CreateDataplaneNodeSet(dataplaneNodeSetName, + DefaultDataPlaneNoNodeSetSpec(false))) + SimulateIPSetComplete(dataplaneNodeName) + }) + It("should have dnsdata not ready and unknown Conditions initialized", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, 
+ corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionUnknown, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.NodeSetIPReservationReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.NodeSetDNSDataReadyCondition, + corev1.ConditionFalse, + ) + + }) + }) + + When("A Dataplane nodeset is created and more than one dnsmasq", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, + CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + firstDNSMasqName := types.NamespacedName{ + Name: "first-dnsmasq", + Namespace: namespace, + } + DeferCleanup(th.DeleteInstance, + CreateDNSMasq(firstDNSMasqName, DefaultDNSMasqSpec())) + secondDNSMasqName := types.NamespacedName{ + Name: "second-dnsmasq", + Namespace: namespace, + } + DeferCleanup(th.DeleteInstance, + CreateDNSMasq(secondDNSMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, + CreateDataplaneNodeSet(dataplaneNodeSetName, + DefaultDataPlaneNoNodeSetSpec(false))) + SimulateIPSetComplete(dataplaneNodeName) + }) + It("should have multiple dnsdata error message and unknown Conditions initialized", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionUnknown, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.NodeSetIPReservationReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + 
dataplanev1.NodeSetDNSDataReadyCondition, + corev1.ConditionFalse, + ) + conditions := DataplaneConditionGetter(dataplaneNodeSetName) + message := &conditions.Get(dataplanev1.NodeSetDNSDataReadyCondition).Message + Expect(*message).Should(Equal(dataplanev1.NodeSetDNSDataMultipleDNSMasqErrorMessage)) + + }) + }) + + When("TLS is enabled", func() { + tlsEnabled := true + When("A Dataplane resource is created with PreProvisioned nodes, no deployment", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + DeferCleanup(th.DeleteInstance, + CreateDataplaneNodeSet(dataplaneNodeSetName, + DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + emptyNodeSpec := dataplanev1.OpenStackDataPlaneNodeSetSpec{ + BaremetalSetTemplate: baremetalv1.OpenStackBaremetalSetSpec{ + BaremetalHosts: nil, + OSImage: "", + UserData: nil, + NetworkData: nil, + AutomatedCleaningMode: "metadata", + ProvisionServerName: "", + ProvisioningInterface: "", + CtlplaneInterface: "", + CtlplaneGateway: "", + CtlplaneNetmask: "255.255.255.0", + BmhNamespace: "openshift-machine-api", + HardwareReqs: baremetalv1.HardwareReqs{ + CPUReqs: baremetalv1.CPUReqs{ + Arch: "", + CountReq: baremetalv1.CPUCountReq{Count: 0, ExactMatch: false}, + MhzReq: baremetalv1.CPUMhzReq{Mhz: 0, ExactMatch: false}, + }, + MemReqs: baremetalv1.MemReqs{ + GbReq: baremetalv1.MemGbReq{Gb: 0, ExactMatch: false}, + }, + DiskReqs: baremetalv1.DiskReqs{ + GbReq: baremetalv1.DiskGbReq{Gb: 0, ExactMatch: false}, + SSDReq: baremetalv1.DiskSSDReq{SSD: false, ExactMatch: false}, + }, + }, + PasswordSecret: nil, + CloudUserName: "", + 
DomainName: "", + BootstrapDNS: nil, + DNSSearchDomains: nil, + }, + NodeTemplate: dataplanev1.NodeTemplate{ + AnsibleSSHPrivateKeySecret: "dataplane-ansible-ssh-private-key-secret", + ManagementNetwork: "ctlplane", + Ansible: dataplanev1.AnsibleOpts{ + AnsibleUser: "cloud-admin", + AnsibleHost: "", + AnsiblePort: 0, + AnsibleVars: nil, + }, + ExtraMounts: nil, + UserData: nil, + NetworkData: nil, + Networks: []infrav1.IPSetNetwork{{ + Name: "ctlplane", + SubnetName: "subnet1", + }, + }, + }, + Env: nil, + PreProvisioned: true, + NetworkAttachments: nil, + SecretMaxSize: 1048576, + TLSEnabled: tlsEnabled, + Nodes: map[string]dataplanev1.NodeSection{ + dataplaneNodeName.Name: { + HostName: dataplaneNodeName.Name, + }, + }, + Services: []string{ + "download-cache", + "bootstrap", + "configure-network", + "validate-network", + "install-os", + "configure-os", + "ssh-known-hosts", + "run-os", + "reboot-os", + "install-certs", + "ovn", + "neutron-metadata", + "libvirt", + "nova", + "telemetry"}, + } + Expect(dataplaneNodeSetInstance.Spec).Should(Equal(emptyNodeSpec)) + }) + + It("should have input not ready and unknown Conditions initialized", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + }) + + It("Should not have created a Secret", func() { + th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + }) + + When("A Dataplane resource is created with PreProvisioned nodes, no deployment and global service", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + nodeSetSpec["services"] = []string{ + 
"download-cache", + "bootstrap", + "configure-network", + "validate-network", + "install-os", + "configure-os", + "run-os", + "reboot-os", + "install-certs", + "ovn", + "neutron-metadata", + "libvirt", + "nova", + "telemetry", + "global-service"} + + CreateDataplaneService(dataplaneGlobalServiceName, true) + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + DeferCleanup(th.DeleteService, dataplaneGlobalServiceName) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + emptyNodeSpec := dataplanev1.OpenStackDataPlaneNodeSetSpec{ + BaremetalSetTemplate: baremetalv1.OpenStackBaremetalSetSpec{ + BaremetalHosts: nil, + OSImage: "", + UserData: nil, + NetworkData: nil, + AutomatedCleaningMode: "metadata", + ProvisionServerName: "", + ProvisioningInterface: "", + CtlplaneInterface: "", + CtlplaneGateway: "", + CtlplaneNetmask: "255.255.255.0", + BmhNamespace: "openshift-machine-api", + HardwareReqs: baremetalv1.HardwareReqs{ + CPUReqs: baremetalv1.CPUReqs{ + Arch: "", + CountReq: baremetalv1.CPUCountReq{Count: 0, ExactMatch: false}, + MhzReq: baremetalv1.CPUMhzReq{Mhz: 0, ExactMatch: false}, + }, + MemReqs: baremetalv1.MemReqs{ + GbReq: baremetalv1.MemGbReq{Gb: 0, ExactMatch: false}, + }, + DiskReqs: baremetalv1.DiskReqs{ + GbReq: baremetalv1.DiskGbReq{Gb: 0, ExactMatch: false}, + SSDReq: baremetalv1.DiskSSDReq{SSD: false, ExactMatch: false}, + }, + }, + PasswordSecret: nil, + CloudUserName: "", + DomainName: "", + BootstrapDNS: nil, + DNSSearchDomains: nil, + }, + NodeTemplate: dataplanev1.NodeTemplate{ + AnsibleSSHPrivateKeySecret: 
"dataplane-ansible-ssh-private-key-secret", + Networks: []infrav1.IPSetNetwork{{ + Name: "ctlplane", + SubnetName: "subnet1", + }, + }, + ManagementNetwork: "ctlplane", + Ansible: dataplanev1.AnsibleOpts{ + AnsibleUser: "cloud-admin", + AnsibleHost: "", + AnsiblePort: 0, + AnsibleVars: nil, + }, + ExtraMounts: nil, + UserData: nil, + NetworkData: nil, + }, + Env: nil, + PreProvisioned: true, + NetworkAttachments: nil, + SecretMaxSize: 1048576, + TLSEnabled: tlsEnabled, + Nodes: map[string]dataplanev1.NodeSection{ + dataplaneNodeName.Name: { + HostName: dataplaneNodeName.Name, + }, + }, + Services: []string{ + "download-cache", + "bootstrap", + "configure-network", + "validate-network", + "install-os", + "configure-os", + "run-os", + "reboot-os", + "install-certs", + "ovn", + "neutron-metadata", + "libvirt", + "nova", + "telemetry", + "global-service"}, + } + Expect(dataplaneNodeSetInstance.Spec).Should(Equal(emptyNodeSpec)) + }) + + It("should have input not ready and unknown Conditions initialized", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + }) + + It("Should not have created a Secret", func() { + th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + + It("Should have service called 'global-service'", func() { + service := GetService(dataplaneGlobalServiceName) + Expect(service.Spec.DeployOnAllNodeSets).Should(BeTrue()) + }) + }) + + When("A Dataplane resorce is created without PreProvisioned nodes and ordered deployment", func() { + BeforeEach(func() { + spec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + spec["metadata"] = 
map[string]interface{}{"ansiblesshprivatekeysecret": ""} + spec["preProvisioned"] = false + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, spec)) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + Expect(dataplaneNodeSetInstance.Spec.PreProvisioned).Should(BeFalse()) + }) + + It("should have ReadyCondition, InputReadyCondition and SetupReadyCondition set to false, and DeploymentReadyCondition set to Unknown", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionUnknown, + ) + }) + + It("Should not have created a Secret", func() { + th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + }) + + When("A Dataplane resorce is created without PreProvisioned nodes but is marked as PreProvisioned, with ordered deployment", func() { + BeforeEach(func() { + spec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + spec["metadata"] = map[string]interface{}{"ansiblesshprivatekeysecret": ""} + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + 
DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, spec)) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + Expect(dataplaneNodeSetInstance.Spec.PreProvisioned).Should(BeTrue()) + }) + + It("should have ReadyCondition, InputReadCondition and SetupReadyCondition set to false, and DeploymentReadyCondition set to unknown", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionUnknown, + ) + }) + + It("Should not have created a Secret", func() { + th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + }) + + When("A ssh secret is created", func() { + + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have created a Secret", 
func() { + secret := th.GetSecret(dataplaneSecretName) + Expect(secret.Data["inventory"]).Should( + ContainSubstring("edpm-compute-nodeset")) + }) + It("Should set Input and Setup ready", func() { + + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("No default service image is provided", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have default service values provided", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcImage := range defaultEdpmServiceList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(svcImage)) + } + }) + }) + + When("A user provides a custom service image", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, CustomServiceImageSpec())) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have the user defined image in the inventory", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcAnsibleVar := range DefaultEdpmServiceAnsibleVarList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(fmt.Sprintf("%s.%s", 
svcAnsibleVar, CustomEdpmServiceDomainTag))) + } + }) + }) + + When("No default service image is provided", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have default service values provided", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcAnsibleVar := range DefaultEdpmServiceAnsibleVarList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(svcAnsibleVar)) + } + }) + }) + + When("A user provides a custom service image", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, CustomServiceImageSpec())) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have the user defined image in the inventory", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcAnsibleVar := range DefaultEdpmServiceAnsibleVarList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(fmt.Sprintf("%s.%s", svcAnsibleVar, CustomEdpmServiceDomainTag))) + } + }) + }) + + When("The nodeTemplate contains a ansibleUser but the individual node does not", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNodeSetSpec("edpm-compute") + nodeSetSpec["preProvisioned"] = true + 
DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should not have set the node specific ansible_user variable", func() { + secret := th.GetSecret(dataplaneSecretName) + secretData := secret.Data["inventory"] + + var inv AnsibleInventory + err := yaml.Unmarshal(secretData, &inv) + if err != nil { + fmt.Printf("Error: %v", err) + } + Expect(inv.EdpmComputeNodeset.Vars.AnsibleUser).Should(Equal("cloud-user")) + Expect(inv.EdpmComputeNodeset.Hosts.Node.AnsibleUser).Should(BeEmpty()) + }) + }) + + When("The individual node has a AnsibleUser override", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + nodeOverrideSpec := dataplanev1.NodeSection{ + HostName: dataplaneNodeName.Name, + Networks: []infrav1.IPSetNetwork{{ + Name: "ctlplane", + SubnetName: "subnet1", + }, + }, + Ansible: dataplanev1.AnsibleOpts{ + AnsibleUser: "test-user", + }, + } + + nodeTemplateOverrideSpec := map[string]interface{}{ + "ansibleSSHPrivateKeySecret": "dataplane-ansible-ssh-private-key-secret", + "ansible": map[string]interface{}{ + "ansibleUser": "cloud-user", + }, + } + + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + nodeSetSpec["nodes"].(map[string]dataplanev1.NodeSection)[dataplaneNodeName.Name] = nodeOverrideSpec + nodeSetSpec["nodeTemplate"] = nodeTemplateOverrideSpec + + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + CreateSSHSecret(dataplaneSSHSecretName) + 
SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have a node specific override that is different to the group", func() { + secret := th.GetSecret(dataplaneSecretName) + secretData := secret.Data["inventory"] + + var inv AnsibleInventory + err := yaml.Unmarshal(secretData, &inv) + if err != nil { + fmt.Printf("Error: %v", err) + } + Expect(inv.EdpmComputeNodeset.Hosts.Node.AnsibleUser).Should(Equal("test-user")) + Expect(inv.EdpmComputeNodeset.Vars.AnsibleUser).Should(Equal("cloud-user")) + }) + }) + + When("A nodeSet is created with IPAM", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNodeSetSpec("edpm-compute") + nodeSetSpec["preProvisioned"] = true + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should set the ctlplane_ip variable in the Ansible inventory secret", func() { + Eventually(func() string { + secret := th.GetSecret(dataplaneSecretName) + return getCtlPlaneIP(&secret) + }).Should(Equal("172.20.12.76")) + }) + }) + + When("A DataPlaneNodeSet is created with NoNodes and a OpenStackDataPlaneDeployment is created", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneDeploymentName, 
DefaultDataPlaneDeploymentSpec())) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should reach Input and Setup Ready completion", func() { + var conditionList = []condition.Type{ + condition.InputReadyCondition, + dataplanev1.SetupReadyCondition, + } + for _, cond := range conditionList { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + cond, + corev1.ConditionTrue, + ) + } + }) + }) + }) + When("TLS is not enabled explicitly its enabled by default", func() { + tlsEnabled := true + When("A Dataplane resorce is created with PreProvisioned nodes, no deployment", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + emptyNodeSpec := dataplanev1.OpenStackDataPlaneNodeSetSpec{ + BaremetalSetTemplate: baremetalv1.OpenStackBaremetalSetSpec{ + BaremetalHosts: nil, + OSImage: "", + UserData: nil, + NetworkData: nil, + AutomatedCleaningMode: "metadata", + ProvisionServerName: "", + ProvisioningInterface: "", + CtlplaneInterface: "", + CtlplaneGateway: "", + CtlplaneNetmask: "255.255.255.0", + BmhNamespace: "openshift-machine-api", + HardwareReqs: baremetalv1.HardwareReqs{ + CPUReqs: baremetalv1.CPUReqs{ + Arch: "", + CountReq: baremetalv1.CPUCountReq{Count: 0, ExactMatch: false}, + MhzReq: baremetalv1.CPUMhzReq{Mhz: 0, ExactMatch: false}, + }, + MemReqs: 
baremetalv1.MemReqs{ + GbReq: baremetalv1.MemGbReq{Gb: 0, ExactMatch: false}, + }, + DiskReqs: baremetalv1.DiskReqs{ + GbReq: baremetalv1.DiskGbReq{Gb: 0, ExactMatch: false}, + SSDReq: baremetalv1.DiskSSDReq{SSD: false, ExactMatch: false}, + }, + }, + PasswordSecret: nil, + CloudUserName: "", + DomainName: "", + BootstrapDNS: nil, + DNSSearchDomains: nil, + }, + NodeTemplate: dataplanev1.NodeTemplate{ + AnsibleSSHPrivateKeySecret: "dataplane-ansible-ssh-private-key-secret", + Networks: []infrav1.IPSetNetwork{{ + Name: "ctlplane", + SubnetName: "subnet1", + }, + }, + ManagementNetwork: "ctlplane", + Ansible: dataplanev1.AnsibleOpts{ + AnsibleUser: "cloud-admin", + AnsibleHost: "", + AnsiblePort: 0, + AnsibleVars: nil, + }, + ExtraMounts: nil, + UserData: nil, + NetworkData: nil, + }, + Env: nil, + PreProvisioned: true, + NetworkAttachments: nil, + SecretMaxSize: 1048576, + TLSEnabled: tlsEnabled, + Nodes: map[string]dataplanev1.NodeSection{ + dataplaneNodeName.Name: { + HostName: dataplaneNodeName.Name, + }, + }, + Services: []string{ + "download-cache", + "bootstrap", + "configure-network", + "validate-network", + "install-os", + "configure-os", + "ssh-known-hosts", + "run-os", + "reboot-os", + "install-certs", + "ovn", + "neutron-metadata", + "libvirt", + "nova", + "telemetry"}, + } + Expect(dataplaneNodeSetInstance.Spec).Should(Equal(emptyNodeSpec)) + }) + + It("should have input not ready and unknown Conditions initialized", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + }) + + It("Should not have created a Secret", func() { + 
th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + }) + + When("A Dataplane resorce is created without PreProvisioned nodes and ordered deployment", func() { + BeforeEach(func() { + spec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + spec["metadata"] = map[string]interface{}{"ansiblesshprivatekeysecret": ""} + spec["preProvisioned"] = false + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, spec)) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + Expect(dataplaneNodeSetInstance.Spec.PreProvisioned).Should(BeFalse()) + }) + + It("should have ReadyCondition, InputReadyCondition and SetupReadyCondition set to false, and DeploymentReadyCondition set to Unknown", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionUnknown, + ) + }) + + It("Should not have created a Secret", func() { + th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + }) + + When("A Dataplane resorce is created without PreProvisioned nodes but is marked as PreProvisioned, with ordered deployment", func() { + 
BeforeEach(func() { + spec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + spec["metadata"] = map[string]interface{}{"ansiblesshprivatekeysecret": ""} + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, spec)) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("should have the Spec fields initialized", func() { + dataplaneNodeSetInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + Expect(dataplaneNodeSetInstance.Spec.PreProvisioned).Should(BeTrue()) + }) + + It("should have ReadyCondition, InputReadCondition and SetupReadyCondition set to false, and DeploymentReadyCondition set to unknown", func() { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + dataplanev1.SetupReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionUnknown, + ) + }) + + It("Should not have created a Secret", func() { + th.AssertSecretDoesNotExist(dataplaneSecretName) + }) + }) + + When("A ssh secret is created", func() { + + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, 
DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have created a Secret", func() { + secret := th.GetSecret(dataplaneSecretName) + Expect(secret.Data["inventory"]).Should( + ContainSubstring("edpm-compute-nodeset")) + }) + It("Should set Input and Setup ready", func() { + + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + condition.InputReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("No default service image is provided", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have default service values provided", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcImage := range defaultEdpmServiceList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(svcImage)) + } + }) + }) + + When("A user provides a custom service image", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, CustomServiceImageSpec())) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have 
the user defined image in the inventory", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcAnsibleVar := range DefaultEdpmServiceAnsibleVarList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(fmt.Sprintf("%s.%s", svcAnsibleVar, CustomEdpmServiceDomainTag))) + } + }) + }) + + When("No default service image is provided", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have default service values provided", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcAnsibleVar := range DefaultEdpmServiceAnsibleVarList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(svcAnsibleVar)) + } + }) + }) + + When("A user provides a custom service image", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, CustomServiceImageSpec())) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have the user defined image in the inventory", func() { + secret := th.GetSecret(dataplaneSecretName) + for _, svcAnsibleVar := range DefaultEdpmServiceAnsibleVarList { + Expect(secret.Data["inventory"]).Should( + ContainSubstring(fmt.Sprintf("%s.%s", svcAnsibleVar, CustomEdpmServiceDomainTag))) + } + 
}) + }) + + When("The nodeTemplate contains a ansibleUser but the individual node does not", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNodeSetSpec("edpm-compute") + nodeSetSpec["preProvisioned"] = true + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should not have set the node specific ansible_user variable", func() { + secret := th.GetSecret(dataplaneSecretName) + secretData := secret.Data["inventory"] + + var inv AnsibleInventory + err := yaml.Unmarshal(secretData, &inv) + if err != nil { + fmt.Printf("Error: %v", err) + } + Expect(inv.EdpmComputeNodeset.Vars.AnsibleUser).Should(Equal("cloud-user")) + Expect(inv.EdpmComputeNodeset.Hosts.Node.AnsibleUser).Should(BeEmpty()) + }) + }) + + When("The individual node has a AnsibleUser override", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + nodeOverrideSpec := dataplanev1.NodeSection{ + HostName: dataplaneNodeName.Name, + Networks: []infrav1.IPSetNetwork{{ + Name: "ctlplane", + SubnetName: "subnet1", + }, + }, + Ansible: dataplanev1.AnsibleOpts{ + AnsibleUser: "test-user", + }, + } + + nodeTemplateOverrideSpec := map[string]interface{}{ + "ansibleSSHPrivateKeySecret": "dataplane-ansible-ssh-private-key-secret", + "ansible": map[string]interface{}{ + "ansibleUser": "cloud-user", + }, + } + + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(tlsEnabled) + 
nodeSetSpec["nodes"].(map[string]dataplanev1.NodeSection)[dataplaneNodeName.Name] = nodeOverrideSpec + nodeSetSpec["nodeTemplate"] = nodeTemplateOverrideSpec + + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should have a node specific override that is different to the group", func() { + secret := th.GetSecret(dataplaneSecretName) + secretData := secret.Data["inventory"] + + var inv AnsibleInventory + err := yaml.Unmarshal(secretData, &inv) + if err != nil { + fmt.Printf("Error: %v", err) + } + Expect(inv.EdpmComputeNodeset.Hosts.Node.AnsibleUser).Should(Equal("test-user")) + Expect(inv.EdpmComputeNodeset.Vars.AnsibleUser).Should(Equal("cloud-user")) + }) + }) + + When("A nodeSet is created with IPAM", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNodeSetSpec("edpm-compute") + nodeSetSpec["preProvisioned"] = true + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should set the ctlplane_ip variable in the Ansible inventory secret", func() { + Eventually(func() string { + secret := th.GetSecret(dataplaneSecretName) + return getCtlPlaneIP(&secret) + }).Should(Equal("172.20.12.76")) + }) + }) + + When("A DataPlaneNodeSet is created with NoNodes and a OpenStackDataPlaneDeployment is created", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + 
DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, DefaultDataPlaneNoNodeSetSpec(tlsEnabled))) + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneDeploymentName, DefaultDataPlaneDeploymentSpec())) + CreateSSHSecret(dataplaneSSHSecretName) + SimulateDNSMasqComplete(dnsMasqName) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + It("Should reach Input and Setup Ready completion", func() { + var conditionList = []condition.Type{ + condition.InputReadyCondition, + dataplanev1.SetupReadyCondition, + } + for _, cond := range conditionList { + th.ExpectCondition( + dataplaneNodeSetName, + ConditionGetterFunc(DataplaneConditionGetter), + cond, + corev1.ConditionTrue, + ) + } + }) + }) + }) + + When("A user changes spec field that would require a new Ansible execution", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNodeSetSpec(dataplaneNodeSetName.Name) + nodeSetSpec["nodeTemplate"] = dataplanev1.NodeTemplate{ + Ansible: dataplanev1.AnsibleOpts{ + AnsibleVars: map[string]json.RawMessage{ + "edpm_network_config_hide_sensitive_logs": json.RawMessage([]byte(`"false"`)), + }, + }, + } + DeferCleanup(th.DeleteInstance, CreateNetConfig(dataplaneNetConfigName, DefaultNetConfigSpec())) + DeferCleanup(th.DeleteInstance, CreateDNSMasq(dnsMasqName, DefaultDNSMasqSpec())) + SimulateDNSMasqComplete(dnsMasqName) + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + SimulateIPSetComplete(dataplaneNodeName) + SimulateDNSDataComplete(dataplaneNodeSetName) + }) + + It("Should change the ConfigHash", func() { + Eventually(func(_ Gomega) error { + instance := GetDataplaneNodeSet(dataplaneNodeSetName) + dataplaneConfigHash = instance.Status.ConfigHash + instance.Spec.NodeTemplate.Ansible.AnsibleVars = map[string]json.RawMessage{ + 
"edpm_network_config_hide_sensitive_logs": json.RawMessage([]byte(`"true"`)), + } + return th.K8sClient.Update(th.Ctx, instance) + }).Should(Succeed()) + Eventually(func(_ Gomega) bool { + updatedInstance := GetDataplaneNodeSet(dataplaneNodeSetName) + return dataplaneConfigHash != updatedInstance.Status.ConfigHash + }).Should(BeTrue()) + }) + }) +}) diff --git a/tests/functional/dataplane/openstackdataplanenodeset_webhook_test.go b/tests/functional/dataplane/openstackdataplanenodeset_webhook_test.go new file mode 100644 index 000000000..0ca3aa65c --- /dev/null +++ b/tests/functional/dataplane/openstackdataplanenodeset_webhook_test.go @@ -0,0 +1,246 @@ +package functional + +import ( + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . "github.com/onsi/gomega" //revive:disable:dot-imports + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + v1beta1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" +) + +var _ = Describe("DataplaneNodeSet Webhook", func() { + + var dataplaneNodeSetName types.NamespacedName + var dataplaneDeploymentName types.NamespacedName + + BeforeEach(func() { + dataplaneNodeSetName = types.NamespacedName{ + Name: "edpm-compute-nodeset", + Namespace: namespace, + } + dataplaneDeploymentName = types.NamespacedName{ + Name: "edpm-deployment", + Namespace: namespace, + } + err := os.Setenv("OPERATOR_SERVICES", "../../../config/services") + Expect(err).NotTo(HaveOccurred()) + }) + + When("User tries to change forbidden items in the baremetalSetTemplate", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + nodeSetSpec["preProvisioned"] = false + nodeSetSpec["nodes"] = map[string]interface{}{ + 
"compute-0": map[string]interface{}{ + "hostName": "compute-0"}, + } + nodeSetSpec["baremetalSetTemplate"] = baremetalv1.OpenStackBaremetalSetSpec{ + CloudUserName: "test-user", + BmhLabelSelector: map[string]string{ + "app": "test-openstack", + }, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + }) + + It("Should block changes to the BmhLabelSelector object in baremetalSetTemplate spec", func() { + Eventually(func(_ Gomega) string { + instance := GetDataplaneNodeSet(dataplaneNodeSetName) + instance.Spec.BaremetalSetTemplate = baremetalv1.OpenStackBaremetalSetSpec{ + CloudUserName: "new-user", + BmhLabelSelector: map[string]string{ + "app": "openstack1", + }, + } + err := th.K8sClient.Update(th.Ctx, instance) + return fmt.Sprintf("%s", err) + }).Should(ContainSubstring("Forbidden: cannot change")) + }) + }) + + When("A user changes an allowed field in the baremetalSetTemplate", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + nodeSetSpec["preProvisioned"] = false + nodeSetSpec["baremetalSetTemplate"] = baremetalv1.OpenStackBaremetalSetSpec{ + CloudUserName: "test-user", + BmhLabelSelector: map[string]string{ + "app": "test-openstack", + }, + BaremetalHosts: map[string]baremetalv1.InstanceSpec{ + "compute-0": { + CtlPlaneIP: "192.168.1.12/24", + }, + }, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + }) + It("Should allow changes to the CloudUserName", func() { + Eventually(func(_ Gomega) error { + instance := GetDataplaneNodeSet(dataplaneNodeSetName) + instance.Spec.BaremetalSetTemplate = baremetalv1.OpenStackBaremetalSetSpec{ + CloudUserName: "new-user", + BmhLabelSelector: map[string]string{ + "app": "test-openstack", + }, + BaremetalHosts: map[string]baremetalv1.InstanceSpec{ + "compute-0": { + CtlPlaneIP: "192.168.1.12/24", + }, + }, + } + return th.K8sClient.Update(th.Ctx, instance) + }).Should(Succeed()) + }) + }) + + 
When("domainName in baremetalSetTemplate", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + nodeSetSpec["preProvisioned"] = false + nodeSetSpec["nodes"] = map[string]interface{}{ + "compute-0": map[string]interface{}{ + "hostName": "compute-0"}, + } + nodeSetSpec["baremetalSetTemplate"] = baremetalv1.OpenStackBaremetalSetSpec{ + DomainName: "example.com", + BmhLabelSelector: map[string]string{ + "app": "test-openstack", + }, + BaremetalHosts: map[string]baremetalv1.InstanceSpec{ + "compute-0": { + CtlPlaneIP: "192.168.1.12/24", + }, + }, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + }) + + It("hostName should be fqdn", func() { + instance := GetDataplaneNodeSet(dataplaneNodeSetName) + Expect(instance.Spec.Nodes["compute-0"].HostName).Should(Equal( + "compute-0.example.com")) + }) + + }) + + When("A user tries to redeclare an existing node in a new NodeSet", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + nodeSetSpec["preProvisioned"] = true + nodeSetSpec["nodes"] = map[string]interface{}{ + "compute-0": map[string]interface{}{ + "hostName": "compute-0"}, + } + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + }) + + It("Should block duplicate node declaration", func() { + Eventually(func(_ Gomega) string { + newNodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + newNodeSetSpec["preProvisioned"] = true + newNodeSetSpec["nodes"] = map[string]interface{}{ + "compute-0": map[string]interface{}{ + "hostName": "compute-0"}, + } + newInstance := DefaultDataplaneNodeSetTemplate(types.NamespacedName{Name: "test-duplicate-node", Namespace: namespace}, newNodeSetSpec) + unstructuredObj := &unstructured.Unstructured{Object: newInstance} + _, err := controllerutil.CreateOrPatch( + th.Ctx, th.K8sClient, unstructuredObj, func() error { return nil }) + return fmt.Sprintf("%s", err) + 
}).Should(ContainSubstring("already exists in another cluster")) + }) + + It("Should block NodeSets if they contain a duplicate ansibleHost", func() { + Eventually(func(_ Gomega) string { + newNodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + newNodeSetSpec["preProvisioned"] = true + newNodeSetSpec["nodes"] = map[string]interface{}{ + "compute-3": map[string]interface{}{ + "hostName": "compute-3", + "ansible": map[string]interface{}{ + "ansibleHost": "compute-3", + }, + }, + "compute-2": map[string]interface{}{ + "hostName": "compute-2"}, + "compute-8": map[string]interface{}{ + "hostName": "compute-8"}, + "compute-0": map[string]interface{}{ + "ansible": map[string]interface{}{ + "ansibleHost": "compute-0", + }, + }, + } + newInstance := DefaultDataplaneNodeSetTemplate(types.NamespacedName{Name: "test-nodeset-with-duplicate-node", Namespace: namespace}, newNodeSetSpec) + unstructuredObj := &unstructured.Unstructured{Object: newInstance} + _, err := controllerutil.CreateOrPatch( + th.Ctx, th.K8sClient, unstructuredObj, func() error { return nil }) + return fmt.Sprintf("%s", err) + }).Should(ContainSubstring("already exists in another cluster")) + }) + }) + When("A NodeSet is updated with a OpenStackDataPlaneDeployment", func() { + BeforeEach(func() { + nodeSetSpec := DefaultDataPlaneNoNodeSetSpec(false) + nodeSetSpec["preProvisioned"] = true + nodeSetSpec["nodes"] = map[string]interface{}{ + "compute-0": map[string]interface{}{ + "hostName": "compute-0"}, + } + + DeferCleanup(th.DeleteInstance, CreateDataplaneNodeSet(dataplaneNodeSetName, nodeSetSpec)) + DeferCleanup(th.DeleteInstance, CreateDataplaneDeployment(dataplaneDeploymentName, DefaultDataPlaneDeploymentSpec())) + }) + It("Should allow for NodeSet updates if Deployment is Completed", func() { + Eventually(func(g Gomega) error { + instance := GetDataplaneNodeSet(dataplaneNodeSetName) + instance.Spec.NodeTemplate.Ansible = v1beta1.AnsibleOpts{ + AnsibleUser: "random-user", + } + + 
deploymentReadyConditions := condition.Conditions{} + deploymentReadyConditions.MarkTrue( + v1beta1.NodeSetDeploymentReadyCondition, + condition.ReadyMessage) + + instance.Status.DeploymentStatuses = make(map[string]condition.Conditions) + instance.Status.DeploymentStatuses[dataplaneDeploymentName.Name] = deploymentReadyConditions + g.Expect(th.K8sClient.Status().Update(th.Ctx, instance)).To(Succeed()) + + return th.K8sClient.Update(th.Ctx, instance) + }).Should(Succeed()) + }) + It("Should block NodeSet updates if Deployment is NOT completed", func() { + Eventually(func(g Gomega) string { + instance := GetDataplaneNodeSet(dataplaneNodeSetName) + + deploymentReadyConditions := condition.Conditions{} + deploymentReadyConditions.MarkFalse( + v1beta1.NodeSetDeploymentReadyCondition, + "mock-error", + condition.SeverityWarning, + condition.ReadyMessage) + + instance.Status.DeploymentStatuses = make(map[string]condition.Conditions) + instance.Status.DeploymentStatuses[dataplaneDeploymentName.Name] = deploymentReadyConditions + g.Expect(th.K8sClient.Status().Update(th.Ctx, instance)).To(Succeed()) + + instance.Spec.NodeTemplate.Ansible = v1beta1.AnsibleOpts{ + AnsibleUser: "random-user", + } + err := th.K8sClient.Update(th.Ctx, instance) + return fmt.Sprintf("%s", err) + }).Should(ContainSubstring(fmt.Sprintf("could not patch openstackdataplanenodeset while openstackdataplanedeployment %s (blocked on %s condition) is running", + dataplaneDeploymentName.Name, string(v1beta1.NodeSetDeploymentReadyCondition)))) + }) + }) +}) diff --git a/tests/functional/dataplane/openstackdataplaneservice_controller_test.go b/tests/functional/dataplane/openstackdataplaneservice_controller_test.go new file mode 100644 index 000000000..c1d5d9b2a --- /dev/null +++ b/tests/functional/dataplane/openstackdataplaneservice_controller_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package functional + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . "github.com/onsi/gomega" //revive:disable:dot-imports + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("OpenstackDataplaneService Test", func() { + var dataplaneServiceName types.NamespacedName + BeforeEach(func() { + dataplaneServiceName = types.NamespacedName{ + Namespace: namespace, + Name: "configure-network", + } + }) + + When("A defined service resource is created", func() { + BeforeEach(func() { + os.Unsetenv("OPERATOR_SERVICES") + CreateDataplaneService(dataplaneServiceName, false) + DeferCleanup(th.DeleteService, dataplaneServiceName) + }) + + It("spec fields are set up", func() { + service := GetService(dataplaneServiceName) + Expect(service.Spec.Secrets).To(BeEmpty()) + Expect(service.Spec.Playbook).To(BeEmpty()) + Expect(service.Spec.ConfigMaps).To(BeEmpty()) + Expect(service.Spec.DeployOnAllNodeSets).To(BeFalse()) + }) + }) + + When("A defined service resource for all nodes is created", func() { + BeforeEach(func() { + os.Unsetenv("OPERATOR_SERVICES") + CreateDataplaneService(dataplaneServiceName, true) + DeferCleanup(th.DeleteService, dataplaneServiceName) + }) + + It("spec fields are set up", func() { + service := GetService(dataplaneServiceName) + Expect(service.Spec.Secrets).To(BeEmpty()) + Expect(service.Spec.Playbook).To(BeEmpty()) + Expect(service.Spec.ConfigMaps).To(BeEmpty()) + 
Expect(service.Spec.DeployOnAllNodeSets).To(BeTrue()) + }) + }) +}) diff --git a/tests/functional/dataplane/suite_test.go b/tests/functional/dataplane/suite_test.go new file mode 100644 index 000000000..bb56410aa --- /dev/null +++ b/tests/functional/dataplane/suite_test.go @@ -0,0 +1,219 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package functional + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . 
"github.com/onsi/gomega" //revive:disable:dot-imports + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + + infrav1 "github.com/openstack-k8s-operators/infra-operator/apis/network/v1beta1" + aee "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1beta1" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + openstackv1 "github.com/openstack-k8s-operators/openstack-operator/apis/core/v1beta1" + dataplanev1 "github.com/openstack-k8s-operators/openstack-operator/apis/dataplane/v1beta1" + dataplanecontrollers "github.com/openstack-k8s-operators/openstack-operator/controllers/dataplane" + + //revive:disable-next-line:dot-imports + . "github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" + test "github.com/openstack-k8s-operators/lib-common/modules/test" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + k8sClient client.Client // You'll be using this client in your tests. 
+ testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + logger logr.Logger + th *TestHelper + namespace string +) + +const ( + SecretName = "test-secret" + MessageBusSecretName = "rabbitmq-secret" + ContainerImage = "test://nova" + timeout = 40 * time.Second + // have maximum 100 retries before the timeout hits + interval = timeout / 100 +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "DataPlane Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + const gomod = "../../../go.mod" + + aeeCRDs, err := test.GetCRDDirFromModule( + "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api", gomod, "bases") + Expect(err).ShouldNot(HaveOccurred()) + baremetalCRDs, err := test.GetCRDDirFromModule( + "github.com/openstack-k8s-operators/openstack-baremetal-operator/api", gomod, "bases") + Expect(err).ShouldNot(HaveOccurred()) + infraCRDs, err := test.GetCRDDirFromModule( + "github.com/openstack-k8s-operators/infra-operator/apis", gomod, "bases") + Expect(err).ShouldNot(HaveOccurred()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + aeeCRDs, + baremetalCRDs, + infraCRDs, + }, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, + // NOTE(gibi): if localhost is resolved to ::1 (ipv6) then starting + // the webhook fails as it try to parse the address as ipv4 and + // failing on the colons in ::1 + LocalServingHost: "127.0.0.1", + }, + ErrorIfCRDPathMissing: true, + } + + // cfg is defined in this file globally. + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + // NOTE(gibi): Need to add all API schemas our operator can own. 
+ // Keep this in synch with SetupWithManager, otherwise the reconciler loop + // will silently not start in the test env. + err = dataplanev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = aee.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = batchv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = corev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = appsv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = baremetalv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = infrav1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = openstackv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + //+kubebuilder:scaffold:scheme + + logger = ctrl.Log.WithName("---DataPlane Test---") + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + th = NewTestHelper(ctx, k8sClient, timeout, interval, logger) + Expect(th).NotTo(BeNil()) + + // Start the controller-manager if goroutine + webhookInstallOptions := &testEnv.WebhookInstallOptions + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&dataplanev1.OpenStackDataPlaneNodeSet{}).SetupWebhookWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + err = (&dataplanev1.OpenStackDataPlaneDeployment{}).SetupWebhookWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + err = (&dataplanev1.OpenStackDataPlaneService{}).SetupWebhookWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + 
kclient, err := kubernetes.NewForConfig(cfg) + Expect(err).ToNot(HaveOccurred(), "failed to create kclient") + err = (&dataplanecontrollers.OpenStackDataPlaneNodeSetReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Kclient: kclient, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&dataplanecontrollers.OpenStackDataPlaneDeploymentReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Kclient: kclient, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = BeforeEach(func() { + // NOTE(gibi): We need to create a unique namespace for each test run + // as namespaces cannot be deleted in a locally running envtest. See + // https://book.kubebuilder.io/reference/envtest.html#namespace-usage-limitation + namespace = uuid.New().String() + th.CreateNamespace(namespace) + // We still request the delete of the Namespace to properly cleanup if + // we run the test in an existing cluster. 
+ DeferCleanup(th.DeleteNamespace, namespace) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/tests/kuttl/tests/basic-deployment/01-assert-deploy-openstack.yaml b/tests/kuttl/tests/ctlplane-basic-deployment/01-assert-deploy-openstack.yaml similarity index 100% rename from tests/kuttl/tests/basic-deployment/01-assert-deploy-openstack.yaml rename to tests/kuttl/tests/ctlplane-basic-deployment/01-assert-deploy-openstack.yaml diff --git a/tests/kuttl/tests/basic-deployment/01-deploy-openstack.yaml b/tests/kuttl/tests/ctlplane-basic-deployment/01-deploy-openstack.yaml similarity index 100% rename from tests/kuttl/tests/basic-deployment/01-deploy-openstack.yaml rename to tests/kuttl/tests/ctlplane-basic-deployment/01-deploy-openstack.yaml diff --git a/tests/kuttl/tests/basic-deployment/02-cleanup.yaml b/tests/kuttl/tests/ctlplane-basic-deployment/02-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/basic-deployment/02-cleanup.yaml rename to tests/kuttl/tests/ctlplane-basic-deployment/02-cleanup.yaml diff --git a/tests/kuttl/tests/basic-deployment/02-errors-cleanup.yaml b/tests/kuttl/tests/ctlplane-basic-deployment/02-errors-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/basic-deployment/02-errors-cleanup.yaml rename to tests/kuttl/tests/ctlplane-basic-deployment/02-errors-cleanup.yaml diff --git a/tests/kuttl/tests/collapsed/01-assert-collapsed-cell.yaml b/tests/kuttl/tests/ctlplane-collapsed/01-assert-collapsed-cell.yaml similarity index 100% rename from tests/kuttl/tests/collapsed/01-assert-collapsed-cell.yaml rename to tests/kuttl/tests/ctlplane-collapsed/01-assert-collapsed-cell.yaml diff --git a/tests/kuttl/tests/collapsed/01-deploy-openstack-collapsed-cell.yaml b/tests/kuttl/tests/ctlplane-collapsed/01-deploy-openstack-collapsed-cell.yaml similarity index 100% rename from 
tests/kuttl/tests/collapsed/01-deploy-openstack-collapsed-cell.yaml rename to tests/kuttl/tests/ctlplane-collapsed/01-deploy-openstack-collapsed-cell.yaml diff --git a/tests/kuttl/tests/collapsed/02-cleanup.yaml b/tests/kuttl/tests/ctlplane-collapsed/02-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/collapsed/02-cleanup.yaml rename to tests/kuttl/tests/ctlplane-collapsed/02-cleanup.yaml diff --git a/tests/kuttl/tests/collapsed/02-errors-cleanup.yaml b/tests/kuttl/tests/ctlplane-collapsed/02-errors-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/collapsed/02-errors-cleanup.yaml rename to tests/kuttl/tests/ctlplane-collapsed/02-errors-cleanup.yaml diff --git a/tests/kuttl/tests/galera-3replicas/01-assert-galera-3replicas.yaml b/tests/kuttl/tests/ctlplane-galera-3replicas/01-assert-galera-3replicas.yaml similarity index 100% rename from tests/kuttl/tests/galera-3replicas/01-assert-galera-3replicas.yaml rename to tests/kuttl/tests/ctlplane-galera-3replicas/01-assert-galera-3replicas.yaml diff --git a/tests/kuttl/tests/galera-3replicas/01-deploy-galera-3replicas.yaml b/tests/kuttl/tests/ctlplane-galera-3replicas/01-deploy-galera-3replicas.yaml similarity index 100% rename from tests/kuttl/tests/galera-3replicas/01-deploy-galera-3replicas.yaml rename to tests/kuttl/tests/ctlplane-galera-3replicas/01-deploy-galera-3replicas.yaml diff --git a/tests/kuttl/tests/galera-3replicas/02-cleanup.yaml b/tests/kuttl/tests/ctlplane-galera-3replicas/02-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/galera-3replicas/02-cleanup.yaml rename to tests/kuttl/tests/ctlplane-galera-3replicas/02-cleanup.yaml diff --git a/tests/kuttl/tests/galera-3replicas/02-errors-cleanup.yaml b/tests/kuttl/tests/ctlplane-galera-3replicas/02-errors-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/galera-3replicas/02-errors-cleanup.yaml rename to tests/kuttl/tests/ctlplane-galera-3replicas/02-errors-cleanup.yaml diff --git 
a/tests/kuttl/tests/galera-basic/01-assert-galera.yaml b/tests/kuttl/tests/ctlplane-galera-basic/01-assert-galera.yaml similarity index 100% rename from tests/kuttl/tests/galera-basic/01-assert-galera.yaml rename to tests/kuttl/tests/ctlplane-galera-basic/01-assert-galera.yaml diff --git a/tests/kuttl/tests/galera-basic/01-deploy-galera.yaml b/tests/kuttl/tests/ctlplane-galera-basic/01-deploy-galera.yaml similarity index 100% rename from tests/kuttl/tests/galera-basic/01-deploy-galera.yaml rename to tests/kuttl/tests/ctlplane-galera-basic/01-deploy-galera.yaml diff --git a/tests/kuttl/tests/galera-basic/02-cleanup.yaml b/tests/kuttl/tests/ctlplane-galera-basic/02-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/galera-basic/02-cleanup.yaml rename to tests/kuttl/tests/ctlplane-galera-basic/02-cleanup.yaml diff --git a/tests/kuttl/tests/galera-basic/02-errors-cleanup.yaml b/tests/kuttl/tests/ctlplane-galera-basic/02-errors-cleanup.yaml similarity index 100% rename from tests/kuttl/tests/galera-basic/02-errors-cleanup.yaml rename to tests/kuttl/tests/ctlplane-galera-basic/02-errors-cleanup.yaml diff --git a/tests/kuttl/tests/dataplane-create-test/00-assert.yaml b/tests/kuttl/tests/dataplane-create-test/00-assert.yaml new file mode 100644 index 000000000..5c3f1c00f --- /dev/null +++ b/tests/kuttl/tests/dataplane-create-test/00-assert.yaml @@ -0,0 +1,202 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-ipam +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + - name: storage + 
subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + nodeTemplate: + ansible: + ansiblePort: 22 + ansibleUser: cloud-admin + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + edpm_network_config_hide_sensitive_logs: false + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_selinux_mode: enforcing + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + edpm_sshd_configure_firewall: true + enable_debug: false + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + preProvisioned: true + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova +status: + observedGeneration: 1 + allHostnames: + edpm-compute-0: + ctlplane: edpm-compute-0.ctlplane.example.com + internalapi: edpm-compute-0.internalapi.example.com + storage: edpm-compute-0.storage.example.com + tenant: edpm-compute-0.tenant.example.com + allIPs: + edpm-compute-0: + ctlplane: 192.168.122.100 + internalapi: 172.17.0.100 + storage: 172.18.0.100 + tenant: 172.19.0.100 + ctlplaneSearchDomain: ctlplane.example.com + conditions: + - message: Deployment not started + reason: Requested + status: "False" + type: Ready + - message: Deployment not started + reason: Requested + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- 
+apiVersion: network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: edpm-compute-0 +spec: + immutable: false + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.100 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.100 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.100 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.100 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + vlan: 22 +--- +apiVersion: network.openstack.org/v1beta1 +kind: DNSData +metadata: + name: openstack-edpm-ipam +spec: + dnsDataLabelSelectorValue: dnsdata + hosts: + - hostnames: + - edpm-compute-0.ctlplane.example.com + ip: 192.168.122.100 + - hostnames: + - edpm-compute-0.internalapi.example.com + ip: 172.17.0.100 + - hostnames: + - edpm-compute-0.storage.example.com + ip: 172.18.0.100 + - hostnames: + - edpm-compute-0.tenant.example.com + ip: 172.19.0.100 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: ServiceConfigReady diff --git a/tests/kuttl/tests/dataplane-create-test/00-dataplane-create.yaml 
b/tests/kuttl/tests/dataplane-create-test/00-dataplane-create.yaml new file mode 100644 index 000000000..a7f7b889e --- /dev/null +++ b/tests/kuttl/tests/dataplane-create-test/00-dataplane-create.yaml @@ -0,0 +1,99 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: network-config-template +data: + network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-ipam +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + preProvisioned: true + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: 
internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # edpm_network_config + # Default nic config template for a EDPM compute node + # These vars are edpm_network_config role vars + edpm_network_config_hide_sensitive_logs: false + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/00-assert.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/00-assert.yaml new file mode 100644 index 000000000..34ef6ee43 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/00-assert.yaml @@ -0,0 +1,138 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc get OpenStackDataPlaneDeployment -n openstack-kuttl-tests edpm-compute-global -o yaml + name: edpm-compute-global-deployment +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-global + namespace: openstack-kuttl-tests +spec: + preProvisioned: true + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - 
neutron-dhcp + - libvirt + - nova + - custom-global-service + tlsEnabled: false + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + managementNetwork: ctlplane + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # edpm_network_config + # Default nic config template for a EDPM compute node + # These vars are edpm_network_config role vars + edpm_network_config_hide_sensitive_logs: false + # These vars are for the network config templates themselves and are + # considered EDPM network defaults. + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + ctlplane_mtu: 1500 + ctlplane_cidr: 24 + ctlplane_gateway_ip: 192.168.122.1 + ctlplane_host_routes: + - ip_netmask: 0.0.0.0/0 + next_hop: 192.168.122.1 + external_mtu: 1500 + external_vlan_id: 44 + external_cidr: '24' + external_host_routes: [] + internalapi_mtu: 1500 + internalapi_vlan_id: 20 + internalapi_cidr: '24' + internalapi_host_routes: [] + storage_mtu: 1500 + storage_vlan_id: 21 + storage_cidr: '24' + storage_host_routes: [] + tenant_mtu: 1500 + tenant_vlan_id: 22 + tenant_cidr: '24' + tenant_host_routes: [] + nodeset_networks: + - InternalApi + - Storage + - Tenant + networks_lower: + External: external + InternalApi: internalapi + Storage: storage + Tenant: tenant + # edpm_nodes_validation + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + ctlplane_dns_nameservers: + - 192.168.122.1 + dns_search_domains: [] + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing +status: + observedGeneration: 1 + 
conditions: + - message: Deployment not started + reason: Requested + severity: Info + status: "False" + type: Ready + - message: Deployment not started + reason: Requested + severity: Info + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/00-dataplane-create.yaml new file mode 100644 index 000000000..b3d048c89 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/00-dataplane-create.yaml @@ -0,0 +1,230 @@ +apiVersion: v1 +kind: Secret +metadata: + name: nova-cell1-compute-config +data: + nova-blank.conf: Zm9vCg== + 01-nova.conf: Zm9vCg== +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ovncontroller-config +data: + ovsdb-config: test-ovn-config +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-ovn-metadata-agent-neutron-config +data: + 10-neutron-metadata.conf: dGVzdC1uZXV0cm9uLW92bi1tZXRhZGF0YS1hZ2VudC1jb25maWc= +--- +apiVersion: v1 +kind: Secret +metadata: + name: nova-metadata-neutron-config +data: + 05-nova-metadata.conf: dGVzdC1ub3ZhLW1ldGFkYXRhLWNvbXB1dGUtY29uZmln + httpd.conf: dGVzdC1ub3ZhLW1ldGFkYXRhLWNvbXB1dGUtY29uZmln + nova-metadata-config.json: dGVzdC1ub3ZhLW1ldGFkYXRhLWNvbXB1dGUtY29uZmln +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-ovn-agent-neutron-config +data: + 10-neutron-ovn.conf: dGVzdC1uZXV0cm9uLW92bi1hZ2VudC1jb25maWc= +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
neutron-sriov-agent-neutron-config +data: + 10-neutron-sriov.conf: dGVzdC1uZXV0cm9uLXNyaW92LWFnZW50LXNlY3JldC1jb25maWcK +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-dhcp-agent-neutron-config +data: + 10-neutron-dhcp.conf: dGVzdC1uZXV0cm9uLWRoY3AtYWdlbnQtc2VjcmV0LWNvbmZpZwo= +--- +apiVersion: v1 +kind: Secret +metadata: + name: nova-migration-ssh-key +data: + ssh-privatekey: ZmFrZQo= + ssh-publickey: ZmFrZQo= +--- +apiVersion: v1 +kind: Secret +metadata: + name: libvirt-secret +data: + LibvirtPassword: ZmFrZQo= +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: custom-global-service +spec: + label: custom-global-service + playbookContents: | + - hosts: localhost + gather_facts: no + name: global kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + deployOnAllNodeSets: true +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: network-config-template +data: + network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') 
}} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-global +spec: + baremetalSetTemplate: + automatedCleaningMode: metadata + bmhNamespace: openshift-machine-api + cloudUserName: "" + ctlplaneInterface: "" + ctlplaneNetmask: 255.255.255.0 + deploymentSSHSecret: "" + hardwareReqs: + cpuReqs: + countReq: {} + mhzReq: {} + diskReqs: + gbReq: {} + ssdReq: {} + memReqs: + gbReq: {} + preProvisioned: true + tlsEnabled: false + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + - custom-global-service + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + managementNetwork: ctlplane + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # edpm_network_config + # Default nic config template for a EDPM compute node + # These vars are edpm_network_config role vars + edpm_network_config_hide_sensitive_logs: false + # These vars are for the network config templates themselves and are + # considered EDPM network defaults. 
+ neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + ctlplane_mtu: 1500 + ctlplane_cidr: 24 + ctlplane_gateway_ip: 192.168.122.1 + ctlplane_host_routes: + - ip_netmask: 0.0.0.0/0 + next_hop: 192.168.122.1 + external_mtu: 1500 + external_vlan_id: 44 + external_cidr: '24' + external_host_routes: [] + internalapi_mtu: 1500 + internalapi_vlan_id: 20 + internalapi_cidr: '24' + internalapi_host_routes: [] + storage_mtu: 1500 + storage_vlan_id: 21 + storage_cidr: '24' + storage_host_routes: [] + tenant_mtu: 1500 + tenant_vlan_id: 22 + tenant_cidr: '24' + tenant_host_routes: [] + nodeset_networks: + - InternalApi + - Storage + - Tenant + networks_lower: + External: external + InternalApi: internalapi + Storage: storage + Tenant: tenant + # edpm_nodes_validation + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + ctlplane_dns_nameservers: + - 192.168.122.1 + dns_search_domains: [] + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml new file mode 100644 index 000000000..ab4189bcb --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/01-assert.yaml @@ -0,0 +1,1026 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc get OpenStackDataPlaneDeployment -n openstack-kuttl-tests edpm-compute-global -o yaml + name: edpm-compute-global-deployment +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-global + 
namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + - custom-global-service + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: + - message: NodeSet Ready + reason: Ready + status: "True" + type: Ready + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: custom-global-service-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key/ssh_key_edpm-compute-global + name: ssh-key-edpm-compute-global + subPath: ssh_key_edpm-compute-global + - mountPath: /runner/inventory/inventory-0 + name: inventory-0 + subPath: inventory-0 + volumes: + - name: ssh-key-edpm-compute-global + secret: + items: + - key: ssh-privatekey + path: 
ssh_key_edpm-compute-global + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory-0 + secret: + items: + - key: inventory + path: inventory-0 + secretName: dataplanenodeset-edpm-compute-global + extraVars: + edpm_override_hosts: all + name: openstackansibleee + play: | + - hosts: localhost + gather_facts: no + name: global kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + preserveJobs: true + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: download-cache-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.download_cache + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: bootstrap-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + 
ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.bootstrap + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady + +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: configure-network-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.configure_network + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + 
- message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: validate-network-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.validate_network + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-os-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: 
dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.install_os + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: configure-os-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.configure_os + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: run-os-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: 
inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.run_os + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-certs-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.install_certs + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: ovn-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true 
+ controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config + name: ovncontroller-config-0 + subPath: ovsdb-config + volumes: + - configMap: + items: + - key: ovsdb-config + path: ovsdb-config + name: ovncontroller-config + name: ovncontroller-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.ovn + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-metadata-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-metadata/10-neutron-metadata.conf + name: neutron-ovn-metadata-agent-neutron-config-0 + subPath: 10-neutron-metadata.conf + volumes: + - secret: + items: + - key: 10-neutron-metadata.conf + path: 10-neutron-metadata.conf + secretName: neutron-ovn-metadata-agent-neutron-config + name: neutron-ovn-metadata-agent-neutron-config-0 + - mounts: + - mountPath: 
/var/lib/openstack/configs/neutron-metadata/05-nova-metadata.conf + name: nova-metadata-neutron-config-0 + subPath: 05-nova-metadata.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/httpd.conf + name: nova-metadata-neutron-config-1 + subPath: httpd.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/nova-metadata-config.json + name: nova-metadata-neutron-config-2 + subPath: nova-metadata-config.json + volumes: + - secret: + items: + - key: 05-nova-metadata.conf + path: 05-nova-metadata.conf + secretName: nova-metadata-neutron-config + name: nova-metadata-neutron-config-0 + - name: nova-metadata-neutron-config-1 + secret: + items: + - key: httpd.conf + path: httpd.conf + secretName: nova-metadata-neutron-config + - name: nova-metadata-neutron-config-2 + secret: + items: + - key: nova-metadata-config.json + path: nova-metadata-config.json + secretName: nova-metadata-neutron-config + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_metadata + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-ovn-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + 
name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-ovn/10-neutron-ovn.conf + name: neutron-ovn-agent-neutron-config-0 + subPath: 10-neutron-ovn.conf + volumes: + - secret: + items: + - key: 10-neutron-ovn.conf + path: 10-neutron-ovn.conf + secretName: neutron-ovn-agent-neutron-config + name: neutron-ovn-agent-neutron-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_ovn + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-sriov-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-sriov/10-neutron-sriov.conf + name: neutron-sriov-agent-neutron-config-0 + subPath: 10-neutron-sriov.conf + volumes: + - secret: + items: + - key: 10-neutron-sriov.conf + path: 10-neutron-sriov.conf + secretName: neutron-sriov-agent-neutron-config + name: neutron-sriov-agent-neutron-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: 
/runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_sriov + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-dhcp-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-dhcp/10-neutron-dhcp.conf + name: neutron-dhcp-agent-neutron-config-0 + subPath: 10-neutron-dhcp.conf + volumes: + - secret: + items: + - key: 10-neutron-dhcp.conf + path: 10-neutron-dhcp.conf + secretName: neutron-dhcp-agent-neutron-config + name: neutron-dhcp-agent-neutron-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_dhcp + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + 
reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: libvirt-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-global +spec: + backoffLimit: 6 + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/libvirt/LibvirtPassword + name: libvirt-secret-0 + subPath: LibvirtPassword + volumes: + - name: libvirt-secret-0 + secret: + items: + - key: LibvirtPassword + path: LibvirtPassword + secretName: libvirt-secret + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + preserveJobs: true + restartPolicy: Never + playbook: osp.edpm.libvirt + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: nova-edpm-compute-global-edpm-compute-global + namespace: openstack-kuttl-tests +spec: + backoffLimit: 6 + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/nova/01-nova.conf + name: nova-cell1-compute-config-0 + subPath: 01-nova.conf + - mountPath: 
/var/lib/openstack/configs/nova/nova-blank.conf + name: nova-cell1-compute-config-1 + subPath: nova-blank.conf + volumes: + - name: nova-cell1-compute-config-0 + secret: + items: + - key: 01-nova.conf + path: 01-nova.conf + secretName: nova-cell1-compute-config + - name: nova-cell1-compute-config-1 + secret: + items: + - key: nova-blank.conf + path: nova-blank.conf + secretName: nova-cell1-compute-config + - mounts: + - mountPath: /var/lib/openstack/configs/nova/ssh-privatekey + name: nova-migration-ssh-key-0 + subPath: ssh-privatekey + - mountPath: /var/lib/openstack/configs/nova/ssh-publickey + name: nova-migration-ssh-key-1 + subPath: ssh-publickey + volumes: + - name: nova-migration-ssh-key-0 + secret: + items: + - key: ssh-privatekey + path: ssh-privatekey + secretName: nova-migration-ssh-key + - name: nova-migration-ssh-key-1 + secret: + items: + - key: ssh-publickey + path: ssh-publickey + secretName: nova-migration-ssh-key + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-global + name: openstackansibleee + preserveJobs: true + restartPolicy: Never + playbook: osp.edpm.nova + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/01-dataplane-deploy.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/01-dataplane-deploy.yaml new file mode 100644 index 000000000..62f2006c3 --- /dev/null +++ 
b/tests/kuttl/tests/dataplane-deploy-global-service-test/01-dataplane-deploy.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-global +spec: + nodeSets: + - edpm-compute-global diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/02-add-nodeset.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/02-add-nodeset.yaml new file mode 100644 index 000000000..d9ae4555e --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/02-add-nodeset.yaml @@ -0,0 +1,42 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-beta-nodeset +spec: + baremetalSetTemplate: + automatedCleaningMode: metadata + bmhNamespace: openshift-machine-api + cloudUserName: "" + ctlplaneInterface: "" + ctlplaneNetmask: 255.255.255.0 + deploymentSSHSecret: "" + hardwareReqs: + cpuReqs: + countReq: {} + mhzReq: {} + diskReqs: + gbReq: {} + ssdReq: {} + memReqs: + gbReq: {} + preProvisioned: true + services: + - download-cache + - bootstrap + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleUser: cloud-admin +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-multinodeset +spec: + nodeSets: + - edpm-compute-global + - edpm-compute-beta-nodeset diff --git a/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml new file mode 100644 index 000000000..c133a2142 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-global-service-test/02-assert.yaml @@ -0,0 +1,174 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs 
+--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-beta-nodeset + namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + conditions: + - message: NodeSet Ready + reason: Ready + status: "True" + type: Ready + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady + configMapHashes: + ovncontroller-config: n647h6fh674h55fh56ch5bh68bh5fdh8dh59ch58dhdch59ch646h568h675h99h66bh59bhcch5b4h589h674h568hbch84h554h95h6dhc4hbh699q + deploymentStatuses: {} + secretHashes: + neutron-dhcp-agent-neutron-config: n68h676h98h689hd4h575h5dbh694h6fh688h57h665h5c5h56dh5ddh65bh5d7h5cdh644hb8h8fh5d9h5b9h555h9ch56dh5fh6chd4h5c5h5c5h68q + neutron-ovn-agent-neutron-config: n5f4h89hb8h645h55bh657h9fh5d9h5c6h595h9dh667h5f4hfhffh7fh685h56ch57fh679h5ddh5ddh95h696hbch5c7h669h84h54dh685hfh85q + neutron-ovn-metadata-agent-neutron-config: n68dh585h666h5c4h568hf7h65fh695h649hb9h657h5f6h548h679h77h5b4h664h8h5b8h654h5hf5h674h664h545h74h58ch57ch8ch56h54fh5ddq + neutron-sriov-agent-neutron-config: n685h567h697h5bch8ch5cfh87h698h658h684h8h99h5dch5c5h699h79hb5h87h66dh664h546h586h7bh56fh5d6h5d4h566h56bh87h678h696h56cq + nova-cell1-compute-config: 
n89hd6h5h545h644h58h556hd9h5c5h598hd4h7bh5f9h5bdh649hb5h99h686h677h8ch575h665h574h587h5b6h5ddh8fh687h9bh657h675h97q + nova-metadata-neutron-config: n7fh696h674h5b9h68dh77h677h5c5hd9h5dbh89h646h696h65ch64bh86hd8h56h78h558h5h5c7h87h86h5bh5bch78h6ch5cbh54fh56fhfdq + nova-migration-ssh-key: n64dh97h54dhffh65fh577h59bh664hbch54dhcbh547hdbhdch655hd9h675h5d4h67dh5ch67bh64h5fdh5c8h5cdh66bh5f5h58dhcbh9bh66bhd4q +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-multinodeset + namespace: openstack-kuttl-tests +spec: + nodeSets: + - edpm-compute-global + - edpm-compute-beta-nodeset +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-multinodeset +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.download_cache + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: 
dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-multinodeset +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.bootstrap + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/00-assert.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/00-assert.yaml new file mode 100644 index 000000000..4c0aee626 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/00-assert.yaml @@ -0,0 +1,359 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: generic-service1 +spec: + caCerts: combined-ca-bundle + tlsCerts: + default: + contents: + - dnsnames + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: install-certs-ovr +spec: + addCertMounts: True + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + 
tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-tls +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.100 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.100 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.100 + edpm-compute-1: + hostName: edpm-compute-1 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.101 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.101 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.101 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.101 + edpm-compute-2: + hostName: edpm-compute-2 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.102 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.102 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.102 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.102 + nodeTemplate: + ansible: + ansiblePort: 22 + ansibleUser: cloud-admin + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: templates/single_nic_vlans/single_nic_vlans.j2 + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_selinux_mode: enforcing + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + edpm_sshd_configure_firewall: true + enable_debug: false + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + preProvisioned: true + tlsEnabled: true + secretMaxSize: 2880 + services: + - install-certs-ovr + - generic-service1 +status: + 
observedGeneration: 1 + conditions: + - message: Deployment not started + reason: Requested + status: "False" + type: Ready + - message: Deployment not started + reason: Requested + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: edpm-compute-0 +spec: + immutable: false + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.100 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.100 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.100 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.100 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + vlan: 22 +--- +apiVersion: 
network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: edpm-compute-1 +spec: + immutable: false + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.101 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.101 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.101 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.101 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + vlan: 22 +--- +apiVersion: network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: edpm-compute-2 +spec: + immutable: false + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.102 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: 
ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.102 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.102 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.102 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + vlan: 22 +--- +apiVersion: network.openstack.org/v1beta1 +kind: DNSData +metadata: + name: openstack-edpm-tls +spec: + dnsDataLabelSelectorValue: dnsdata +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: ServiceConfigReady diff --git a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/00-dataplane-create.yaml new file mode 100644 index 000000000..bbafdeeee --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/00-dataplane-create.yaml @@ -0,0 +1,119 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: generic-service1 +spec: + caCerts: combined-ca-bundle + tlsCerts: + default: + contents: + - dnsnames + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: install-certs-ovr +spec: + addCertMounts: True + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-tls +spec: + env: + - name: ANSIBLE_FORCE_COLOR 
+ value: "True" + services: + - install-certs-ovr + - generic-service1 + preProvisioned: true + tlsEnabled: true + secretMaxSize: 2880 + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.100 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.100 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.100 + edpm-compute-1: + hostName: edpm-compute-1 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.101 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.101 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.101 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.101 + edpm-compute-2: + hostName: edpm-compute-2 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.102 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.102 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.102 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.102 + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # edpm_network_config + # Default nic config template for a EDPM compute node + # These vars are edpm_network_config role vars + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: templates/single_nic_vlans/single_nic_vlans.j2 + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing diff --git 
a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/01-create-cert-issuers.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/01-create-cert-issuers.yaml new file mode 100644 index 000000000..5a4d601f1 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/01-create-cert-issuers.yaml @@ -0,0 +1,22 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + function wait_for() { + timeout=$1 + shift 1 + until [ $timeout -le 0 ] || ("$@" &> /dev/null); do + echo waiting for "$@" + sleep 1 + timeout=$(( timeout - 1 )) + done + if [ $timeout -le 0 ]; then + return 1 + fi + } + + if oc get secret combined-ca-bundle -n openstack-kuttl-tests; then oc delete secret combined-ca-bundle -n openstack-kuttl-tests; fi + oc apply -f ./certs.yaml + wait_for 100 oc get secret osp-rootca-secret -n openstack-kuttl-tests + CA_CRT=$(oc get secret osp-rootca-secret -n openstack-kuttl-tests -o json|jq -r '.data."ca.crt"') + oc create secret generic combined-ca-bundle -n openstack-kuttl-tests --from-literal=TLSCABundleFile=$CA_CRT diff --git a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml new file mode 100644 index 000000000..356b6c63b --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-assert.yaml @@ -0,0 +1,220 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cert-generic-service1-default-edpm-compute-0 + annotations: + cert-manager.io/certificate-name: generic-service1-default-edpm-compute-0 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + labels: + hostname: edpm-compute-0 + osdp-service: generic-service1 + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-generic-service1-default-edpm-compute-1 + annotations: + cert-manager.io/certificate-name: 
generic-service1-default-edpm-compute-1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + labels: + hostname: edpm-compute-1 + osdp-service: generic-service1 + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-generic-service1-default-edpm-compute-2 + annotations: + cert-manager.io/certificate-name: generic-service1-default-edpm-compute-2 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + labels: + hostname: edpm-compute-2 + osdp-service: generic-service1 + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +# validate the alt-names - which is a list with elements that can be in any order +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: | + template='{{index .metadata.annotations "cert-manager.io/alt-names" }}' + names=$(oc get secret cert-generic-service1-default-edpm-compute-0 -n openstack-kuttl-tests -o go-template="$template") + echo $names > test123.data + regex="(?=.*(edpm-compute-0\.internalapi\.example\.com))(?=.*(edpm-compute-0\.storage\.example\.com))(?=.*(edpm-compute-0\.tenant\.example\.com))(?=.*(edpm-compute-0\.ctlplane\.example\.com))" + matches=$(grep -P "$regex" test123.data) + rm test123.data + if [ -z "$matches" ]; then + echo "bad match: $names" + exit 1 + else + exit 0 + fi +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-generic-service1-default-certs-0 + labels: + numberOfSecrets: "3" + secretNumber: "0" + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-generic-service1-default-certs-1 + labels: + numberOfSecrets: "3" + secretNumber: "1" 
+ ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-generic-service1-default-certs-2 + labels: + numberOfSecrets: "3" + secretNumber: "2" + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-certs-ovr-openstack-edpm-tls-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/certs/generic-service1/default + name: openstack-edpm-tls-generic-service1-default-certs-0 + volumes: + - name: openstack-edpm-tls-generic-service1-default-certs-0 + projected: + sources: + - secret: + name: openstack-edpm-tls-generic-service1-default-certs-0 + - secret: + name: openstack-edpm-tls-generic-service1-default-certs-1 + - secret: + name: openstack-edpm-tls-generic-service1-default-certs-2 + - mounts: + - mountPath: /var/lib/openstack/cacerts/generic-service1 + name: generic-service1-combined-ca-bundle + volumes: + - name: generic-service1-combined-ca-bundle + secret: + secretName: combined-ca-bundle + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: openstackansibleee + restartPolicy: Never + uid: 
1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: generic-service1-openstack-edpm-tls-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: openstackansibleee + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-dataplane-deploy.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-dataplane-deploy.yaml new file mode 100644 index 000000000..61aa719ef --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/02-dataplane-deploy.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: openstack-edpm-tls +spec: + nodeSets: + - openstack-edpm-tls + services: + - generic-service1 diff --git a/tests/kuttl/tests/dataplane-deploy-multiple-secrets/certs.yaml b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/certs.yaml new file mode 100644 index 000000000..7cffc290c --- /dev/null +++ 
b/tests/kuttl/tests/dataplane-deploy-multiple-secrets/certs.yaml @@ -0,0 +1,38 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: openstack-kuttl-tests +spec: + selfSigned: {} +--- +# RootCA Certificate used to sign certificates +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: osp-rootca + namespace: openstack-kuttl-tests +spec: + isCA: true + commonName: osp-rootca + secretName: osp-rootca-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: Issuer + group: cert-manager.io +--- +# Issuer that uses the generated CA certificate to issue certs +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: rootca-internal + namespace: openstack-kuttl-tests + labels: + osp-rootca-issuer-internal: "" +spec: + ca: + secretName: osp-rootca-secret +--- diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/00-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/00-assert.yaml new file mode 100644 index 000000000..260b74a7c --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/00-assert.yaml @@ -0,0 +1,68 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + preProvisioned: true + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: 
+ - message: Deployment not started + reason: Requested + status: "False" + type: Ready + - message: Deployment not started + reason: Requested + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/00-dataplane-create.yaml new file mode 100644 index 000000000..23e5d5cf1 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/00-dataplane-create.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Secret +metadata: + name: nova-cell1-compute-config +data: + nova-blank.conf: Zm9vCg== + 01-nova.conf: Zm9vCg== +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ovncontroller-config +data: + ovsdb-config: test-ovn-config +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-ovn-metadata-agent-neutron-config +data: + 10-neutron-metadata.conf: dGVzdC1uZXV0cm9uLW92bi1tZXRhZGF0YS1hZ2VudC1jb25maWc= +--- +apiVersion: v1 +kind: Secret +metadata: + name: nova-metadata-neutron-config +data: + 05-nova-metadata.conf: dGVzdC1ub3ZhLW1ldGFkYXRhLWNvbXB1dGUtY29uZmln + httpd.conf: dGVzdC1ub3ZhLW1ldGFkYXRhLWNvbXB1dGUtY29uZmln + nova-metadata-config.json: dGVzdC1ub3ZhLW1ldGFkYXRhLWNvbXB1dGUtY29uZmln +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-ovn-agent-neutron-config +data: + 10-neutron-ovn.conf: dGVzdC1uZXV0cm9uLW92bi1hZ2VudC1jb25maWc= +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-sriov-agent-neutron-config +data: + 10-neutron-sriov.conf:
dGVzdC1uZXV0cm9uLXNyaW92LWFnZW50LXNlY3JldC1jb25maWcK +--- +apiVersion: v1 +kind: Secret +metadata: + name: neutron-dhcp-agent-neutron-config +data: + 10-neutron-dhcp.conf: dGVzdC1uZXV0cm9uLWRoY3AtYWdlbnQtc2VjcmV0LWNvbmZpZwo= +--- +apiVersion: v1 +kind: Secret +metadata: + name: nova-migration-ssh-key +data: + ssh-privatekey: ZmFrZQo= + ssh-publickey: ZmFrZQo= +--- +apiVersion: v1 +kind: Secret +metadata: + name: libvirt-secret +data: + LibvirtPassword: ZmFrZQo= +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes +spec: + preProvisioned: true + tlsEnabled: false + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml new file mode 100644 index 000000000..ef909f218 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-assert.yaml @@ -0,0 +1,959 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + env: + - name: ANSIBLE_FORCE_COLOR + value: 
"True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: + - message: NodeSet Ready + reason: Ready + status: "True" + type: Ready + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: download-cache-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.download_cache + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: 
OpenStackAnsibleEE +metadata: + name: bootstrap-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraVars: + foo: bar + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.bootstrap + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady + +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: configure-network-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + 
playbook: osp.edpm.configure_network + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: validate-network-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.validate_network + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-os-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + 
items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.install_os + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: configure-os-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.configure_os + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: run-os-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: 
OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.run_os + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-certs-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.install_certs + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: 
ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config + name: ovncontroller-config-0 + subPath: ovsdb-config + volumes: + - configMap: + items: + - key: ovsdb-config + path: ovsdb-config + name: ovncontroller-config + name: ovncontroller-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.ovn + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-metadata-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-metadata/10-neutron-metadata.conf + name: neutron-ovn-metadata-agent-neutron-config-0 + subPath: 10-neutron-metadata.conf + volumes: 
+ - secret: + items: + - key: 10-neutron-metadata.conf + path: 10-neutron-metadata.conf + secretName: neutron-ovn-metadata-agent-neutron-config + name: neutron-ovn-metadata-agent-neutron-config-0 + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-metadata/05-nova-metadata.conf + name: nova-metadata-neutron-config-0 + subPath: 05-nova-metadata.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/httpd.conf + name: nova-metadata-neutron-config-1 + subPath: httpd.conf + - mountPath: /var/lib/openstack/configs/neutron-metadata/nova-metadata-config.json + name: nova-metadata-neutron-config-2 + subPath: nova-metadata-config.json + volumes: + - secret: + items: + - key: 05-nova-metadata.conf + path: 05-nova-metadata.conf + secretName: nova-metadata-neutron-config + name: nova-metadata-neutron-config-0 + - name: nova-metadata-neutron-config-1 + secret: + items: + - key: httpd.conf + path: httpd.conf + secretName: nova-metadata-neutron-config + - name: nova-metadata-neutron-config-2 + secret: + items: + - key: nova-metadata-config.json + path: nova-metadata-config.json + secretName: nova-metadata-neutron-config + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_metadata + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: 
neutron-ovn-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-ovn/10-neutron-ovn.conf + name: neutron-ovn-agent-neutron-config-0 + subPath: 10-neutron-ovn.conf + volumes: + - secret: + items: + - key: 10-neutron-ovn.conf + path: 10-neutron-ovn.conf + secretName: neutron-ovn-agent-neutron-config + name: neutron-ovn-agent-neutron-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_ovn + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-sriov-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-sriov/10-neutron-sriov.conf + name: neutron-sriov-agent-neutron-config-0 + subPath: 10-neutron-sriov.conf + volumes: + - secret: + items: 
+ - key: 10-neutron-sriov.conf + path: 10-neutron-sriov.conf + secretName: neutron-sriov-agent-neutron-config + name: neutron-sriov-agent-neutron-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_sriov + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: neutron-dhcp-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/neutron-dhcp/10-neutron-dhcp.conf + name: neutron-dhcp-agent-neutron-config-0 + subPath: 10-neutron-dhcp.conf + volumes: + - secret: + items: + - key: 10-neutron-dhcp.conf + path: 10-neutron-dhcp.conf + secretName: neutron-dhcp-agent-neutron-config + name: neutron-dhcp-agent-neutron-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - 
key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.neutron_dhcp + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: libvirt-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/libvirt/LibvirtPassword + name: libvirt-secret-0 + subPath: LibvirtPassword + volumes: + - name: libvirt-secret-0 + secret: + items: + - key: LibvirtPassword + path: LibvirtPassword + secretName: libvirt-secret + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + preserveJobs: true + restartPolicy: Never + playbook: osp.edpm.libvirt + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: nova-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: 
openstack-kuttl-tests +spec: + backoffLimit: 6 + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/nova/01-nova.conf + name: nova-cell1-compute-config-0 + subPath: 01-nova.conf + - mountPath: /var/lib/openstack/configs/nova/nova-blank.conf + name: nova-cell1-compute-config-1 + subPath: nova-blank.conf + volumes: + - name: nova-cell1-compute-config-0 + secret: + items: + - key: 01-nova.conf + path: 01-nova.conf + secretName: nova-cell1-compute-config + - name: nova-cell1-compute-config-1 + secret: + items: + - key: nova-blank.conf + path: nova-blank.conf + secretName: nova-cell1-compute-config + - mounts: + - mountPath: /var/lib/openstack/configs/nova/ssh-privatekey + name: nova-migration-ssh-key-0 + subPath: ssh-privatekey + - mountPath: /var/lib/openstack/configs/nova/ssh-publickey + name: nova-migration-ssh-key-1 + subPath: ssh-publickey + volumes: + - name: nova-migration-ssh-key-0 + secret: + items: + - key: ssh-privatekey + path: ssh-privatekey + secretName: nova-migration-ssh-key + - name: nova-migration-ssh-key-1 + secret: + items: + - key: ssh-publickey + path: ssh-publickey + secretName: nova-migration-ssh-key + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + preserveJobs: true + restartPolicy: Never + playbook: osp.edpm.nova + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git 
a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-dataplane-deploy.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-dataplane-deploy.yaml new file mode 100644 index 000000000..2f68ffbfe --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/01-dataplane-deploy.yaml @@ -0,0 +1,9 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes +spec: + nodeSets: + - edpm-compute-no-nodes + ansibleExtraVars: + foo: bar diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml new file mode 100644 index 000000000..487c1d40e --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-assert.yaml @@ -0,0 +1,70 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: custom-svc-edpm-compute-no-nodes-ovrd-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes-ovrd +spec: + backoffLimit: 6 + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + play: | + 
- hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost + preserveJobs: true + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-dataplane-deploy-services-override.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-dataplane-deploy-services-override.yaml new file mode 100644 index 000000000..5786dd903 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/02-dataplane-deploy-services-override.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: custom-svc +spec: + label: custom-svc + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes-ovrd +spec: + nodeSets: + - edpm-compute-no-nodes + servicesOverride: + - custom-svc diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/03-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/03-assert.yaml new file mode 100644 index 000000000..bde378cf0 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/03-assert.yaml @@ -0,0 +1,67 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + - configure-network + - 
validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: + - message: NodeSet Ready + reason: Ready + status: "True" + type: Ready + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/03-update-ovn-cm.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/03-update-ovn-cm.yaml new file mode 100644 index 000000000..98df55230 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/03-update-ovn-cm.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ovncontroller-config +data: + ovsdb-config: test-ovn-config-updated +--- +# Sleep for 30s, b/c this test is meant to assert that even though we've +# changed the above CM, the hash on the NodeSet does not change. 
+apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: sleep 30 diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml new file mode 100644 index 000000000..516c7b4df --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-assert.yaml @@ -0,0 +1,129 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: + - message: NodeSet Ready + reason: Ready + status: "True" + type: Ready + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + name: ovn-edpm-compute-no-nodes-updated-ovn-cm-edpm-compute-no-nodes + 
namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes-updated-ovn-cm +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/ovn/ovsdb-config + name: ovncontroller-config-0 + subPath: ovsdb-config + volumes: + - configMap: + items: + - key: ovsdb-config + path: ovsdb-config + name: ovncontroller-config + name: ovncontroller-config-0 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.ovn + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-dataplane-deploy.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-dataplane-deploy.yaml new file mode 100644 index 000000000..5295f1181 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/04-dataplane-deploy.yaml @@ -0,0 +1,9 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes-updated-ovn-cm +spec: + nodeSets: + - edpm-compute-no-nodes + servicesOverride: + - ovn diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/05-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/05-assert.yaml new file mode 100644 index 000000000..4bb03489f --- /dev/null +++ 
b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/05-assert.yaml @@ -0,0 +1,218 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - neutron-ovn + - neutron-sriov + - neutron-dhcp + - libvirt + - nova + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: + - message: Deployment error occurred in this-service-does-not-exist service error + OpenStackDataPlaneService.dataplane.openstack.org "this-service-does-not-exist" + not found + reason: Error + severity: Error + status: "False" + type: Ready + - message: Deployment error occurred in this-service-does-not-exist service error + OpenStackDataPlaneService.dataplane.openstack.org "this-service-does-not-exist" + not found + reason: Error + severity: Error + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady + deploymentStatuses: + edpm-compute-no-nodes: + - message: Deployment completed + reason: Ready + status: "True" + type: 
NodeSetDeploymentReady + - message: Deployment ready for bootstrap service + reason: Ready + status: "True" + type: ServiceBootstrapDeploymentReady + - message: Deployment ready for configure-network service + reason: Ready + status: "True" + type: ServiceConfigureNetworkDeploymentReady + - message: Deployment ready for configure-os service + reason: Ready + status: "True" + type: ServiceConfigureOsDeploymentReady + - message: Deployment ready for download-cache service + reason: Ready + status: "True" + type: ServiceDownloadCacheDeploymentReady + - message: Deployment ready for install-certs service + reason: Ready + status: "True" + type: ServiceInstallCertsDeploymentReady + - message: Deployment ready for install-os service + reason: Ready + status: "True" + type: ServiceInstallOsDeploymentReady + - message: Deployment ready for libvirt service + reason: Ready + status: "True" + type: ServiceLibvirtDeploymentReady + - message: Deployment ready for neutron-dhcp service + reason: Ready + status: "True" + type: ServiceNeutronDhcpDeploymentReady + - message: Deployment ready for neutron-metadata service + reason: Ready + status: "True" + type: ServiceNeutronMetadataDeploymentReady + - message: Deployment ready for neutron-ovn service + reason: Ready + status: "True" + type: ServiceNeutronOvnDeploymentReady + - message: Deployment ready for neutron-sriov service + reason: Ready + status: "True" + type: ServiceNeutronSriovDeploymentReady + - message: Deployment ready for nova service + reason: Ready + status: "True" + type: ServiceNovaDeploymentReady + - message: Deployment ready for ovn service + reason: Ready + status: "True" + type: ServiceOvnDeploymentReady + - message: Deployment ready for run-os service + reason: Ready + status: "True" + type: ServiceRunOsDeploymentReady + - message: Deployment ready for validate-network service + reason: Ready + status: "True" + type: ServiceValidateNetworkDeploymentReady + edpm-compute-no-nodes-non-existent-service: + - 
message: Deployment error occurred in this-service-does-not-exist service error + OpenStackDataPlaneService.dataplane.openstack.org "this-service-does-not-exist" + not found + reason: Error + severity: Error + status: "False" + type: NodeSetDeploymentReady + - message: Deployment error occurred in this-service-does-not-exist service error + OpenStackDataPlaneService.dataplane.openstack.org "this-service-does-not-exist" + not found + reason: Error + severity: Error + status: "False" + type: ServiceThisServiceDoesNotExistDeploymentReady + edpm-compute-no-nodes-ovrd: + - message: Deployment completed + reason: Ready + status: "True" + type: NodeSetDeploymentReady + - message: Deployment ready for custom-svc service + reason: Ready + status: "True" + type: ServiceCustomSvcDeploymentReady + edpm-compute-no-nodes-updated-ovn-cm: + - message: Deployment completed + reason: Ready + status: "True" + type: NodeSetDeploymentReady + - message: Deployment ready for ovn service + reason: Ready + status: "True" + type: ServiceOvnDeploymentReady +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes-non-existent-service + namespace: openstack-kuttl-tests +spec: + nodeSets: + - edpm-compute-no-nodes + servicesOverride: + - this-service-does-not-exist +status: + observedGeneration: 1 + conditions: + - message: 'Deployment error occurred nodeSet: edpm-compute-no-nodes error: OpenStackDataPlaneService.dataplane.openstack.org + "this-service-does-not-exist" not found' + reason: Error + severity: Warning + status: "False" + type: Ready + - message: 'Deployment error occurred nodeSet: edpm-compute-no-nodes error: OpenStackDataPlaneService.dataplane.openstack.org + "this-service-does-not-exist" not found' + reason: Error + severity: Warning + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + nodeSetConditions: + edpm-compute-no-nodes: + - 
message: Deployment error occurred in this-service-does-not-exist service error + OpenStackDataPlaneService.dataplane.openstack.org "this-service-does-not-exist" + not found + reason: Error + severity: Error + status: "False" + type: NodeSetDeploymentReady + - message: Deployment error occurred in this-service-does-not-exist service error + OpenStackDataPlaneService.dataplane.openstack.org "this-service-does-not-exist" + not found + reason: Error + severity: Error + status: "False" + type: ServiceThisServiceDoesNotExistDeploymentReady diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/05-dataplane-deploy-service-not-found.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/05-dataplane-deploy-service-not-found.yaml new file mode 100644 index 000000000..76b2f46eb --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/05-dataplane-deploy-service-not-found.yaml @@ -0,0 +1,9 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes-non-existent-service +spec: + nodeSets: + - edpm-compute-no-nodes + servicesOverride: + - this-service-does-not-exist diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-add-nodeset.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-add-nodeset.yaml new file mode 100644 index 000000000..e24b42809 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-add-nodeset.yaml @@ -0,0 +1,44 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-beta-nodeset +spec: + baremetalSetTemplate: + automatedCleaningMode: metadata + bmhNamespace: openshift-machine-api + cloudUserName: "" + ctlplaneInterface: "" + ctlplaneNetmask: 255.255.255.0 + deploymentSSHSecret: "" + hardwareReqs: + cpuReqs: + countReq: {} + mhzReq: {} + diskReqs: + gbReq: {} + ssdReq: {} + memReqs: + gbReq: {} + preProvisioned: true + services: + - download-cache + - bootstrap + env: + - name: 
ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleUser: cloud-admin +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-multinodeset +spec: + nodeSets: + - edpm-compute-no-nodes + - edpm-compute-beta-nodeset + ansibleExtraVars: + foo: bar diff --git a/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml new file mode 100644 index 000000000..21e2e873e --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-no-nodes-test/06-assert.yaml @@ -0,0 +1,172 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-beta-nodeset + namespace: openstack-kuttl-tests +spec: + services: + - download-cache + - bootstrap + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + conditions: + - message: NodeSet Ready + reason: Ready + status: "True" + type: Ready + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady + configMapHashes: + ovncontroller-config: 
n56h54bh9bhcbh65ch9fhdh66dh95h5dch569h678h7fh599h7ch84h597h59h54dh58dhf6h66bh565h4hc4h587h645hd7hcch5d8h5f4h55cq + secretHashes: + neutron-dhcp-agent-neutron-config: n68h676h98h689hd4h575h5dbh694h6fh688h57h665h5c5h56dh5ddh65bh5d7h5cdh644hb8h8fh5d9h5b9h555h9ch56dh5fh6chd4h5c5h5c5h68q + neutron-ovn-agent-neutron-config: n5f4h89hb8h645h55bh657h9fh5d9h5c6h595h9dh667h5f4hfhffh7fh685h56ch57fh679h5ddh5ddh95h696hbch5c7h669h84h54dh685hfh85q + neutron-ovn-metadata-agent-neutron-config: n68dh585h666h5c4h568hf7h65fh695h649hb9h657h5f6h548h679h77h5b4h664h8h5b8h654h5hf5h674h664h545h74h58ch57ch8ch56h54fh5ddq + neutron-sriov-agent-neutron-config: n685h567h697h5bch8ch5cfh87h698h658h684h8h99h5dch5c5h699h79hb5h87h66dh664h546h586h7bh56fh5d6h5d4h566h56bh87h678h696h56cq + nova-cell1-compute-config: n89hd6h5h545h644h58h556hd9h5c5h598hd4h7bh5f9h5bdh649hb5h99h686h677h8ch575h665h574h587h5b6h5ddh8fh687h9bh657h675h97q + nova-metadata-neutron-config: n7fh696h674h5b9h68dh77h677h5c5hd9h5dbh89h646h696h65ch64bh86hd8h56h78h558h5h5c7h87h86h5bh5bch78h6ch5cbh54fh56fhfdq +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-multinodeset + namespace: openstack-kuttl-tests +spec: + nodeSets: + - edpm-compute-no-nodes + - edpm-compute-beta-nodeset +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: download-cache-edpm-multinodeset-edpm-compute-beta-nodeset + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-multinodeset +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: 
inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.download_cache + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: bootstrap-edpm-multinodeset-edpm-compute-beta-nodeset + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-multinodeset +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-beta-nodeset + name: openstackansibleee + restartPolicy: Never + playbook: osp.edpm.bootstrap + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/00-assert.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/00-assert.yaml new file mode 100644 index 000000000..f7c4bd0e5 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/00-assert.yaml @@ -0,0 +1,217 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack 
+ name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: tls-dnsnames +spec: + caCerts: combined-ca-bundle + tlsCerts: + default: + contents: + - dnsnames + second: + contents: + - ips + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: install-certs-ovrd +spec: + addCertMounts: True + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-tls +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + nodeTemplate: + ansible: + ansiblePort: 22 + ansibleUser: cloud-admin + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + edpm_network_config_hide_sensitive_logs: false + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_selinux_mode: enforcing + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + edpm_sshd_configure_firewall: true + enable_debug: false + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + preProvisioned: true + tlsEnabled: true + services: + - install-certs-ovrd + - tls-dnsnames +status: + observedGeneration: 1 + conditions: + - message: Deployment not started + reason: Requested + status: "False" + type: Ready + - 
message: Deployment not started + reason: Requested + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: edpm-compute-0 +spec: + immutable: false + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.100 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.100 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.100 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.100 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + vlan: 22 +--- +apiVersion: network.openstack.org/v1beta1 +kind: DNSData +metadata: + name: openstack-edpm-tls +spec: + dnsDataLabelSelectorValue: dnsdata + hosts: + - 
hostnames: + - edpm-compute-0.ctlplane.example.com + ip: 192.168.122.100 + - hostnames: + - edpm-compute-0.internalapi.example.com + ip: 172.17.0.100 + - hostnames: + - edpm-compute-0.storage.example.com + ip: 172.18.0.100 + - hostnames: + - edpm-compute-0.tenant.example.com + ip: 172.19.0.100 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: ServiceConfigReady diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/00-dataplane-create.yaml new file mode 100644 index 000000000..c84c2fd34 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/00-dataplane-create.yaml @@ -0,0 +1,127 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: tls-dnsnames +spec: + caCerts: combined-ca-bundle + tlsCerts: + default: + contents: + - dnsnames + second: + contents: + - ips + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: install-certs-ovrd +spec: + addCertMounts: True + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: network-config-template +data: + network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ 
dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-tls +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + services: + - install-certs-ovrd + - tls-dnsnames + preProvisioned: true + tlsEnabled: true + nodes: + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # edpm_network_config + # Default nic config template for a EDPM compute node + # These vars are edpm_network_config role vars + edpm_network_config_hide_sensitive_logs: false + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: 
['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/01-create-cert-issuers.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/01-create-cert-issuers.yaml new file mode 100644 index 000000000..19b614ec4 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/01-create-cert-issuers.yaml @@ -0,0 +1,22 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + function wait_for() { + timeout=$1 + shift 1 + until [ $timeout -le 0 ] || ("$@" &> /dev/null); do + echo waiting for "$@" + sleep 1 + timeout=$(( timeout - 1 )) + done + if [ $timeout -le 0 ]; then + return 1 + fi + } + + if oc get secret combined-ca-bundle -n openstack-kuttl-tests; then oc delete secret combined-ca-bundle -n openstack-kuttl-tests; fi + oc apply -f ./certs.yaml + wait_for 100 oc get secret osp-rootca-secret -n openstack-kuttl-tests + CA_CRT=$(oc get secret osp-rootca-secret -n openstack-kuttl-tests -o json|jq -r '.data."ca.crt"') + oc create secret generic combined-ca-bundle -n openstack-kuttl-tests --from-literal=tls-ca-bundle.pem=$CA_CRT diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml new file mode 100644 index 000000000..22c379d3a --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/02-assert.yaml @@ -0,0 +1,255 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cert-tls-dnsnames-default-edpm-compute-0 + annotations: + cert-manager.io/certificate-name: tls-dnsnames-default-edpm-compute-0 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + labels: + hostname: edpm-compute-0 + osdp-service: tls-dnsnames + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + hostname: edpm-compute-0 + osdp-service: 
tls-dnsnames + osdp-service-cert-key: default + osdpns: openstack-edpm-tls + name: tls-dnsnames-default-edpm-compute-0 + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +spec: + issuerRef: + group: cert-manager.io + kind: Issuer + name: rootca-internal + secretName: cert-tls-dnsnames-default-edpm-compute-0 + secretTemplate: + labels: + hostname: edpm-compute-0 + osdp-service: tls-dnsnames + osdpns: openstack-edpm-tls +--- +# validate the alt-names and usages - which is a list with elements that can be in any order +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: | + template='{{index .spec.dnsNames }}' + names=$(oc get certificate tls-dnsnames-default-edpm-compute-0 -n openstack-kuttl-tests -o go-template="$template") + echo $names > test123.data + regex="(?=.*(edpm-compute-0\.internalapi\.example\.com))(?=.*(edpm-compute-0\.storage\.example\.com))(?=.*(edpm-compute-0\.tenant\.example\.com))(?=.*(edpm-compute-0\.ctlplane\.example\.com))" + matches=$(grep -P "$regex" test123.data) + rm test123.data + if [ -z "$matches" ]; then + echo "bad dnsnames match: $names" + exit 1 + else + exit 0 + fi + - script: | + template='{{index .spec.usages }}' + usages=$(oc get certificate tls-dnsnames-default-edpm-compute-0 -n openstack-kuttl-tests -o go-template="$template") + echo $usages > test123.data + regex="(?=.*(key encipherment))(?=.*(digital signature))(?=.*(server auth))" + matches=$(grep -P "$regex" test123.data) + rm test123.data + if [ -z "$matches" ]; then + echo "bad usages match: $usages" + exit 1 + else + exit 0 + fi +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-tls-dnsnames-second-edpm-compute-0 + annotations: + cert-manager.io/certificate-name: tls-dnsnames-second-edpm-compute-0 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + 
labels: + hostname: edpm-compute-0 + osdp-service: tls-dnsnames + osdp-service-cert-key: second + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + hostname: edpm-compute-0 + osdp-service: tls-dnsnames + osdp-service-cert-key: second + osdpns: openstack-edpm-tls + name: tls-dnsnames-second-edpm-compute-0 + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +spec: + issuerRef: + group: cert-manager.io + kind: Issuer + name: rootca-internal + secretName: cert-tls-dnsnames-second-edpm-compute-0 + secretTemplate: + labels: + hostname: edpm-compute-0 + osdp-service: tls-dnsnames + osdpns: openstack-edpm-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-certs-ovrd-openstack-edpm-tls-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/certs/tls-dnsnames/default + name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + volumes: + - name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + projected: + sources: + - secret: + name: openstack-edpm-tls-tls-dnsnames-default-certs-0 + - mounts: + - 
mountPath: /var/lib/openstack/certs/tls-dnsnames/second + name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + volumes: + - name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + projected: + sources: + - secret: + name: openstack-edpm-tls-tls-dnsnames-second-certs-0 + - mounts: + - mountPath: /var/lib/openstack/cacerts/tls-dnsnames + name: tls-dnsnames-combined-ca-bundle + volumes: + - name: tls-dnsnames-combined-ca-bundle + secret: + secretName: combined-ca-bundle + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: openstackansibleee + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: tls-dnsnames-openstack-edpm-tls-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: openstackansibleee + restartPolicy: Never + uid: 
1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/02-dataplane-deploy.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/02-dataplane-deploy.yaml new file mode 100644 index 000000000..17c50468f --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/02-dataplane-deploy.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: openstack-edpm-tls +spec: + nodeSets: + - openstack-edpm-tls diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml new file mode 100644 index 000000000..b5661aad9 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/03-assert.yaml @@ -0,0 +1,315 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cert-tls-dns-ips-default-edpm-compute-0 + annotations: + cert-manager.io/alt-names: edpm-compute-0.ctlplane.example.com + cert-manager.io/certificate-name: tls-dns-ips-default-edpm-compute-0 + cert-manager.io/ip-sans: 192.168.122.100 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + labels: + hostname: edpm-compute-0 + osdp-service: tls-dns-ips + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + hostname: edpm-compute-0 + osdp-service: tls-dns-ips + osdp-service-cert-key: default + osdpns: openstack-edpm-tls + name: tls-dns-ips-default-edpm-compute-0 + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +spec: + dnsNames: + - edpm-compute-0.ctlplane.example.com + issuerRef: + 
group: cert-manager.io + kind: Issuer + name: rootca-internal + secretName: cert-tls-dns-ips-default-edpm-compute-0 + secretTemplate: + labels: + hostname: edpm-compute-0 + osdp-service: tls-dns-ips + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-custom-tls-dns-default-edpm-compute-0 + annotations: + cert-manager.io/certificate-name: custom-tls-dns-default-edpm-compute-0 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: rootca-internal + labels: + hostname: edpm-compute-0 + osdp-service: custom-tls-dns + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +type: kubernetes.io/tls +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + hostname: edpm-compute-0 + osdp-service: custom-tls-dns + osdp-service-cert-key: default + osdpns: openstack-edpm-tls + name: custom-tls-dns-default-edpm-compute-0 + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +spec: + issuerRef: + group: cert-manager.io + kind: Issuer + name: rootca-internal + secretName: cert-custom-tls-dns-default-edpm-compute-0 + secretTemplate: + labels: + hostname: edpm-compute-0 + osdp-service: custom-tls-dns + osdp-service-cert-key: default + osdpns: openstack-edpm-tls +--- +# validate the alt-names and usages - which is a list with elements that can be in any order +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: | + template='{{index .spec.dnsNames }}' + names=$(oc get certificate custom-tls-dns-default-edpm-compute-0 -n openstack-kuttl-tests -o go-template="$template") + echo $names > test123.data + regex="(?=.*(edpm-compute-0\.internalapi\.example\.com))(?=.*(edpm-compute-0\.storage\.example\.com))(?=.*(edpm-compute-0\.tenant\.example\.com))(?=.*(edpm-compute-0\.ctlplane\.example\.com))" + 
matches=$(grep -P "$regex" test123.data) + rm test123.data + if [ -z "$matches" ]; then + echo "bad dnsnames match: $names" + exit 1 + else + exit 0 + fi + - script: | + template='{{index .spec.usages }}' + usages=$(oc get certificate custom-tls-dns-default-edpm-compute-0 -n openstack-kuttl-tests -o go-template="$template") + echo $usages > test123.data + regex="(?=.*(key encipherment))(?=.*(digital signature))(?=.*(server auth))(?=.*(client auth))" + matches=$(grep -P "$regex" test123.data) + rm test123.data + if [ -z "$matches" ]; then + echo "bad usages match: $usages" + exit 1 + else + exit 0 + fi +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-tls-dns-ips-default-certs-0 + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: openstack-edpm-tls-custom-tls-dns-default-certs-0 + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: openstack-edpm-tls +type: Opaque +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: install-certs-ovrd-openstack-edpm-tls-ovrd-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls-ovrd +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/certs/tls-dns-ips/default + name: openstack-edpm-tls-tls-dns-ips-default-certs-0 + volumes: + - name: openstack-edpm-tls-tls-dns-ips-default-certs-0 + projected: + sources: + - secret: + name: openstack-edpm-tls-tls-dns-ips-default-certs-0 + - mounts: + - mountPath: /var/lib/openstack/cacerts/tls-dns-ips + name: tls-dns-ips-combined-ca-bundle + volumes: + - name: tls-dns-ips-combined-ca-bundle + secret: + secretName: combined-ca-bundle + 
- mounts: + - mountPath: /var/lib/openstack/certs/custom-tls-dns/default + name: openstack-edpm-tls-custom-tls-dns-default-certs-0 + volumes: + - name: openstack-edpm-tls-custom-tls-dns-default-certs-0 + projected: + sources: + - secret: + name: openstack-edpm-tls-custom-tls-dns-default-certs-0 + - mounts: + - mountPath: /var/lib/openstack/cacerts/custom-tls-dns + name: custom-tls-dns-combined-ca-bundle + volumes: + - name: custom-tls-dns-combined-ca-bundle + secret: + secretName: combined-ca-bundle + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: openstackansibleee + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: tls-dns-ips-openstack-edpm-tls-ovrd-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls-ovrd +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: 
openstackansibleee + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: custom-tls-dns-openstack-edpm-tls-ovrd-openstack-edpm-tls + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneDeployment + name: openstack-edpm-tls-ovrd +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-openstack-edpm-tls + name: openstackansibleee + restartPolicy: Never + uid: 1001 +status: + JobStatus: Succeeded + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Job completed + reason: Ready + status: "True" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/03-dataplane-deploy-services-override.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/03-dataplane-deploy-services-override.yaml new file mode 100644 index 000000000..921a1adb3 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/03-dataplane-deploy-services-override.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: tls-dns-ips +spec: + caCerts: combined-ca-bundle + tlsCerts: + default: + contents: + - dnsnames + - ips + issuer: osp-rootca-issuer-internal + networks: + - ctlplane + playbookContents: | + - hosts: localhost + gather_facts: no + 
name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: custom-tls-dns +spec: + caCerts: combined-ca-bundle + tlsCerts: + default: + contents: + - dnsnames + keyUsages: + - key encipherment + - digital signature + - server auth + - client auth + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: install-certs-ovrd +spec: + addCertMounts: True + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep 1 + delegate_to: localhost +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: openstack-edpm-tls-ovrd +spec: + nodeSets: + - openstack-edpm-tls + servicesOverride: + - install-certs-ovrd + - tls-dns-ips + - custom-tls-dns diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/04-rotate-certs.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/04-rotate-certs.yaml new file mode 100644 index 000000000..8eab94f54 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/04-rotate-certs.yaml @@ -0,0 +1,4 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: oc delete -n openstack-kuttl-tests secrets cert-custom-tls-dns-default-edpm-compute-0 cert-tls-dns-ips-default-edpm-compute-0 diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/05-assert.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/05-assert.yaml new file mode 100644 index 000000000..937239713 --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/05-assert.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +commands: + - script: | + PNAME=`oc get pod -l 
job-name=install-certs-ovrd-certs-refresh-openstack-edpm-tls --field-selector status.phase=Succeeded -n openstack-kuttl-tests -o name` + echo $PNAME + if [ -z "$PNAME" ]; then + echo "Waiting for successful ansibleee pod" + exit 1 + fi + + serial1=`oc get secret cert-custom-tls-dns-default-edpm-compute-0 -n openstack-kuttl-tests -o json|jq -r '.data."tls.crt"'|base64 -d |openssl x509 -noout -serial` + echo "serial1:" $serial1 + serial2=`oc debug $PNAME -n openstack-kuttl-tests -- cat /var/lib/openstack/certs/custom-tls-dns/default/edpm-compute-0.ctlplane.example.com-tls.crt |openssl x509 -noout -serial` + echo "serial2:" $serial2 + if [ $serial1 != $serial2 ]; then + echo "serials for cert-custom-tls-dns-default-edpm-compute-0 not equal" + exit 1 + fi + + serial1=`oc get secret cert-tls-dns-ips-default-edpm-compute-0 -n openstack-kuttl-tests -o json|jq -r '.data."tls.crt"'|base64 -d |openssl x509 -noout -serial` + echo "serial1:" $serial1 + serial2=`oc debug $PNAME -n openstack-kuttl-tests -- cat /var/lib/openstack/certs/tls-dns-ips/default/edpm-compute-0.ctlplane.example.com-tls.crt |openssl x509 -noout -serial` + echo "serial2:" $serial2 + if [ $serial1 != $serial2 ]; then + echo "serials for cert-tls-dns-ips-default-edpm-compute-0 not equal" + exit 1 + fi + + exit 0 diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/05-dataplane-redeploy.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/05-dataplane-redeploy.yaml new file mode 100644 index 000000000..942fe5c1b --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/05-dataplane-redeploy.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: certs-refresh +spec: + nodeSets: + - openstack-edpm-tls + servicesOverride: + - install-certs-ovrd + - tls-dns-ips + - custom-tls-dns diff --git a/tests/kuttl/tests/dataplane-deploy-tls-test/certs.yaml b/tests/kuttl/tests/dataplane-deploy-tls-test/certs.yaml new file mode 100644 index 
000000000..7cffc290c --- /dev/null +++ b/tests/kuttl/tests/dataplane-deploy-tls-test/certs.yaml @@ -0,0 +1,38 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: openstack-kuttl-tests +spec: + selfSigned: {} +--- +# RootCA Certificate used to sign certificates +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: osp-rootca + namespace: openstack-kuttl-tests +spec: + isCA: true + commonName: osp-rootca + secretName: osp-rootca-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: Issuer + group: cert-manager.io +--- +# Issuer that uses the generated CA certificate to issue certs +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: rootca-internal + namespace: openstack-kuttl-tests + labels: + osp-rootca-issuer-internal: "" +spec: + ca: + secretName: osp-rootca-secret +--- diff --git a/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml b/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml new file mode 100644 index 000000000..21897befd --- /dev/null +++ b/tests/kuttl/tests/dataplane-extramounts/00-assert.yaml @@ -0,0 +1,71 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-extramounts +spec: + preProvisioned: true + services: + - test-service + nodes: {} + nodeTemplate: + extraMounts: + - extraVolType: edpm-ansible + mounts: + - mountPath: /usr/share/ansible/collections/ansible_collections/osp/edpm + name: edpm-ansible + volumes: + - name: edpm-ansible + persistentVolumeClaim: + claimName: edpm-ansible + readOnly: true +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: test-service-edpm-extramounts-edpm-extramounts + namespace: 
openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-extramounts +spec: + extraMounts: + - extraVolType: edpm-ansible + mounts: + - mountPath: /usr/share/ansible/collections/ansible_collections/osp/edpm + name: edpm-ansible + volumes: + - name: edpm-ansible + persistentVolumeClaim: + claimName: edpm-ansible + readOnly: true + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-extramounts diff --git a/tests/kuttl/tests/dataplane-extramounts/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-extramounts/00-dataplane-create.yaml new file mode 100644 index 000000000..9069dea31 --- /dev/null +++ b/tests/kuttl/tests/dataplane-extramounts/00-dataplane-create.yaml @@ -0,0 +1,37 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: test-service +spec: + label: test-service + playbook: test.yml +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-extramounts +spec: + preProvisioned: true + services: + - test-service + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + extraMounts: + - extraVolType: edpm-ansible + mounts: + - mountPath: /usr/share/ansible/collections/ansible_collections/osp/edpm + name: edpm-ansible + volumes: + - name: edpm-ansible + persistentVolumeClaim: + claimName: edpm-ansible + readOnly: true +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-extramounts +spec: 
+ nodeSets: + - edpm-extramounts diff --git a/tests/kuttl/tests/dataplane-multinode-nodeset-create-test/00-assert.yaml b/tests/kuttl/tests/dataplane-multinode-nodeset-create-test/00-assert.yaml new file mode 100644 index 000000000..e92511bf1 --- /dev/null +++ b/tests/kuttl/tests/dataplane-multinode-nodeset-create-test/00-assert.yaml @@ -0,0 +1,202 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-multinode +status: + allHostnames: + edpm-compute-0: + ctlplane: edpm-compute-0.ctlplane.example.com + internalapi: edpm-compute-0.internalapi.example.com + storage: edpm-compute-0.storage.example.com + tenant: edpm-compute-0.tenant.example.com + edpm-compute-1: + ctlplane: edpm-compute-1.ctlplane.example.com + internalapi: edpm-compute-1.internalapi.example.com + storage: edpm-compute-1.storage.example.com + tenant: edpm-compute-1.tenant.example.com + allIPs: + edpm-compute-0: + ctlplane: 192.168.122.100 + internalapi: 172.17.0.100 + storage: 172.18.0.100 + tenant: 172.19.0.100 + edpm-compute-1: + ctlplane: 192.168.122.101 + internalapi: 172.17.0.101 + storage: 172.18.0.101 + tenant: 172.19.0.101 + observedGeneration: 1 + ctlplaneSearchDomain: ctlplane.example.com + conditions: + - message: Deployment not started + reason: Requested + status: "False" + type: Ready + - message: Deployment not started + reason: Requested + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: 
edpm-compute-0 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.100 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.100 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.100 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.100 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + vlan: 22 +--- +apiVersion: network.openstack.org/v1beta1 +kind: IPSet +metadata: + name: edpm-compute-1 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Reservation successful + reason: Ready + status: "True" + type: ReservationReady + reservations: + - address: 192.168.122.101 + cidr: 192.168.122.0/24 + dnsDomain: ctlplane.example.com + gateway: 192.168.122.1 + mtu: 1500 + network: ctlplane + routes: + - destination: 0.0.0.0/0 + nexthop: 192.168.122.1 + subnet: subnet1 + - address: 172.17.0.101 + cidr: 172.17.0.0/24 + dnsDomain: internalapi.example.com + mtu: 1500 + network: internalapi + subnet: subnet1 + vlan: 20 + - address: 172.18.0.101 + cidr: 172.18.0.0/24 + dnsDomain: storage.example.com + mtu: 1500 + network: storage + subnet: subnet1 + vlan: 21 + - address: 172.19.0.101 + cidr: 172.19.0.0/24 + dnsDomain: tenant.example.com + mtu: 1500 + network: tenant + subnet: subnet1 + 
vlan: 22 +--- +apiVersion: network.openstack.org/v1beta1 +kind: DNSData +metadata: + name: openstack-edpm-multinode +spec: + dnsDataLabelSelectorValue: dnsdata + hosts: + - hostnames: + - edpm-compute-0.ctlplane.example.com + ip: 192.168.122.100 + - hostnames: + - edpm-compute-0.internalapi.example.com + ip: 172.17.0.100 + - hostnames: + - edpm-compute-0.storage.example.com + ip: 172.18.0.100 + - hostnames: + - edpm-compute-0.tenant.example.com + ip: 172.19.0.100 + - hostnames: + - edpm-compute-1.ctlplane.example.com + ip: 192.168.122.101 + - hostnames: + - edpm-compute-1.internalapi.example.com + ip: 172.17.0.101 + - hostnames: + - edpm-compute-1.storage.example.com + ip: 172.18.0.101 + - hostnames: + - edpm-compute-1.tenant.example.com + ip: 172.19.0.101 +status: + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Input data complete + reason: Ready + status: "True" + type: ServiceConfigReady diff --git a/tests/kuttl/tests/dataplane-multinode-nodeset-create-test/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-multinode-nodeset-create-test/00-dataplane-create.yaml new file mode 100644 index 000000000..342338504 --- /dev/null +++ b/tests/kuttl/tests/dataplane-multinode-nodeset-create-test/00-dataplane-create.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: network-config-template +data: + network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + 
mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: openstack-edpm-multinode +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + services: + - download-cache + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - run-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + preProvisioned: true + nodes: + edpm-compute-1: + hostName: edpm-compute-1 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.101 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.101 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.101 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.101 + edpm-compute-0: + hostName: edpm-compute-0 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.100 + - name: internalapi + subnetName: subnet1 + fixedIP: 172.17.0.100 + - name: storage + subnetName: subnet1 + fixedIP: 172.18.0.100 + - name: tenant + subnetName: subnet1 + fixedIP: 172.19.0.100 + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVarsFrom: + - prefix: edpm_ + configMapRef: + name: network-config-template + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # edpm_network_config + # Default nic config template for a EDPM compute node + # 
These vars are edpm_network_config role vars + edpm_network_config_hide_sensitive_logs: false + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + gather_facts: false + enable_debug: false + # edpm firewall, change the allowed CIDR if needed + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: ['192.168.122.0/24'] + # SELinux module + edpm_selinux_mode: enforcing diff --git a/tests/kuttl/tests/dataplane-service-config/00-assert.yaml b/tests/kuttl/tests/dataplane-service-config/00-assert.yaml new file mode 100644 index 000000000..7301ab83c --- /dev/null +++ b/tests/kuttl/tests/dataplane-service-config/00-assert.yaml @@ -0,0 +1,126 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: kuttl-service-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + backoffLimit: 6 + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /var/lib/openstack/configs/kuttl-service/00-ansibleVars + subPath: 00-ansibleVars + - mountPath: /var/lib/openstack/configs/kuttl-service/00-kuttl-service.conf + subPath: 00-kuttl-service.conf + - mountPath: /var/lib/openstack/configs/kuttl-service/01-kuttl-service.conf + subPath: 01-kuttl-service.conf + volumes: + - configMap: + items: + - key: 00-ansibleVars + path: 00-ansibleVars + name: kuttl-service-cm-0 + - configMap: + items: + - key: 00-kuttl-service.conf + path: 00-kuttl-service.conf + name: kuttl-service-cm-0 + - configMap: + items: + - 
key: 01-kuttl-service.conf + path: 01-kuttl-service.conf + name: kuttl-service-cm-0 + - mounts: + - mountPath: /var/lib/openstack/configs/kuttl-service/01-ansibleVars + subPath: 01-ansibleVars + - mountPath: /var/lib/openstack/configs/kuttl-service/10-kuttl-service.conf + subPath: 10-kuttl-service.conf + - mountPath: /var/lib/openstack/configs/kuttl-service/20-kuttl-service.conf + subPath: 20-kuttl-service.conf + volumes: + - configMap: + items: + - key: 01-ansibleVars + path: 01-ansibleVars + name: kuttl-service-cm-1 + - configMap: + items: + - key: 10-kuttl-service.conf + path: 10-kuttl-service.conf + name: kuttl-service-cm-1 + - configMap: + items: + - key: 20-kuttl-service.conf + path: 20-kuttl-service.conf + name: kuttl-service-cm-1 + - mounts: + - mountPath: /var/lib/openstack/configs/kuttl-service/30-kuttl-service.conf + subPath: 30-kuttl-service.conf + volumes: + - configMap: + items: + - key: 30-kuttl-service.conf + path: 30-kuttl-service.conf + name: kuttl-service-cm-2 + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + name: openstackansibleee + play: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep infinity + delegate_to: localhost + preserveJobs: true + restartPolicy: Never + uid: 1001 +status: + JobStatus: Running + conditions: + - message: Job in progress + reason: Requested + severity: Info + status: "False" + type: Ready + - message: Job in progress + reason: Requested + severity: Info + status: "False" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-service-config/00-create.yaml 
b/tests/kuttl/tests/dataplane-service-config/00-create.yaml new file mode 100644 index 000000000..5d97149c5 --- /dev/null +++ b/tests/kuttl/tests/dataplane-service-config/00-create.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kuttl-service-cm-0 +data: + 00-kuttl-service.conf: | + a=b + c=d + 01-kuttl-service.conf: | + e=f + g=h + 00-ansibleVars: | + foo: bar + baz: blippy +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kuttl-service-cm-1 +data: + 10-kuttl-service.conf: | + i=j + 20-kuttl-service.conf: | + k=l + 01-ansibleVars: | + baz: blippy + zed: zod +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kuttl-service-cm-2 +binaryData: + 30-kuttl-service.conf: Cg== +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: kuttl-service +spec: + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Sleep + command: sleep infinity + delegate_to: localhost + configMaps: + - kuttl-service-cm-0 + - kuttl-service-cm-1 + - kuttl-service-cm-2 +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes +spec: + preProvisioned: true + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + services: + - kuttl-service +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes +spec: + nodeSets: + - edpm-compute-no-nodes diff --git a/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml b/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml new file mode 100644 index 000000000..cb77b1109 --- /dev/null +++ b/tests/kuttl/tests/dataplane-service-custom-image/00-assert.yaml @@ -0,0 +1,107 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs 
-n openstack-operators -l openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-no-nodes-custom-svc +spec: + preProvisioned: true + services: + - custom-img-svc + nodes: {} + nodeTemplate: + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +status: + observedGeneration: 1 + conditions: + - message: Deployment in progress + reason: Requested + severity: Info + status: "False" + type: Ready + - message: Deployment in progress + reason: Requested + severity: Info + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + name: custom-img-svc-edpm-compute-no-nodes-edpm-no-nodes-custom-svc + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 6 + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: 
dataplanenodeset-edpm-no-nodes-custom-svc + image: example.com/repo/runner-image:latest + name: openstackansibleee + restartPolicy: Never + uid: 1001 +status: + JobStatus: Running + conditions: + - message: Job in progress + reason: Requested + severity: Info + status: "False" + type: Ready + - message: Job in progress + reason: Requested + severity: Info + status: "False" + type: JobReady diff --git a/tests/kuttl/tests/dataplane-service-custom-image/00-dataplane-create.yaml b/tests/kuttl/tests/dataplane-service-custom-image/00-dataplane-create.yaml new file mode 100644 index 000000000..358f0c4c9 --- /dev/null +++ b/tests/kuttl/tests/dataplane-service-custom-image/00-dataplane-create.yaml @@ -0,0 +1,37 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: custom-img-svc +spec: + openStackAnsibleEERunnerImage: example.com/repo/runner-image:latest + role: + name: "test role" + hosts: "all" + strategy: "linear" + tasks: + - name: "test task" + import_role: + name: "test role" +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-no-nodes-custom-svc +spec: + preProvisioned: true + services: + - custom-img-svc + nodes: {} + nodeTemplate: + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes +spec: + nodeSets: + - edpm-no-nodes-custom-svc diff --git a/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml b/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml new file mode 100644 index 000000000..c361748b3 --- /dev/null +++ b/tests/kuttl/tests/dataplane-service-failure/00-assert.yaml @@ -0,0 +1,202 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: +- type: command + command: oc logs -n openstack-operators -l 
openstack.org/operator-name=openstack + name: operator-logs +--- +apiVersion: ansibleee.openstack.org/v1beta1 +kind: OpenStackAnsibleEE +metadata: + generation: 1 + labels: + openstackdataplanedeployment: edpm-compute-no-nodes + openstackdataplanenodeset: edpm-compute-no-nodes + openstackdataplaneservice: failed-service + name: failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + namespace: openstack-kuttl-tests + ownerReferences: + - apiVersion: dataplane.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: OpenStackDataPlaneDeployment + name: edpm-compute-no-nodes +spec: + backoffLimit: 3 + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + envConfigMapName: openstack-aee-default-env + extraMounts: + - mounts: + - mountPath: /runner/env/ssh_key + name: ssh-key + subPath: ssh_key + - mountPath: /runner/inventory/hosts + name: inventory + subPath: inventory + volumes: + - name: ssh-key + secret: + items: + - key: ssh-privatekey + path: ssh_key + secretName: dataplane-ansible-ssh-private-key-secret + - name: inventory + secret: + items: + - key: inventory + path: inventory + secretName: dataplanenodeset-edpm-compute-no-nodes + extraVars: + edpm_override_hosts: edpm-compute-no-nodes + edpm_service_type: failed-service + name: openstackansibleee + play: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Copy absent file + ansible.builtin.shell: | + set -euxo pipefail + cp absent failed_op + preserveJobs: true + restartPolicy: Never + serviceAccountName: edpm-compute-no-nodes + uid: 1001 +status: + JobStatus: Failed + conditions: + - message: 'Job error occurred Internal error occurred: Job has reached the specified + backoff limit. Check job logs' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: Ready + - message: 'Job error occurred Internal error occurred: Job has reached the specified + backoff limit. 
Check job logs' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: JobReady + observedGeneration: 1 +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + generation: 1 + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + managementNetwork: ctlplane + nodes: {} + preProvisioned: true + services: + - failed-service + tlsEnabled: true +status: + conditions: + - message: 'Deployment error occurred in failed-service service error backoff limit + reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: Error + severity: Error + status: "False" + type: Ready + - message: 'Deployment error occurred in failed-service service error backoff limit + reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: Error + severity: Error + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: NodeSetDNSDataReady ready + reason: Ready + status: "True" + type: NodeSetDNSDataReady + - message: NodeSetIPReservationReady ready + reason: Ready + status: "True" + type: NodeSetIPReservationReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Setup complete + reason: Ready + status: "True" + type: SetupReady + deploymentStatuses: + edpm-compute-no-nodes: + - message: 'Deployment error occurred in failed-service service error backoff + limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: BackoffLimitExceeded + severity: 
Error + status: "False" + type: NodeSetDeploymentReady + - message: 'Deployment error occurred in failed-service service error backoff + limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: ServiceFailedServiceDeploymentReady + observedGeneration: 1 +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + generation: 1 + name: edpm-compute-no-nodes + namespace: openstack-kuttl-tests +spec: + backoffLimit: 3 + deploymentRequeueTime: 15 + nodeSets: + - edpm-compute-no-nodes +status: + conditions: + - message: 'Deployment error occurred nodeSet: edpm-compute-no-nodes error: backoff + limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: Ready + - message: 'Deployment error occurred nodeSet: edpm-compute-no-nodes error: backoff + limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + nodeSetConditions: + edpm-compute-no-nodes: + - message: 'Deployment error occurred in failed-service service error backoff + limit reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: NodeSetDeploymentReady + - message: 'Deployment error occurred in failed-service service error backoff + limit 
reached for execution.name failed-service-edpm-compute-no-nodes-edpm-compute-no-nodes + execution.namespace openstack-kuttl-tests execution.status.jobstatus: Failed' + reason: BackoffLimitExceeded + severity: Error + status: "False" + type: ServiceFailedServiceDeploymentReady + observedGeneration: 1 diff --git a/tests/kuttl/tests/dataplane-service-failure/00-create.yaml b/tests/kuttl/tests/dataplane-service-failure/00-create.yaml new file mode 100644 index 000000000..e5c6e0cf4 --- /dev/null +++ b/tests/kuttl/tests/dataplane-service-failure/00-create.yaml @@ -0,0 +1,38 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneService +metadata: + name: failed-service +spec: + playbookContents: | + - hosts: localhost + gather_facts: no + name: kuttl play + tasks: + - name: Copy absent file + ansible.builtin.shell: | + set -euxo pipefail + cp absent failed_op +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm-compute-no-nodes +spec: + preProvisioned: true + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + nodes: {} + nodeTemplate: + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + services: + - failed-service +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: edpm-compute-no-nodes +spec: + backoffLimit: 3 + nodeSets: + - edpm-compute-no-nodes diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml new file mode 100644 index 000000000..ca3e0fd63 --- /dev/null +++ b/zuul.d/jobs.yaml @@ -0,0 +1,57 @@ +--- +- job: + name: openstack-operator-docs-preview + parent: cifmw-doc + vars: + asciidoc_available: true + # This var is specific to ci-framework docs directory using sphinx + doc_available: false + +- job: + name: openstack-operator-tempest-multinode + parent: podified-multinode-edpm-deployment-crc-3comp + dependencies: ["openstack-k8s-operators-content-provider"] + vars: + cifmw_operator_build_golang_ct: 
"docker.io/library/golang:1.20" + cifmw_operator_build_golang_alt_ct: "quay.rdoproject.org/openstack-k8s-operators/golang:1.20" + cifmw_run_test_role: tempest + cifmw_tempest_tempestconf_profile: + overrides: + compute-feature-enabled.vnc_console: true + compute-feature-enabled.stable_rescue: true + compute_feature_enabled.hostname_fqdn_sanitization: true + # NOTE(alee) these tests will fail with barbican in the mix + # while cinder/nova is not configured to talk to barbican + # re-enable this when that support is added + compute-feature-enabled.attach_encrypted_volume: false + compute-feature-enabled.live_migration: true + compute-feature-enabled.block_migration_for_live_migration: true + # NOTE(gibi): This is a WA to force the publicURL as otherwise + # tempest gets configured with adminURL and that causes test + # instability. + identity.v3_endpoint_type: public + cifmw_tempest_tests_allowed: + # NOTE(gibi): enable only the high level scenario tests to keep the + # job run time reasonable + - tempest.scenario + # Plus an extra live migration test until we have cinder volumes / ceph + # to run the live migration scenario tests with it + - tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225Test + cifmw_tempest_tests_skipped: + # NOTE(gibi): there are no cinder backend enabled so test needing a + # volumes needs to be skipped + - tempest.scenario.test_minimum_basic.TestMinimumBasicScenario + - test_shelve_volume_backed_instance + - tempest.scenario.test_stamp_pattern.TestStampPattern + - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern + - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_live_migration + - tempest.scenario.test_server_volume_attachment.TestServerVolumeAttachScenarioOldVersion + - tempest.scenario.test_server_volume_attachment.TestServerVolumeAttachmentScenario + - tempest.scenario.test_instances_with_cinder_volumes.TestInstancesWithCinderVolumes + - 
tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration_with_attached_volume + # We need to use a custom cpu model to allow live migrating between + # slightly different computes coming from the node pool + cifmw_edpm_deploy_nova_compute_extra_config: | + [libvirt] + cpu_mode = custom + cpu_models = Nehalem diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index a947fee3c..723748f0f 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -1,53 +1,4 @@ --- -- job: - name: openstack-operator-tempest-multinode - parent: podified-multinode-edpm-deployment-crc-3comp - dependencies: ["openstack-k8s-operators-content-provider"] - vars: - cifmw_operator_build_golang_ct: "docker.io/library/golang:1.20" - cifmw_operator_build_golang_alt_ct: "quay.rdoproject.org/openstack-k8s-operators/golang:1.20" - cifmw_run_test_role: tempest - cifmw_tempest_tempestconf_profile: - overrides: - compute-feature-enabled.vnc_console: true - compute-feature-enabled.stable_rescue: true - compute_feature_enabled.hostname_fqdn_sanitization: true - # NOTE(alee) these tests will fail with barbican in the mix - # while cinder/nova is not configured to talk to barbican - # re-enable this when that support is added - compute-feature-enabled.attach_encrypted_volume: false - compute-feature-enabled.live_migration: true - compute-feature-enabled.block_migration_for_live_migration: true - # NOTE(gibi): This is a WA to force the publicURL as otherwise - # tempest gets configured with adminURL and that causes test - # instability. 
- identity.v3_endpoint_type: public - cifmw_tempest_tests_allowed: - # NOTE(gibi): enable only the high level scenario tests to keep the - # job run time reasonable - - tempest.scenario - # Plus an extra live migration test until we have cinder volumes / ceph - # to run the live migration scenario tests with it - - tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225Test - cifmw_tempest_tests_skipped: - # NOTE(gibi): there are no cinder backend enabled so test needing a - # volumes needs to be skipped - - tempest.scenario.test_minimum_basic.TestMinimumBasicScenario - - test_shelve_volume_backed_instance - - tempest.scenario.test_stamp_pattern.TestStampPattern - - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern - - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_live_migration - - tempest.scenario.test_server_volume_attachment.TestServerVolumeAttachScenarioOldVersion - - tempest.scenario.test_server_volume_attachment.TestServerVolumeAttachmentScenario - - tempest.scenario.test_instances_with_cinder_volumes.TestInstancesWithCinderVolumes - - tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration_with_attached_volume - # We need to use a custom cpu model to allow live migrating between - # slightly different computes coming from the node pool - cifmw_edpm_deploy_nova_compute_extra_config: | - [libvirt] - cpu_mode = custom - cpu_models = Nehalem - - project: name: openstack-k8s-operators/openstack-operator templates: @@ -56,3 +7,4 @@ github-check: jobs: - openstack-operator-tempest-multinode + - openstack-operator-docs-preview