diff --git a/.golangci.yml b/.golangci.yml index a0a78372e..5d5573720 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,6 +5,7 @@ run: - external/ - controllers/virtualmachineservice/v1alpha1/utils/ - controllers/virtualmachineservice/v1alpha2/utils/ + - pkg/util/cloudinit/schema/ # override defaults linters-settings: diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig.go b/pkg/util/cloudinit/cloudconfig.go similarity index 91% rename from pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig.go rename to pkg/util/cloudinit/cloudconfig.go index ad34962f3..ac06f6c93 100644 --- a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig.go +++ b/pkg/util/cloudinit/cloudconfig.go @@ -6,13 +6,15 @@ package cloudinit import ( "bytes" "context" - "fmt" + "strconv" "gopkg.in/yaml.v3" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/vm-operator/api/v1alpha2/cloudinit" "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" + + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit/validate" ) // cloudConfig provides support for marshalling the object to a valid @@ -45,7 +47,7 @@ type user struct { Groups []string `json:"groups,omitempty" yaml:"groups,omitempty"` HashedPasswd string `json:"hashed_passwd,omitempty" yaml:"hashed_passwd,omitempty"` Homedir *string `json:"homedir,omitempty" yaml:"homedir,omitempty"` - Inactive *int32 `json:"inactive,omitempty" yaml:"inactive,omitempty"` + Inactive *string `json:"inactive,omitempty" yaml:"inactive,omitempty"` LockPasswd *bool `json:"lock_passwd,omitempty" yaml:"lock_passwd,omitempty"` Name string `json:"name" yaml:"name"` NoCreateHome *bool `json:"no_create_home,omitempty" yaml:"no_create_home,omitempty"` @@ -76,8 +78,9 @@ type writeFile struct { const emptyYAMLObject = "{}\n" +// MarshalYAML marshals the provided CloudConfig and secret data to a valid, +// YAML CloudConfig document. func MarshalYAML( - ctx context.Context, in cloudinit.CloudConfig, secret CloudConfigSecretData) (string, error) { @@ -119,11 +122,24 @@ func MarshalYAML( if l := len(in.WriteFiles); l > 0 { out.WriteFiles = make([]writeFile, l) + for i := range in.WriteFiles { + + // If the content was not derived from a secret, then get it as + // a string from the Content field. + content := secret.WriteFiles[in.WriteFiles[i].Path] + if content == "" { + if err := yaml.Unmarshal( + in.WriteFiles[i].Content, &content); err != nil { + + return "", err + } + } + copyWriteFile( in.WriteFiles[i], &out.WriteFiles[i], - secret.WriteFiles[in.WriteFiles[i].Path]) + content) } } @@ -139,6 +155,11 @@ func MarshalYAML( return "", nil } + // Validate the produced CloudConfig YAML using the CloudConfig schema. 
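+	// The validator is provided by pkg/util/cloudinit/validate; if the
+	// document does not conform to the schema, the error is returned and
+	// no user data is emitted.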
+ if err := validate.CloudConfigYAML(data); err != nil { + return "", err + } + return data, nil } @@ -158,7 +179,12 @@ func copyUser( out.HashedPasswd = secret.HashPasswd out.Homedir = in.Homedir - out.Inactive = in.Inactive + + if v := in.Inactive; v != nil { + s := strconv.Itoa(int(*v)) + out.Inactive = &s + } + out.LockPasswd = in.LockPasswd out.Name = in.Name out.NoCreateHome = in.NoCreateHome @@ -216,10 +242,8 @@ func (ccu *cloudConfigUsers) MarshalYAML() (any, error) { func (ccu cloudConfigRunCmd) MarshalYAML() (any, error) { if ccu.singleString != "" { - fmt.Printf("singleStringVal=%s\n", ccu.singleString) return ccu.singleString, nil } else if len(ccu.listOfStrings) > 0 { - fmt.Printf("listOfStrings=%v\n", ccu.listOfStrings) return ccu.listOfStrings, nil } return nil, nil diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_secret.go b/pkg/util/cloudinit/cloudconfig_secret.go similarity index 100% rename from pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_secret.go rename to pkg/util/cloudinit/cloudconfig_secret.go diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_secret_test.go b/pkg/util/cloudinit/cloudconfig_secret_test.go similarity index 99% rename from pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_secret_test.go rename to pkg/util/cloudinit/cloudconfig_secret_test.go index dae10f025..d04503906 100644 --- a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_secret_test.go +++ b/pkg/util/cloudinit/cloudconfig_secret_test.go @@ -16,7 +16,7 @@ import ( vmopv1cloudinit "github.com/vmware-tanzu/vm-operator/api/v1alpha2/cloudinit" "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit" + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit" ) var _ = Describe("CloudConfig GetCloudConfigSecretData", func() { diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_test.go b/pkg/util/cloudinit/cloudconfig_test.go similarity index 64% rename from pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_test.go rename to pkg/util/cloudinit/cloudconfig_test.go index 30112786c..2302c83fe 100644 --- a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudconfig_test.go +++ b/pkg/util/cloudinit/cloudconfig_test.go @@ -16,14 +16,13 @@ import ( vmopv1cloudinit "github.com/vmware-tanzu/vm-operator/api/v1alpha2/cloudinit" "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit" + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit" ) var _ = Describe("CloudConfig MarshalYAML", func() { var ( err error data string - ctx context.Context cloudConfig vmopv1cloudinit.CloudConfig cloudConfigSecretData cloudinit.CloudConfigSecretData ) @@ -31,13 +30,12 @@ var _ = Describe("CloudConfig MarshalYAML", func() { BeforeEach(func() { err = nil data = "" - ctx = context.Background() cloudConfig = vmopv1cloudinit.CloudConfig{} cloudConfigSecretData = cloudinit.CloudConfigSecretData{} }) JustBeforeEach(func() { - data, err = cloudinit.MarshalYAML(ctx, cloudConfig, cloudConfigSecretData) + data, err = cloudinit.MarshalYAML(cloudConfig, cloudConfigSecretData) }) When("CloudConfig and CloudConfigSecretData are both empty", func() { @@ -159,6 +157,118 @@ var _ = Describe("CloudConfig MarshalYAML", func() { }) }) + When("CloudConfig has all possible fields set", func() { + BeforeEach(func() { + cloudConfig = vmopv1cloudinit.CloudConfig{ + Users: []vmopv1cloudinit.User{ + { + 
Name: "bob.wilson", + CreateGroups: addrOf(false), + ExpireDate: addrOf("9999-99-99"), + Gecos: addrOf("gecos"), + Groups: []string{"group1", "group2"}, + HashedPasswd: &common.SecretKeySelector{ + Name: "my-bootstrap-data", + Key: "cloud-init-user-bob.wilson-hashed_passwd", + }, + Homedir: addrOf("/home/bob.wilson"), + Inactive: addrOf(int32(1)), + LockPasswd: addrOf(false), + NoCreateHome: addrOf(false), + NoLogInit: addrOf(false), + PrimaryGroup: addrOf("group1"), + SELinuxUser: addrOf("bob.wilson"), + Shell: addrOf("/bin/bash"), + SnapUser: addrOf("bob.wilson"), + SSHAuthorizedKeys: []string{"key1", "key2"}, + SSHImportID: []string{"id1", "id2"}, + SSHRedirectUser: addrOf(false), + Sudo: addrOf("sudoyou?"), + System: addrOf(false), + UID: addrOf(int64(123)), + }, + { + Name: "rob.wilson", + CreateGroups: addrOf(true), + ExpireDate: addrOf("9999-99-99"), + Gecos: addrOf("gecos"), + Groups: []string{"group1", "group2"}, + Homedir: addrOf("/home/rob.wilson"), + Inactive: addrOf(int32(10)), + LockPasswd: addrOf(true), + NoCreateHome: addrOf(true), + NoLogInit: addrOf(true), + Passwd: &common.SecretKeySelector{ + Name: "my-bootstrap-data", + Key: "cloud-init-user-rob.wilson-passwd", + }, + PrimaryGroup: addrOf("group1"), + SELinuxUser: addrOf("rob.wilson"), + Shell: addrOf("/bin/bash"), + SnapUser: addrOf("rob.wilson"), + SSHAuthorizedKeys: []string{"key1", "key2"}, + SSHImportID: []string{"id1", "id2"}, + SSHRedirectUser: addrOf(true), + Sudo: addrOf("sudoyou?"), + System: addrOf(true), + UID: addrOf(int64(123)), + }, + }, + RunCmd: []json.RawMessage{ + []byte("ls /"), + []byte(`[ "ls", "-a", "-l", "/" ]`), + []byte("- echo\n- \"hello, world.\""), + }, + WriteFiles: []vmopv1cloudinit.WriteFile{ + { + Path: "/hello", + Content: []byte("world"), + Append: true, + Defer: true, + Encoding: vmopv1cloudinit.WriteFileEncodingTextPlain, + Owner: "bob.wilson:bob.wilson", + Permissions: "0644", + }, + { + Path: "/hi", + Content: []byte("name: \"my-bootstrap-data\"\nkey: \"/hi\""), + Append: false, + Defer: false, + Encoding: vmopv1cloudinit.WriteFileEncodingTextPlain, + Owner: "rob.wilson:rob.wilson", + Permissions: "0755", + }, + { + Path: "/doc", + Content: []byte("|\n a multi-line\n document"), + Append: true, + Defer: true, + Encoding: vmopv1cloudinit.WriteFileEncodingTextPlain, + Owner: "bob.wilson:bob.wilson", + Permissions: "0644", + }, + }, + } + cloudConfigSecretData = cloudinit.CloudConfigSecretData{ + Users: map[string]cloudinit.CloudConfigUserSecretData{ + "bob.wilson": { + HashPasswd: "0123456789", + }, + "rob.wilson": { + Passwd: "password", + }, + }, + WriteFiles: map[string]string{ + "/hi": "there", + }, + } + }) + It("Should return user data", func() { + Expect(err).ToNot(HaveOccurred()) + Expect(data).To(Equal(cloudConfigWithAllPossibleValues)) + }) + }) + }) var _ = Describe("CloudConfig GetSecretResources", func() { @@ -348,3 +458,90 @@ runcmd: - - echo - hello, world. ` + +const cloudConfigWithAllPossibleValues = `users: + - create_groups: false + expiredate: 9999-99-99 + gecos: gecos + groups: + - group1 + - group2 + hashed_passwd: "0123456789" + homedir: /home/bob.wilson + inactive: "1" + lock_passwd: false + name: bob.wilson + no_create_home: false + no_log_init: false + primary_group: group1 + selinux_user: bob.wilson + shell: /bin/bash + snapuser: bob.wilson + ssh_authorized_keys: + - key1 + - key2 + ssh_import_id: + - id1 + - id2 + ssh_redirect_user: false + sudo: sudoyou? 
+ system: false + uid: 123 + - create_groups: true + expiredate: 9999-99-99 + gecos: gecos + groups: + - group1 + - group2 + homedir: /home/rob.wilson + inactive: "10" + lock_passwd: true + name: rob.wilson + no_create_home: true + no_log_init: true + passwd: password + primary_group: group1 + selinux_user: rob.wilson + shell: /bin/bash + snapuser: rob.wilson + ssh_authorized_keys: + - key1 + - key2 + ssh_import_id: + - id1 + - id2 + ssh_redirect_user: true + sudo: sudoyou? + system: true + uid: 123 +runcmd: + - ls / + - - ls + - -a + - -l + - / + - - echo + - hello, world. +write_files: + - append: true + content: world + defer: true + encoding: text/plain + owner: bob.wilson:bob.wilson + path: /hello + permissions: "0644" + - content: there + encoding: text/plain + owner: rob.wilson:rob.wilson + path: /hi + permissions: "0755" + - append: true + content: |- + a multi-line + document + defer: true + encoding: text/plain + owner: bob.wilson:bob.wilson + path: /doc + permissions: "0644" +` diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudinit_suite_test.go b/pkg/util/cloudinit/cloudinit_suite_test.go similarity index 91% rename from pkg/vmprovider/providers/vsphere2/cloudinit/cloudinit_suite_test.go rename to pkg/util/cloudinit/cloudinit_suite_test.go index bac685cb7..17db07c0a 100644 --- a/pkg/vmprovider/providers/vsphere2/cloudinit/cloudinit_suite_test.go +++ b/pkg/util/cloudinit/cloudinit_suite_test.go @@ -20,3 +20,7 @@ func TestCloudInit(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "vSphere Provider Cloud-Init Suite") } + +func addrOf[T any](t T) *T { + return &t +} diff --git a/pkg/util/cloudinit/schema/Dockerfile.quicktype b/pkg/util/cloudinit/schema/Dockerfile.quicktype new file mode 100644 index 000000000..16bf4b00e --- /dev/null +++ b/pkg/util/cloudinit/schema/Dockerfile.quicktype @@ -0,0 +1,17 @@ +FROM node:20 AS build +WORKDIR /quicktype + +RUN npm install --prefix /quicktype quicktype + +FROM gcr.io/distroless/nodejs20-debian12 +COPY --from=build /quicktype /quicktype + +WORKDIR /output +CMD [ \ + "/quicktype/node_modules/quicktype/dist/index.js", \ + "--src", "/schema.json", \ + "--src-lang", "schema", \ + "--out", "/output/cloudconfig.go", \ + "--lang", "go", \ + "--package", "schema" \ +] diff --git a/pkg/util/cloudinit/schema/Makefile b/pkg/util/cloudinit/schema/Makefile new file mode 100644 index 000000000..7bccefb6d --- /dev/null +++ b/pkg/util/cloudinit/schema/Makefile @@ -0,0 +1,98 @@ +# Copyright (c) 2023 VMware, Inc. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# If you update this file, please follow +# https://suva.sh/posts/well-documented-makefiles + +# Ensure Make is run with bash shell as some syntax below is bash-specific +SHELL := /usr/bin/env bash + +.DEFAULT_GOAL := help + +# Directories. +BIN_DIR := bin +NODE_PREFIX := $(abspath $(BIN_DIR)) + +# Binaries. +QUICKTYPE := $(NODE_PREFIX)/node_modules/.bin/quicktype + +# Schemas. +SCHEMA_CLOUD_CONFIG := schema-cloud-config-v1.json + +# Output. +CLOUD_CONFIG_GO := cloudconfig.go + +# Images. +QUICKTYPE_IMAGE_NAME := vm-op-quicktype +QUICKTYPE_IMAGE_VERSION := latest +QUICKTYPE_IMAGE ?= $(QUICKTYPE_IMAGE_NAME):$(QUICKTYPE_IMAGE_VERSION) + +# Select how to run quicktype. 
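+# If npm is not found on the local system, fall back to running quicktype in a
+# container image built from Dockerfile.quicktype. Setting QUICKTYPE_METHOD=docker
+# forces the containerized path even when npm is available.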
+ifeq (,$(shell command -v npm))
+QUICKTYPE_METHOD ?= docker
+endif
+
+
+## --------------------------------------
+## Help
+## --------------------------------------
+
+help: ## Display this help
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+## --------------------------------------
+## Image
+## --------------------------------------
+
+.PHONY: build-images-quicktype
+build-images-quicktype:
+	docker build -t $(QUICKTYPE_IMAGE) -f Dockerfile.quicktype .
+
+.PHONY: build-images
+build-images: ## Build the docker images
+	$(MAKE) build-images-quicktype
+
+
+## --------------------------------------
+## Binaries
+## --------------------------------------
+
+quicktype: $(QUICKTYPE) ## Install quicktype
+$(QUICKTYPE):
+	npm install --prefix $(NODE_PREFIX) quicktype
+
+
+## --------------------------------------
+## Generate
+## --------------------------------------
+
+$(CLOUD_CONFIG_GO): $(SCHEMA_CLOUD_CONFIG)
+ifeq (docker,$(QUICKTYPE_METHOD))
+$(CLOUD_CONFIG_GO): build-images-quicktype
+	docker run -it --rm \
+		-v $$(pwd):/output \
+		-v $$(pwd)/$(SCHEMA_CLOUD_CONFIG):/schema.json \
+		$(QUICKTYPE_IMAGE)
+else
+$(CLOUD_CONFIG_GO): | $(QUICKTYPE)
+	$(QUICKTYPE) \
+		--src $(SCHEMA_CLOUD_CONFIG) --src-lang schema \
+		--out $@ --lang go --package schema
+endif
+
+generate-go: ## Generate the go source code from the schemas
+	$(MAKE) $(CLOUD_CONFIG_GO)
+
+
+## --------------------------------------
+## Cleanup
+## --------------------------------------
+
+.PHONY: clean
+clean: ## Run all the clean targets
+	rm -f cloudconfig.go
+
+.PHONY: clobber
+clobber: ## Remove all of the tooling as well
+	$(MAKE) clean
+	rm -fr $(BIN_DIR)
diff --git a/pkg/util/cloudinit/schema/README.md b/pkg/util/cloudinit/schema/README.md
new file mode 100644
index 000000000..e47d045b4
--- /dev/null
+++ b/pkg/util/cloudinit/schema/README.md
@@ -0,0 +1,15 @@
+# Cloud-Init schemas
+
+## Overview
+
+This directory contains schema files related to Cloud-Init:
+
+* [`schema-cloud-config-v1.json`](./schema-cloud-config-v1.json)
+
+  * **`Copied`** `2023/12/14`
+  * **`Source`** https://github.com/canonical/cloud-init/blob/main/cloudinit/config/schemas/schema-cloud-config-v1.json
+  * **`--help`** The Cloud-Init CloudConfig schema that may be used to validate user and vendor data
+
+## Generating the Go source code
+
+Run `make generate-go`. If the local system has `npm`, it is used to install `quicktype`, which is then used to generate `cloudconfig.go` from the schema. Otherwise, a container image is built from `Dockerfile.quicktype`, which is then used to generate the Go source code.
diff --git a/pkg/util/cloudinit/schema/cloudconfig.go b/pkg/util/cloudinit/schema/cloudconfig.go
new file mode 100644
index 000000000..ec9b4d4ee
--- /dev/null
+++ b/pkg/util/cloudinit/schema/cloudconfig.go
@@ -0,0 +1,2737 @@
+// This file was generated from JSON Schema using quicktype, do not modify it directly.
+// To parse and unparse this JSON data, add this code to your project and do: +// +// cloudconfig, err := UnmarshalCloudconfig(bytes) +// bytes, err = cloudconfig.Marshal() + +package schema + +import ( + "bytes" + "encoding/json" + "errors" +) + +func UnmarshalCloudconfig(data []byte) (Cloudconfig, error) { + var r Cloudconfig + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *Cloudconfig) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +type Cloudconfig struct { + // If ``true``, will import the public SSH keys from the datasource's metadata to the user's + // ``.ssh/authorized_keys`` file. Default: ``true`` + AllowPublicSSHKeys *bool `json:"allow_public_ssh_keys,omitempty"` + Ansible *AnsibleClass `json:"ansible,omitempty"` + ApkRepos *ApkRepos `json:"apk_repos,omitempty"` + Apt *Apt `json:"apt,omitempty"` + AptPipelining *AptPipeliningUnion `json:"apt_pipelining,omitempty"` + // Default: ``false``. + AptRebootIfRequired *bool `json:"apt_reboot_if_required,omitempty"` + // Default: ``false``. + AptUpdate *bool `json:"apt_update,omitempty"` + // Default: ``false``. + AptUpgrade *bool `json:"apt_upgrade,omitempty"` + // The hash type to use when generating SSH fingerprints. Default: ``sha256`` + AuthkeyHash *string `json:"authkey_hash,omitempty"` + // Opaque autoinstall schema definition for Ubuntu autoinstall. Full schema processed by + // live-installer. See: https://ubuntu.com/server/docs/install/autoinstall-reference + Autoinstall *Autoinstall `json:"autoinstall,omitempty"` + Bootcmd []Cmd `json:"bootcmd,omitempty"` + ByobuByDefault *ByobuByDefault `json:"byobu_by_default,omitempty"` + CACerts *CACertsClass `json:"ca-certs,omitempty"` + CloudconfigCACerts *CACertsClass `json:"ca_certs,omitempty"` + Chef *ChefClass `json:"chef,omitempty"` + Chpasswd *Chpasswd `json:"chpasswd,omitempty"` + CloudConfigModules []CloudConfigModuleElement `json:"cloud_config_modules,omitempty"` + CloudFinalModules []CloudConfigModuleElement `json:"cloud_final_modules,omitempty"` + CloudInitModules []CloudConfigModuleElement `json:"cloud_init_modules,omitempty"` + // If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not + // exist. On systems that use systemd, setting create_hostname_file to ``false`` will set + // the hostname transiently. If ``true``, the hostname file will always be created and the + // hostname will be set statically on systemd systems. Default: ``true`` + CreateHostnameFile *bool `json:"create_hostname_file,omitempty"` + DeviceAliases *DeviceAliases `json:"device_aliases,omitempty"` + // Set true to disable IPv4 routes to EC2 metadata. Default: ``false`` + DisableEc2Metadata *bool `json:"disable_ec2_metadata,omitempty"` + // Disable root login. Default: ``true`` + DisableRoot *bool `json:"disable_root,omitempty"` + // Disable root login options. If ``disable_root_opts`` is specified and contains the + // string ``$USER``, it will be replaced with the username of the default user. 
Default: + // ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as + // the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10;exit 142"`` + DisableRootOpts *string `json:"disable_root_opts,omitempty"` + DiskSetup *DiskSetupClass `json:"disk_setup,omitempty"` + Drivers *Drivers `json:"drivers,omitempty"` + Fan *FanClass `json:"fan,omitempty"` + // The message to display at the end of the run + FinalMessage *string `json:"final_message,omitempty"` + // The fully qualified domain name to set + // + // Optional fully qualified domain name to use when updating ``/etc/hosts``. Preferred over + // ``hostname`` if both are provided. In absence of ``hostname`` and ``fqdn`` in + // cloud-config, the ``local-hostname`` value will be used from datasource metadata. + FQDN *string `json:"fqdn,omitempty"` + FSSetup []FSSetup `json:"fs_setup,omitempty"` + Groups *CloudconfigGroups `json:"groups,omitempty"` + Growpart *Growpart `json:"growpart,omitempty"` + // An alias for ``grub_dpkg`` + GrubDpkg map[string]interface{} `json:"grub-dpkg,omitempty"` + CloudconfigGrubDpkg *GrubDpkgClass `json:"grub_dpkg,omitempty"` + // The hostname to set + // + // Hostname to set when rendering ``/etc/hosts``. If ``fqdn`` is set, the hostname extracted + // from ``fqdn`` overrides ``hostname``. + Hostname *string `json:"hostname,omitempty"` + Keyboard *KeyboardClass `json:"keyboard,omitempty"` + Landscape *LandscapeClass `json:"landscape,omitempty"` + // The launch index for the specified cloud-config. + LaunchIndex *int64 `json:"launch-index,omitempty"` + // The locale to set as the system's locale (e.g. ar_PS) + Locale *string `json:"locale,omitempty"` + // The file in which to write the locale configuration (defaults to the distro's default + // location) + LocaleConfigfile *string `json:"locale_configfile,omitempty"` + Lxd *LxdClass `json:"lxd,omitempty"` + // Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using + // ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If + // ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every + // boot. Default: ``false`` + ManageEtcHosts *ManageEtcHostsUnion `json:"manage_etc_hosts,omitempty"` + // Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this + // is set to ``true``. Default: ``false`` + ManageResolvConf *bool `json:"manage_resolv_conf,omitempty"` + Mcollective *McollectiveClass `json:"mcollective,omitempty"` + MergeHow *MergeHow `json:"merge_how,omitempty"` + MergeType *MergeHow `json:"merge_type,omitempty"` + // Whether to migrate legacy cloud-init semaphores to new format. Default: ``true`` + Migrate *bool `json:"migrate,omitempty"` + // Default mount configuration for any mount entry with less than 6 options provided. When + // specified, 6 items are required and represent ``/etc/fstab`` entries. Default: + // ``defaults,nofail,x-systemd.requires=cloud-init.service,_netdev`` + MountDefaultFields []*string `json:"mount_default_fields,omitempty"` + // List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of + // the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount + // declaration with less than 6 items will get remaining values from + // ``mount_default_fields``. A mount declaration with only `fs_spec` and no `fs_file` + // mountpoint will be skipped. 
+ Mounts [][]string `json:"mounts,omitempty"` + // If true, SSH fingerprints will not be written. Default: ``false`` + NoSSHFingerprints *bool `json:"no_ssh_fingerprints,omitempty"` + NTP *NTPClass `json:"ntp,omitempty"` + Output *Output `json:"output,omitempty"` + // Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. + // Default: ``false`` + PackageRebootIfRequired *bool `json:"package_reboot_if_required,omitempty"` + // Set ``true`` to update packages. Happens before upgrade or install. Default: ``false`` + PackageUpdate *bool `json:"package_update,omitempty"` + // Set ``true`` to upgrade packages. Happens before install. Default: ``false`` + PackageUpgrade *bool `json:"package_upgrade,omitempty"` + // An array containing either a package specification, or an object consisting of a package + // manager key having a package specification value . A package specification can be either + // a package name or a list with two entries, the first being the package name and the + // second being the specific package version to install. + Packages []PackageElement `json:"packages,omitempty"` + // Set the default user's password. Ignored if ``chpasswd`` ``list`` is used + Password *string `json:"password,omitempty"` + PhoneHome *PhoneHomeClass `json:"phone_home,omitempty"` + PowerState *PowerState `json:"power_state,omitempty"` + // If true, the fqdn will be used if it is set. If false, the hostname will be used. If + // unset, the result is distro-dependent + // + // By default, it is distro-dependent whether cloud-init uses the short hostname or fully + // qualified domain name when both ``local-hostname` and ``fqdn`` are both present in + // instance metadata. When set ``true``, use fully qualified domain name if present as + // hostname instead of short hostname. When set ``false``, use ``hostname`` config value if + // present, otherwise fallback to ``fqdn``. + PreferFQDNOverHostname *bool `json:"prefer_fqdn_over_hostname,omitempty"` + // If true, the hostname will not be changed. Default: ``false`` + // + // Do not update system hostname when ``true``. Default: ``false``. + PreserveHostname *bool `json:"preserve_hostname,omitempty"` + Puppet *PuppetClass `json:"puppet,omitempty"` + RandomSeed *RandomSeed `json:"random_seed,omitempty"` + Reporting *Reporting `json:"reporting,omitempty"` + // Whether to resize the root partition. ``noblock`` will resize in the background. Default: + // ``true`` + ResizeRootfs *ResizeRootfsUnion `json:"resize_rootfs,omitempty"` + ResolvConf *ResolvConfClass `json:"resolv_conf,omitempty"` + RhSubscription *RhSubscriptionClass `json:"rh_subscription,omitempty"` + Rsyslog *RsyslogClass `json:"rsyslog,omitempty"` + Runcmd []RuncmdElement `json:"runcmd,omitempty"` + SaltMinion *SaltMinionClass `json:"salt_minion,omitempty"` + Snap *SnapClass `json:"snap,omitempty"` + Spacewalk *SpacewalkClass `json:"spacewalk,omitempty"` + SSH *SSHClass `json:"ssh,omitempty"` + // The SSH public keys to add ``.ssh/authorized_keys`` in the default user's home directory + SSHAuthorizedKeys []string `json:"ssh_authorized_keys,omitempty"` + // Remove host SSH keys. This prevents re-use of a private host key from an image with + // default host SSH keys. Default: ``true`` + SSHDeletekeys *bool `json:"ssh_deletekeys,omitempty"` + // Avoid printing matching SSH fingerprints to the system console. + SSHFPConsoleBlacklist []string `json:"ssh_fp_console_blacklist,omitempty"` + // The SSH key types to generate. 
Default: ``[rsa, ecdsa, ed25519]`` + SSHGenkeytypes []SSHGenkeytype `json:"ssh_genkeytypes,omitempty"` + SSHImportID []string `json:"ssh_import_id,omitempty"` + // Avoid printing matching SSH key types to the system console. + SSHKeyConsoleBlacklist []string `json:"ssh_key_console_blacklist,omitempty"` + // A dictionary entries for the public and private host keys of each desired key type. + // Entries in the ``ssh_keys`` config dict should have keys in the format ``_private``, ``_public``, and, optionally, ``_certificate``, e.g. + // ``rsa_private: ``, ``rsa_public: ``, and ``rsa_certificate: ``. Not all + // key types have to be specified, ones left unspecified will not be used. If this config + // option is used, then separate keys will not be automatically generated. In order to + // specify multi-line private host keys and certificates, use YAML multi-line syntax. + // **Note:** Your ssh keys might possibly be visible to unprivileged users on your system, + // depending on your cloud's security model. + SSHKeys *SSHKeys `json:"ssh_keys,omitempty"` + SSHPublishHostkeys *SSHPublishHostkeys `json:"ssh_publish_hostkeys,omitempty"` + // Sets whether or not to accept password authentication. ``true`` will enable password + // auth. ``false`` will disable. Default: leave the value unchanged. In order for this + // config to be applied, SSH may need to be restarted. On systemd systems, this restart will + // only happen if the SSH service has already been started. On non-systemd systems, a + // restart will be attempted regardless of the service state. + SSHPwauth *GrubPCInstallDevicesEmpty `json:"ssh_pwauth,omitempty"` + // If ``true``, will suppress the output of key generation to the console. Default: ``false`` + SSHQuietKeygen *bool `json:"ssh_quiet_keygen,omitempty"` + Swap *Swap `json:"swap,omitempty"` + // The timezone to use as represented in /usr/share/zoneinfo + Timezone *string `json:"timezone,omitempty"` + UbuntuAdvantage *UbuntuAdvantageClass `json:"ubuntu_advantage,omitempty"` + Updates *Updates `json:"updates,omitempty"` + // The ``user`` dictionary values override the ``default_user`` configuration from + // ``/etc/cloud/cloud.cfg``. The `user` dictionary keys supported for the default_user are + // the same as the ``users`` schema. + User *CloudconfigUser `json:"user,omitempty"` + Users *Users `json:"users,omitempty"` + VendorData *VendorData `json:"vendor_data,omitempty"` + Version interface{} `json:"version,omitempty"` + Wireguard *WireguardClass `json:"wireguard,omitempty"` + WriteFiles []WriteFile `json:"write_files,omitempty"` + // The repo parts directory where individual yum repo config files will be written. Default: + // ``/etc/yum.repos.d`` + YumRepoDir *string `json:"yum_repo_dir,omitempty"` + YumRepos *YumRepos `json:"yum_repos,omitempty"` + Zypper *Zypper `json:"zypper,omitempty"` +} + +type AnsibleClass struct { + // Sets the ANSIBLE_CONFIG environment variable. If set, overrides default config. + AnsibleConfig *string `json:"ansible_config,omitempty"` + Galaxy *Galaxy `json:"galaxy,omitempty"` + // The type of installation for ansible. It can be one of the following values: + // + // - ``distro`` + // - ``pip`` + InstallMethod *InstallMethod `json:"install_method,omitempty"` + PackageName *string `json:"package_name,omitempty"` + Pull *Pull `json:"pull,omitempty"` + // User to run module commands as. If install_method: pip, the pip install runs as this user + // as well. 
+ RunUser *string `json:"run_user,omitempty"` + SetupController *SetupController `json:"setup_controller,omitempty"` +} + +type Galaxy struct { + Actions [][]string `json:"actions"` +} + +type Pull struct { + AcceptHostKey *bool `json:"accept_host_key,omitempty"` + Checkout *string `json:"checkout,omitempty"` + Clean *bool `json:"clean,omitempty"` + Connection *string `json:"connection,omitempty"` + Diff *bool `json:"diff,omitempty"` + Full *bool `json:"full,omitempty"` + ModuleName *string `json:"module_name,omitempty"` + ModulePath *string `json:"module_path,omitempty"` + PlaybookName string `json:"playbook_name"` + PrivateKey *string `json:"private_key,omitempty"` + SCPExtraArgs *string `json:"scp_extra_args,omitempty"` + SFTPExtraArgs *string `json:"sftp_extra_args,omitempty"` + SkipTags *string `json:"skip_tags,omitempty"` + Sleep *string `json:"sleep,omitempty"` + SSHCommonArgs *string `json:"ssh_common_args,omitempty"` + Tags *string `json:"tags,omitempty"` + Timeout *string `json:"timeout,omitempty"` + URL string `json:"url"` + VaultID *string `json:"vault_id,omitempty"` + VaultPasswordFile *string `json:"vault_password_file,omitempty"` +} + +type SetupController struct { + Repositories []Repository `json:"repositories,omitempty"` + RunAnsible []RunAnsibleElement `json:"run_ansible,omitempty"` +} + +type Repository struct { + Path string `json:"path"` + Source string `json:"source"` +} + +type RunAnsibleClass struct { + Args *string `json:"args,omitempty"` + Background *float64 `json:"background,omitempty"` + BecomePasswordFile *string `json:"become_password_file,omitempty"` + Check *bool `json:"check,omitempty"` + Connection *string `json:"connection,omitempty"` + ConnectionPasswordFile *string `json:"connection_password_file,omitempty"` + Diff *bool `json:"diff,omitempty"` + ExtraVars *string `json:"extra_vars,omitempty"` + Forks *float64 `json:"forks,omitempty"` + Inventory *string `json:"inventory,omitempty"` + ListHosts *bool `json:"list_hosts,omitempty"` + ModuleName *string `json:"module_name,omitempty"` + ModulePath *string `json:"module_path,omitempty"` + PlaybookDir *string `json:"playbook_dir,omitempty"` + PlaybookName *string `json:"playbook_name,omitempty"` + Poll *float64 `json:"poll,omitempty"` + PrivateKey *string `json:"private_key,omitempty"` + SCPExtraArgs *string `json:"scp_extra_args,omitempty"` + SFTPExtraArgs *string `json:"sftp_extra_args,omitempty"` + SkipTags *string `json:"skip_tags,omitempty"` + Sleep *string `json:"sleep,omitempty"` + SyntaxCheck *bool `json:"syntax_check,omitempty"` + Tags *string `json:"tags,omitempty"` + Timeout *float64 `json:"timeout,omitempty"` + VaultID *string `json:"vault_id,omitempty"` + VaultPasswordFile *string `json:"vault_password_file,omitempty"` +} + +type ApkRepos struct { + AlpineRepo *AlpineRepo `json:"alpine_repo,omitempty"` + // The base URL of an Alpine repository containing unofficial packages + LocalRepoBaseURL *string `json:"local_repo_base_url,omitempty"` + // By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` + // based on any valid configuration settings specified within a apk_repos section of cloud + // config. To disable this behavior and preserve the repositories file from the pristine + // image, set ``preserve_repositories`` to ``true``. + // + // The ``preserve_repositories`` option overrides all other config keys that would alter + // ``/etc/apk/repositories``. 
+ PreserveRepositories *bool `json:"preserve_repositories,omitempty"` +} + +type AlpineRepo struct { + // The base URL of an Alpine repository, or mirror, to download official packages from. If + // not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine`` + BaseURL *string `json:"base_url,omitempty"` + // Whether to add the Community repo to the repositories file. By default the Community repo + // is not included. + CommunityEnabled *bool `json:"community_enabled,omitempty"` + // Whether to add the Testing repo to the repositories file. By default the Testing repo is + // not included. It is only recommended to use the Testing repo on a machine running the + // ``Edge`` version of Alpine as packages installed from Testing may have dependencies that + // conflict with those in non-Edge Main or Community repos. + TestingEnabled *bool `json:"testing_enabled,omitempty"` + // The Alpine version to use (e.g. ``v3.12`` or ``edge``) + Version string `json:"version"` +} + +type Apt struct { + // All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be + // added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not + // specified, it defaults to ``^[\w-]+:\w`` + AddAptRepoMatch *string `json:"add_apt_repo_match,omitempty"` + // Specify configuration for apt, such as proxy configuration. This configuration is + // specified as a string. For multi-line APT configuration, make sure to follow YAML syntax. + Conf *string `json:"conf,omitempty"` + // Debconf additional configurations can be specified as a dictionary under the + // ``debconf_selections`` config key, with each key in the dict representing a different set + // of configurations. The value of each key must be a string containing all the debconf + // configurations that must be applied. We will bundle all of the values and pass them to + // ``debconf-set-selections``. Therefore, each value line must be a valid entry for + // ``debconf-set-selections``, meaning that they must possess for distinct fields: + // + // ``pkgname question type answer`` + // + // Where: + // + // - ``pkgname`` is the name of the package. + // - ``question`` the name of the questions. + // - ``type`` is the type of question. + // - ``answer`` is the value used to answer the question. + // + // For example: ``ippackage ippackage/ip string 127.0.01`` + DebconfSelections *DebconfSelections `json:"debconf_selections,omitempty"` + // Entries in the sources list can be disabled using ``disable_suites``, which takes a list + // of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the + // ``disable_suites`` list, it will be replaced with the release name. If a suite specified + // in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For + // convenience, several aliases are provided for`` disable_suites``: + // + // - ``updates`` => ``$RELEASE-updates`` + // - ``backports`` => ``$RELEASE-backports`` + // - ``security`` => ``$RELEASE-security`` + // - ``proposed`` => ``$RELEASE-proposed`` + // - ``release`` => ``$RELEASE``. + // + // When a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not + // deleted; it is just commented out. + DisableSuites []string `json:"disable_suites,omitempty"` + // More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format + // ``ftp://[[user][:pass]@]host[:port]/``. 
+ FTPProxy *string `json:"ftp_proxy,omitempty"` + // More convenient way to specify http APT proxy. http proxy url is specified in the format + // ``http://[[user][:pass]@]host[:port]/``. + HTTPProxy *string `json:"http_proxy,omitempty"` + // More convenient way to specify https APT proxy. https proxy url is specified in the + // format ``https://[[user][:pass]@]host[:port]/``. + HTTPSProxy *string `json:"https_proxy,omitempty"` + // By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` + // based on any changes specified in cloud config. To disable this behavior and preserve the + // sources list from the pristine image, set ``preserve_sources_list`` to ``true``. + // + // The ``preserve_sources_list`` option overrides all other config keys that would alter + // ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to + // ``sources.list.d``. + PreserveSourcesList *bool `json:"preserve_sources_list,omitempty"` + // The primary and security archive mirrors can be specified using the ``primary`` and + // ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list + // of configs, allowing mirrors to be specified on a per-architecture basis. Each config is + // a dictionary which must have an entry for ``arches``, specifying which architectures that + // config entry is for. The keyword ``default`` applies to any architecture not explicitly + // listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to + // check can be provided in order, with the first mirror that can be resolved being + // selected. This allows the same configuration to be used in different environment, with + // different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or + // ``search``, ``search_dns`` may be used to search for dns names in the format + // ``-mirror`` in each of the following: + // + // - fqdn of this host per cloud metadata, + // - localdomain, + // - domains listed in ``/etc/resolv.conf``. + // + // If there is a dns entry for ``-mirror``, then it is assumed that there is a + // distro mirror at ``http://-mirror./``. If the ``primary`` key is + // defined, but not the ``security`` key, then then configuration for ``primary`` is also + // used for ``security``. If ``search_dns`` is used for the ``security`` key, the search + // pattern will be ``-security-mirror``. + // + // Each mirror may also specify a key to import via any of the following optional keys: + // + // - ``keyid``: a key to import via shortid or fingerprint. + // - ``key``: a raw PGP key. + // - ``keyserver``: alternate keyserver to pull ``keyid`` key from. + // + // If no mirrors are specified, or all lookups fail, then default mirrors defined in the + // datasource are used. If none are present in the datasource either the following defaults + // are used: + // + // - ``primary`` => ``http://archive.ubuntu.com/ubuntu``. + // - ``security`` => ``http://security.ubuntu.com/ubuntu`` + Primary []PrimaryElement `json:"primary,omitempty"` + // Alias for defining a http APT proxy. + Proxy *string `json:"proxy,omitempty"` + // Please refer to the primary config documentation + Security []PrimaryElement `json:"security,omitempty"` + // Source list entries can be specified as a dictionary under the ``sources`` config key, + // with each key in the dict representing a different source file. 
The key of each source + // entry will be used as an id that can be referenced in other config entries, as well as + // the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the + // name does not end with ``.list``, it will be appended. If there is no configuration for a + // key in ``sources``, no file will be written, but the key may still be referred to as an + // id in other ``sources`` entries. + // + // Each entry under ``sources`` is a dictionary which may contain any of the following + // optional keys: + // - ``source``: a sources.list entry (some variable replacements apply). + // - ``keyid``: a key to import via shortid or fingerprint. + // - ``key``: a raw PGP key. + // - ``keyserver``: alternate keyserver to pull ``keyid`` key from. + // - ``filename``: specify the name of the list file. + // - ``append``: If ``true``, append to sources file, otherwise overwrite it. Default: + // ``true``. + // + // The ``source`` key supports variable replacements for the following strings: + // + // - ``$MIRROR`` + // - ``$PRIMARY`` + // - ``$SECURITY`` + // - ``$RELEASE`` + // - ``$KEY_FILE`` + Sources *Sources `json:"sources,omitempty"` + // Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` + // template is given, cloud-init will use sane default. Within this template, the following + // strings will be replaced with the appropriate values: + // + // - ``$MIRROR`` + // - ``$RELEASE`` + // - ``$PRIMARY`` + // - ``$SECURITY`` + // - ``$KEY_FILE`` + SourcesList *string `json:"sources_list,omitempty"` +} + +// Debconf additional configurations can be specified as a dictionary under the +// “debconf_selections“ config key, with each key in the dict representing a different set +// of configurations. The value of each key must be a string containing all the debconf +// configurations that must be applied. We will bundle all of the values and pass them to +// “debconf-set-selections“. Therefore, each value line must be a valid entry for +// “debconf-set-selections“, meaning that they must possess for distinct fields: +// +// “pkgname question type answer“ +// +// Where: +// +// - “pkgname“ is the name of the package. +// - “question“ the name of the questions. +// - “type“ is the type of question. +// - “answer“ is the value used to answer the question. +// +// For example: “ippackage ippackage/ip string 127.0.01“ +type DebconfSelections struct { +} + +// The primary and security archive mirrors can be specified using the “primary“ and +// “security“ keys, respectively. Both the “primary“ and “security“ keys take a list +// of configs, allowing mirrors to be specified on a per-architecture basis. Each config is +// a dictionary which must have an entry for “arches“, specifying which architectures that +// config entry is for. The keyword “default“ applies to any architecture not explicitly +// listed. The mirror url can be specified with the “uri“ key, or a list of mirrors to +// check can be provided in order, with the first mirror that can be resolved being +// selected. This allows the same configuration to be used in different environment, with +// different hosts used for a local APT mirror. If no mirror is provided by “uri“ or +// “search“, “search_dns“ may be used to search for dns names in the format +// “-mirror“ in each of the following: +// +// - fqdn of this host per cloud metadata, +// - localdomain, +// - domains listed in “/etc/resolv.conf“. 
+// +// If there is a dns entry for “-mirror“, then it is assumed that there is a +// distro mirror at “http://-mirror./“. If the “primary“ key is +// defined, but not the “security“ key, then then configuration for “primary“ is also +// used for “security“. If “search_dns“ is used for the “security“ key, the search +// pattern will be “-security-mirror“. +// +// Each mirror may also specify a key to import via any of the following optional keys: +// +// - “keyid“: a key to import via shortid or fingerprint. +// - “key“: a raw PGP key. +// - “keyserver“: alternate keyserver to pull “keyid“ key from. +// +// If no mirrors are specified, or all lookups fail, then default mirrors defined in the +// datasource are used. If none are present in the datasource either the following defaults +// are used: +// +// - “primary“ => “http://archive.ubuntu.com/ubuntu“. +// - “security“ => “http://security.ubuntu.com/ubuntu“ +type PrimaryElement struct { + Arches []string `json:"arches"` + Key *string `json:"key,omitempty"` + Keyid *string `json:"keyid,omitempty"` + Keyserver *string `json:"keyserver,omitempty"` + Search []string `json:"search,omitempty"` + SearchDNS *bool `json:"search_dns,omitempty"` + URI *string `json:"uri,omitempty"` +} + +// Source list entries can be specified as a dictionary under the “sources“ config key, +// with each key in the dict representing a different source file. The key of each source +// entry will be used as an id that can be referenced in other config entries, as well as +// the filename for the source's configuration under “/etc/apt/sources.list.d“. If the +// name does not end with “.list“, it will be appended. If there is no configuration for a +// key in “sources“, no file will be written, but the key may still be referred to as an +// id in other “sources“ entries. +// +// Each entry under “sources“ is a dictionary which may contain any of the following +// optional keys: +// - “source“: a sources.list entry (some variable replacements apply). +// - “keyid“: a key to import via shortid or fingerprint. +// - “key“: a raw PGP key. +// - “keyserver“: alternate keyserver to pull “keyid“ key from. +// - “filename“: specify the name of the list file. +// - “append“: If “true“, append to sources file, otherwise overwrite it. Default: +// “true“. +// +// The “source“ key supports variable replacements for the following strings: +// +// - “$MIRROR“ +// - “$PRIMARY“ +// - “$SECURITY“ +// - “$RELEASE“ +// - “$KEY_FILE“ +type Sources struct { +} + +// Opaque autoinstall schema definition for Ubuntu autoinstall. Full schema processed by +// live-installer. See: https://ubuntu.com/server/docs/install/autoinstall-reference +type Autoinstall struct { + Version int64 `json:"version"` +} + +type CACertsClass struct { + RemoveDefaults *bool `json:"remove-defaults,omitempty"` + // Remove default CA certificates if true. Default: ``false`` + CACertsRemoveDefaults *bool `json:"remove_defaults,omitempty"` + // List of trusted CA certificates to add. + Trusted []string `json:"trusted,omitempty"` +} + +type ChefClass struct { + // string that indicates if user accepts or not license related to some of chef products + ChefLicense *string `json:"chef_license,omitempty"` + // Optional path for client_cert. Default: ``/etc/chef/client.pem``. + ClientKey *string `json:"client_key,omitempty"` + // Create the necessary directories for chef to run. 
By default, it creates the following + // directories: + // + // - ``/etc/chef`` + // - ``/var/log/chef`` + // - ``/var/lib/chef`` + // - ``/var/cache/chef`` + // - ``/var/backups/chef`` + // - ``/var/run/chef`` + Directories []string `json:"directories,omitempty"` + // Specifies the location of the secret key used by chef to encrypt data items. By default, + // this path is set to null, meaning that chef will have to look at the path + // ``/etc/chef/encrypted_data_bag_secret`` for it. + EncryptedDataBagSecret *string `json:"encrypted_data_bag_secret,omitempty"` + // Specifies which environment chef will use. By default, it will use the ``_default`` + // configuration. + Environment *string `json:"environment,omitempty"` + // Set true if we should run or not run chef (defaults to false, unless a gem installed is + // requested where this will then default to true). + Exec *bool `json:"exec,omitempty"` + // Specifies the location in which backup files are stored. By default, it uses the + // ``/var/backups/chef`` location. + FileBackupPath *string `json:"file_backup_path,omitempty"` + // Specifies the location in which chef cache files will be saved. By default, it uses the + // ``/var/cache/chef`` location. + FileCachePath *string `json:"file_cache_path,omitempty"` + // Path to write run_list and initial_attributes keys that should also be present in this + // configuration, defaults to ``/etc/chef/firstboot.json`` + FirstbootPath *string `json:"firstboot_path,omitempty"` + // If set to ``true``, forces chef installation, even if it is already installed. + ForceInstall *bool `json:"force_install,omitempty"` + // Specify a list of initial attributes used by the cookbooks. + InitialAttributes map[string]interface{} `json:"initial_attributes,omitempty"` + // The type of installation for chef. It can be one of the following values: + // + // - ``packages`` + // - ``gems`` + // - ``omnibus`` + InstallType *ChefInstallType `json:"install_type,omitempty"` + // Specifies the location in which some chef json data is stored. By default, it uses the + // ``/etc/chef/firstboot.json`` location. + JSONAttribs *string `json:"json_attribs,omitempty"` + // Defines the level of logging to be stored in the log file. By default this value is set + // to ``:info``. + LogLevel *string `json:"log_level,omitempty"` + // Specifies the location of the chef log file. By default, the location is specified at + // ``/var/log/chef/client.log``. + LogLocation *string `json:"log_location,omitempty"` + // The name of the node to run. By default, we will use th instance id as the node name. + NodeName *string `json:"node_name,omitempty"` + // Omnibus URL if chef should be installed through Omnibus. By default, it uses the + // ``https://www.chef.io/chef/install.sh``. + OmnibusURL *string `json:"omnibus_url,omitempty"` + // The number of retries that will be attempted to reach the Omnibus URL. Default: ``5``. + OmnibusURLRetries *int64 `json:"omnibus_url_retries,omitempty"` + // Optional version string to require for omnibus install. + OmnibusVersion *string `json:"omnibus_version,omitempty"` + // The location in which a process identification number (pid) is saved. By default, it + // saves in the ``/var/run/chef/client.pid`` location. + PIDFile *string `json:"pid_file,omitempty"` + // A run list for a first boot json. 
+ RunList []string `json:"run_list,omitempty"` + // The URL for the chef server + ServerURL *string `json:"server_url,omitempty"` + // Show time in chef logs + ShowTime *bool `json:"show_time,omitempty"` + // Set the verify mode for HTTPS requests. We can have two possible values for this + // parameter: + // + // - ``:verify_none``: No validation of SSL certificates. + // - ``:verify_peer``: Validate all SSL certificates. + // + // By default, the parameter is set as ``:verify_none``. + SSLVerifyMode *string `json:"ssl_verify_mode,omitempty"` + // Optional string to be written to file validation_key. Special value ``system`` means set + // use existing file. + ValidationCERT *string `json:"validation_cert,omitempty"` + // Optional path for validation_cert. default to ``/etc/chef/validation.pem`` + ValidationKey *string `json:"validation_key,omitempty"` + // The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra + // Server during the initial Chef Infra Client run. + ValidationName *string `json:"validation_name,omitempty"` +} + +type Chpasswd struct { + // Whether to expire all user passwords such that a password will need to be reset on the + // user's next login. Default: ``true`` + Expire *bool `json:"expire,omitempty"` + // List of ``username:password`` pairs. Each user will have the corresponding password set. + // A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's + // password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A + // regex (``r'\$(1|2a|2y|5|6)(\$.+){2}'``) is used to determine if a password value should + // be treated as a hash. + List *ListUnion `json:"list,omitempty"` + // This key represents a list of existing users to set passwords for. Each item under users + // contains the following required keys: ``name`` and ``password`` or in the case of a + // randomly generated password, ``name`` and ``type``. The ``type`` key has a default value + // of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``. Randomly generated + // passwords may be insecure, use at your own risk. + Users []UserClass `json:"users,omitempty"` +} + +type UserClass struct { + Name string `json:"name"` + Type *Type `json:"type,omitempty"` + Password *string `json:"password,omitempty"` +} + +type GrubDpkgClass struct { + // Whether to configure which device is used as the target for grub installation. Default: + // ``true`` + Enabled *bool `json:"enabled,omitempty"` + // Partition to use as target for grub installation. If unspecified, ``grub-probe`` of + // ``/boot/efi`` will be used to find the partition + GrubEFIInstallDevices *string `json:"grub-efi/install_devices,omitempty"` + // Device to use as target for grub installation. If unspecified, ``grub-probe`` of + // ``/boot`` will be used to find the device + GrubPCInstallDevices *string `json:"grub-pc/install_devices,omitempty"` + // Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to + // ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false`` + GrubPCInstallDevicesEmpty *GrubPCInstallDevicesEmpty `json:"grub-pc/install_devices_empty,omitempty"` +} + +type DeviceAliases struct { +} + +type DiskSetupClass struct { +} + +type Drivers struct { + Nvidia *Nvidia `json:"nvidia,omitempty"` +} + +type Nvidia struct { + // Do you accept the NVIDIA driver license? + LicenseAccepted bool `json:"license-accepted"` + // The version of the driver to install (e.g. "390", "410"). Default: latest version. 
+ Version *string `json:"version,omitempty"` +} + +type FSSetup struct { + // Optional command to run to create the filesystem. Can include string substitutions of the + // other ``fs_setup`` config keys. This is only necessary if you need to override the + // default command. + Cmd *Cmd `json:"cmd,omitempty"` + // Specified either as a path or as an alias in the format ``.`` where + // ```` denotes the partition number on the device. If specifying device using the + // ``.`` format, the value of ``partition`` will be + // overwritten. + Device *string `json:"device,omitempty"` + // Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` + // directly. + ExtraOpts *Cmd `json:"extra_opts,omitempty"` + // Filesystem type to create. E.g., ``ext4`` or ``btrfs`` + Filesystem *string `json:"filesystem,omitempty"` + // Label for the filesystem. + Label *string `json:"label,omitempty"` + // If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems + // is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. + // Default: ``false`` + Overwrite *bool `json:"overwrite,omitempty"` + // The partition can be specified by setting ``partition`` to the desired partition number. + // The ``partition`` option may also be set to ``auto``, in which this module will search + // for the existence of a filesystem matching the ``label``, ``filesystem`` and ``device`` + // of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The + // ``partition`` option may also be set to ``any``, in which case any filesystem that + // matches ``filesystem`` and ``device`` will cause this module to skip filesystem creation + // for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a + // filesystem directly to a device, use ``partition: none``. ``partition: none`` will + // **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, + // and ``overwrite`` is ``false``. + Partition *Partition `json:"partition,omitempty"` + // Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``. + ReplaceFS *string `json:"replace_fs,omitempty"` +} + +type FanClass struct { + // The fan configuration to use as a single multi-line string + Config string `json:"config"` + // The path to write the fan configuration to. Default: ``/etc/network/fan`` + ConfigPath *string `json:"config_path,omitempty"` +} + +type GroupsClass struct { +} + +type Growpart struct { + // The devices to resize. Each entry can either be the path to the device's mountpoint in + // the filesystem or a path to the block device in '/dev'. Default: ``[/]`` + Devices []string `json:"devices,omitempty"` + // If ``true``, ignore the presence of ``/etc/growroot-disabled``. If ``false`` and the file + // exists, then don't resize. Default: ``false`` + IgnoreGrowrootDisabled *bool `json:"ignore_growroot_disabled,omitempty"` + // The utility to use for resizing. Default: ``auto`` + // + // Possible options: + // + // * ``auto`` - Use any available utility + // + // * ``growpart`` - Use growpart utility + // + // * ``gpart`` - Use BSD gpart utility + // + // * ``off`` - Take no action + Mode *ModeUnion `json:"mode,omitempty"` +} + +type KeyboardClass struct { + // Required. Keyboard layout. Corresponds to XKBLAYOUT. + Layout string `json:"layout"` + // Optional. Keyboard model. Corresponds to XKBMODEL. Default: ``pc105``. + Model *string `json:"model,omitempty"` + // Optional. Keyboard options. 
Corresponds to XKBOPTIONS. + Options *string `json:"options,omitempty"` + // Required for Alpine Linux, optional otherwise. Keyboard variant. Corresponds to + // XKBVARIANT. + Variant *string `json:"variant,omitempty"` +} + +type LandscapeClass struct { + Client Client `json:"client"` +} + +type Client struct { + // The account this computer belongs to. + AccountName string `json:"account_name"` + // The title of this computer. + ComputerTitle string `json:"computer_title"` + // The directory to store data files in. Default: ``/var/lib/land‐scape/client/``. + DataPath *string `json:"data_path,omitempty"` + // The URL of the HTTP proxy, if one is needed. + HTTPProxy *string `json:"http_proxy,omitempty"` + // The URL of the HTTPS proxy, if one is needed. + HTTPSProxy *string `json:"https_proxy,omitempty"` + // The log level for the client. Default: ``info``. + LogLevel *LogLevel `json:"log_level,omitempty"` + // The URL to perform lightweight exchange initiation with. Default: + // ``https://landscape.canonical.com/ping``. + PingURL *string `json:"ping_url,omitempty"` + // The account-wide key used for registering clients. + RegistrationKey *string `json:"registration_key,omitempty"` + // Comma separated list of tag names to be sent to the server. + Tags *string `json:"tags,omitempty"` + // The Landscape server URL to connect to. Default: + // ``https://landscape.canonical.com/message-system``. + URL *string `json:"url,omitempty"` +} + +type LxdClass struct { + // LXD bridge configuration provided to setup the host lxd bridge. Can not be combined with + // ``lxd.preseed``. + Bridge *Bridge `json:"bridge,omitempty"` + // LXD init configuration values to provide to `lxd init --auto` command. Can not be + // combined with ``lxd.preseed``. + Init *Init `json:"init,omitempty"` + // Opaque LXD preseed YAML config passed via stdin to the command: lxd init --preseed. See: + // https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#non-interactive-configuration + // or lxd init --dump for viable config. Can not be combined with either ``lxd.init`` or + // ``lxd.bridge``. + Preseed *string `json:"preseed,omitempty"` +} + +// LXD bridge configuration provided to setup the host lxd bridge. Can not be combined with +// “lxd.preseed“. +type Bridge struct { + // Domain to advertise to DHCP clients and use for DNS resolution. + Domain *string `json:"domain,omitempty"` + // IPv4 address for the bridge. If set, ``ipv4_netmask`` key required. + Ipv4Address *string `json:"ipv4_address,omitempty"` + // First IPv4 address of the DHCP range for the network created. This value will combined + // with ``ipv4_dhcp_last`` key to set LXC ``ipv4.dhcp.ranges``. + Ipv4DHCPFirst *string `json:"ipv4_dhcp_first,omitempty"` + // Last IPv4 address of the DHCP range for the network created. This value will combined + // with ``ipv4_dhcp_first`` key to set LXC ``ipv4.dhcp.ranges``. + Ipv4DHCPLast *string `json:"ipv4_dhcp_last,omitempty"` + // Number of DHCP leases to allocate within the range. Automatically calculated based on + // `ipv4_dhcp_first` and `ipv4_dchp_last` when unset. + Ipv4DHCPLeases *int64 `json:"ipv4_dhcp_leases,omitempty"` + // Set ``true`` to NAT the IPv4 traffic allowing for a routed IPv4 network. Default: + // ``false``. + Ipv4Nat *bool `json:"ipv4_nat,omitempty"` + // Prefix length for the ``ipv4_address`` key. Required when ``ipv4_address`` is set. + Ipv4Netmask *int64 `json:"ipv4_netmask,omitempty"` + // IPv6 address for the bridge (CIDR notation). When set, ``ipv6_netmask`` key is required. 
+ // When absent, no IPv6 will be configured. + Ipv6Address *string `json:"ipv6_address,omitempty"` + // Whether to NAT. Default: ``false``. + Ipv6Nat *bool `json:"ipv6_nat,omitempty"` + // Prefix length for ``ipv6_address`` provided. Required when ``ipv6_address`` is set. + Ipv6Netmask *int64 `json:"ipv6_netmask,omitempty"` + // Whether to setup LXD bridge, use an existing bridge by ``name`` or create a new bridge. + // `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching + // ``name`` and `new` will create a new bridge. + Mode BridgeMode `json:"mode"` + // Bridge MTU, defaults to LXD's default value + MTU *int64 `json:"mtu,omitempty"` + // Name of the LXD network bridge to attach or create. Default: ``lxdbr0``. + Name *string `json:"name,omitempty"` +} + +// LXD init configuration values to provide to `lxd init --auto` command. Can not be +// combined with “lxd.preseed“. +type Init struct { + // IP address for LXD to listen on + NetworkAddress *string `json:"network_address,omitempty"` + // Network port to bind LXD to. + NetworkPort *int64 `json:"network_port,omitempty"` + // Storage backend to use. Default: ``dir``. + StorageBackend *StorageBackend `json:"storage_backend,omitempty"` + // Setup device based storage using DEVICE + StorageCreateDevice *string `json:"storage_create_device,omitempty"` + // Setup loop based storage with SIZE in GB + StorageCreateLoop *int64 `json:"storage_create_loop,omitempty"` + // Name of storage pool to use or create + StoragePool *string `json:"storage_pool,omitempty"` + // The password required to add new clients + TrustPassword *string `json:"trust_password,omitempty"` +} + +type McollectiveClass struct { + Conf *McollectiveConf `json:"conf,omitempty"` +} + +type McollectiveConf struct { + // Optional value of server private certificate which will be written to + // ``/etc/mcollective/ssl/server-private.pem`` + PrivateCERT *string `json:"private-cert,omitempty"` + // Optional value of server public certificate which will be written to + // ``/etc/mcollective/ssl/server-public.pem`` + PublicCERT *string `json:"public-cert,omitempty"` +} + +type MergeHowElement struct { + Name Name `json:"name"` + Settings []Setting `json:"settings"` +} + +type NTPClass struct { + // List of CIDRs to allow + Allow []string `json:"allow,omitempty"` + // Configuration settings or overrides for the + // ``ntp_client`` specified. + Config *NTPConfig `json:"config,omitempty"` + // Attempt to enable ntp clients if set to True. If set + // to False, ntp client will not be configured or + // installed + Enabled *bool `json:"enabled,omitempty"` + // Name of an NTP client to use to configure system NTP. + // When unprovided or 'auto' the default client preferred + // by the distribution will be used. The following + // built-in client names can be used to override existing + // configuration defaults: chrony, ntp, openntpd, + // ntpdate, systemd-timesyncd. + NTPClient *string `json:"ntp_client,omitempty"` + // List of ntp peers. + Peers []string `json:"peers,omitempty"` + // List of ntp pools. If both pools and servers are + // empty, 4 default pool servers will be provided of + // the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: + // for Alpine Linux when using the Busybox NTP client + // this setting will be ignored due to the limited + // functionality of Busybox's ntpd. + Pools []string `json:"pools,omitempty"` + // List of ntp servers. 
If both pools and servers are + // empty, 4 default pool servers will be provided with + // the format ``{0-3}.{distro}.pool.ntp.org``. + Servers []string `json:"servers,omitempty"` +} + +// Configuration settings or overrides for the +// “ntp_client“ specified. +type NTPConfig struct { + // The executable name for the ``ntp_client``. + // For example, ntp service ``check_exe`` is + // 'ntpd' because it runs the ntpd binary. + CheckExe *string `json:"check_exe,omitempty"` + // The path to where the ``ntp_client`` + // configuration is written. + Confpath *string `json:"confpath,omitempty"` + // List of packages needed to be installed for the + // selected ``ntp_client``. + Packages []string `json:"packages,omitempty"` + // The systemd or sysvinit service name used to + // start and stop the ``ntp_client`` + // service. + ServiceName *string `json:"service_name,omitempty"` + // Inline template allowing users to customize their ``ntp_client`` configuration with the + // use of the Jinja templating engine. + // The template content should start with ``## template:jinja``. + // Within the template, you can utilize any of the following ntp module config keys: + // ``servers``, ``pools``, ``allow``, and ``peers``. + // Each cc_ntp schema config key and expected value type is defined above. + Template *string `json:"template,omitempty"` +} + +type Output struct { + All *AllUnion `json:"all,omitempty"` + Config *AllUnion `json:"config,omitempty"` + Final *AllUnion `json:"final,omitempty"` + Init *AllUnion `json:"init,omitempty"` +} + +type AllClass struct { + // A filepath operation configuration. A string containing a filepath and an optional + // leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite + // or append to the file. The operator '|' redirects content to the command arguments + // specified. + Error *string `json:"error,omitempty"` + // A filepath operation configuration. This is a string containing a filepath and an + // optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to + // overwrite or append to the file. The operator '|' redirects content to the command + // arguments specified. + Output *string `json:"output,omitempty"` +} + +type PackageClass struct { + Apt []Cmd `json:"apt,omitempty"` + Snap []Cmd `json:"snap,omitempty"` +} + +type PhoneHomeClass struct { + // A list of keys to post or ``all``. Default: ``all`` + Post *PostUnion `json:"post,omitempty"` + // The number of times to try sending the phone home data. Default: ``10`` + Tries *int64 `json:"tries,omitempty"` + // The URL to send the phone home data to. + URL string `json:"url"` +} + +type PowerState struct { + // Apply state change only if condition is met. May be boolean true (always met), false + // (never met), or a command string or list to be executed. For command formatting, see the + // documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. + // Default: ``true`` + Condition *Condition `json:"condition,omitempty"` + // Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer + // specifying the number of minutes to delay. Default: ``now`` + Delay *Delay `json:"delay,omitempty"` + // Optional message to display to the user when the system is powering off or rebooting. + Message *string `json:"message,omitempty"` + // Must be one of ``poweroff``, ``halt``, or ``reboot``. 
+ Mode PowerStateMode `json:"mode"` + // Time in seconds to wait for the cloud-init process to finish before executing shutdown. + // Default: ``30`` + Timeout *int64 `json:"timeout,omitempty"` +} + +type PuppetClass struct { + // If ``install_type`` is ``aio``, change the url of the install script. + AioInstallURL *string `json:"aio_install_url,omitempty"` + // Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio`` + // Default: ``true`` + Cleanup *bool `json:"cleanup,omitempty"` + // Puppet collection to install if ``install_type`` is ``aio``. This can be set to one of + // ``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly counterparts) in + // order to install specific release streams. + Collection *string `json:"collection,omitempty"` + // Every key present in the conf object will be added to puppet.conf. As such, section names + // should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid + // puppet configuration options. The configuration is specified as a dictionary containing + // high-level ``
`` keys and lists of ``=`` pairs within each section. + // The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding + // to the instance id and fqdn of the machine respectively. + // + // ``ca_cert`` is a special case. It won't be added to puppet.conf. It holds the + // puppetserver certificate in pem format. It should be a multi-line string (using the | + // YAML notation for multi-line strings). + Conf *PuppetConf `json:"conf,omitempty"` + // The path to the puppet config file. Default depends on ``install_type`` + ConfFile *string `json:"conf_file,omitempty"` + // create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension + // requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html + CsrAttributes *CsrAttributes `json:"csr_attributes,omitempty"` + // The path to the puppet csr attributes file. Default depends on ``install_type`` + CsrAttributesPath *string `json:"csr_attributes_path,omitempty"` + // Whether or not to run puppet after configuration finishes. A single manual run can be + // triggered by setting ``exec`` to ``true``, and additional arguments can be passed to + // ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the + // ``--test`` flag). Default: ``false`` + Exec *bool `json:"exec,omitempty"` + // A list of arguments to pass to 'puppet agent' if 'exec' is true Default: ``['--test']`` + ExecArgs []string `json:"exec_args,omitempty"` + // Whether or not to install puppet. Setting to ``false`` will result in an error if puppet + // is not already present on the system. Default: ``true`` + Install *bool `json:"install,omitempty"` + // Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs + // repositories can be installed by setting ``aio``. Based on this setting, the default + // config/SSL/CSR paths will be adjusted accordingly. Default: ``packages`` + InstallType *PuppetInstallType `json:"install_type,omitempty"` + // Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet`` + PackageName *string `json:"package_name,omitempty"` + // The path to the puppet SSL directory. Default depends on ``install_type`` + SSLDir *string `json:"ssl_dir,omitempty"` + // By default, the puppet service will be automatically enabled after installation and set + // to automatically start on boot. To override this in favor of manual puppet execution set + // ``start_service`` to ``false`` + StartService *bool `json:"start_service,omitempty"` + // Optional version to pass to the installer script or package manager. If unset, the latest + // version from the repos will be installed. + Version *string `json:"version,omitempty"` +} + +// Every key present in the conf object will be added to puppet.conf. As such, section names +// should be one of: “main“, “server“, “agent“ or “user“ and keys should be valid +// puppet configuration options. The configuration is specified as a dictionary containing +// high-level “
“ keys and lists of “=“ pairs within each section. +// The “certname“ key supports string substitutions for “%i“ and “%f“, corresponding +// to the instance id and fqdn of the machine respectively. +// +// “ca_cert“ is a special case. It won't be added to puppet.conf. It holds the +// puppetserver certificate in pem format. It should be a multi-line string (using the | +// YAML notation for multi-line strings). +type PuppetConf struct { + Agent map[string]interface{} `json:"agent,omitempty"` + CACERT *string `json:"ca_cert,omitempty"` + Main map[string]interface{} `json:"main,omitempty"` + Server map[string]interface{} `json:"server,omitempty"` + User map[string]interface{} `json:"user,omitempty"` +} + +// create a “csr_attributes.yaml“ file for CSR attributes and certificate extension +// requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html +type CsrAttributes struct { + CustomAttributes map[string]interface{} `json:"custom_attributes,omitempty"` + ExtensionRequests map[string]interface{} `json:"extension_requests,omitempty"` +} + +type RandomSeed struct { + // Execute this command to seed random. The command will have RANDOM_SEED_FILE in its + // environment set to the value of ``file`` above. + Command []string `json:"command,omitempty"` + // If true, and ``command`` is not available to be run then an exception is raised and + // cloud-init will record failure. Otherwise, only debug error is mentioned. Default: + // ``false`` + CommandRequired *bool `json:"command_required,omitempty"` + // This data will be written to ``file`` before data from the datasource. When using a + // multi-line value or specifying binary data, be sure to follow YAML syntax and use the + // ``|`` and ``!binary`` YAML format specifiers when appropriate + Data *string `json:"data,omitempty"` + // Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, + // ``gzip``, or ``gz``. Default: ``raw`` + Encoding *RandomSeedEncoding `json:"encoding,omitempty"` + // File to write random data to. Default: ``/dev/urandom`` + File *string `json:"file,omitempty"` +} + +type Reporting struct { +} + +type ResolvConfClass struct { + // The domain to be added as ``domain`` line + Domain *string `json:"domain,omitempty"` + // A list of nameservers to use to be added as ``nameserver`` lines + Nameservers []interface{} `json:"nameservers,omitempty"` + // Key/value pairs of options to go under ``options`` heading. A unary option should be + // specified as ``true`` + Options map[string]interface{} `json:"options,omitempty"` + // A list of domains to be added ``search`` line + Searchdomains []interface{} `json:"searchdomains,omitempty"` + // A list of IP addresses to be added to ``sortlist`` line + Sortlist []interface{} `json:"sortlist,omitempty"` +} + +type RhSubscriptionClass struct { + // The activation key to use. Must be used with ``org``. Should not be used with + // ``username`` or ``password`` + ActivationKey *string `json:"activation-key,omitempty"` + // A list of pools ids add to the subscription + AddPool []string `json:"add-pool,omitempty"` + // Whether to attach subscriptions automatically + AutoAttach *bool `json:"auto-attach,omitempty"` + // A list of repositories to disable + DisableRepo []string `json:"disable-repo,omitempty"` + // A list of repositories to enable + EnableRepo []string `json:"enable-repo,omitempty"` + // The organization number to use. Must be used with ``activation-key``. 
Should not be used + // with ``username`` or ``password`` + Org *int64 `json:"org,omitempty"` + // The password to use. Must be used with username. Should not be used with + // ``activation-key`` or ``org`` + Password *string `json:"password,omitempty"` + // Sets the baseurl in ``/etc/rhsm/rhsm.conf`` + RhsmBaseurl *string `json:"rhsm-baseurl,omitempty"` + // Sets the serverurl in ``/etc/rhsm/rhsm.conf`` + ServerHostname *string `json:"server-hostname,omitempty"` + // The service level to use when subscribing to RH repositories. ``auto-attach`` must be + // true for this to be used + ServiceLevel *string `json:"service-level,omitempty"` + // The username to use. Must be used with password. Should not be used with + // ``activation-key`` or ``org`` + Username *string `json:"username,omitempty"` +} + +type RsyslogClass struct { + // The executable name for the rsyslog daemon. + // For example, ``rsyslogd``, or ``/opt/sbin/rsyslogd`` if the rsyslog binary is in an + // unusual path. This is only used if ``install_rsyslog`` is ``true``. Default: ``rsyslogd`` + CheckExe *string `json:"check_exe,omitempty"` + // The directory where rsyslog configuration files will be written. Default: + // ``/etc/rsyslog.d`` + ConfigDir *string `json:"config_dir,omitempty"` + // The name of the rsyslog configuration file. Default: ``20-cloud-config.conf`` + ConfigFilename *string `json:"config_filename,omitempty"` + // Each entry in ``configs`` is either a string or an object. Each config entry contains a + // configuration string and a file to write it to. For config entries that are an object, + // ``filename`` sets the target filename and ``content`` specifies the config string to + // write. For config entries that are only a string, the string is used as the config string + // to write. If the filename to write the config to is not specified, the value of the + // ``config_filename`` key is used. A file with the selected filename will be written inside + // the directory specified by ``config_dir``. + Configs []ConfigElement `json:"configs,omitempty"` + // Install rsyslog. Default: ``false`` + InstallRsyslog *bool `json:"install_rsyslog,omitempty"` + // List of packages needed to be installed for rsyslog. This is only used if + // ``install_rsyslog`` is ``true``. Default: ``[rsyslog]`` + Packages []string `json:"packages,omitempty"` + // Each key is the name for an rsyslog remote entry. Each value holds the contents of the + // remote config for rsyslog. The config consists of the following parts: + // + // - filter for log messages (defaults to ``*.*``) + // + // - optional leading ``@`` or ``@@``, indicating udp and tcp respectively (defaults to + // ``@``, for udp) + // + // - ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]`` format, (e.g. + // ``@[fd00::1]:514``) + // + // - optional port number (defaults to ``514``) + // + // This module will provide sane defaults for any part of the remote entry that is not + // specified, so in most cases remote hosts can be specified just using ``: + //
``. + Remotes map[string]interface{} `json:"remotes,omitempty"` + // The command to use to reload the rsyslog service after the config has been updated. If + // this is set to ``auto``, then an appropriate command for the distro will be used. This is + // the default behavior. To manually set the command, use a list of command args (e.g. + // ``[systemctl, restart, rsyslog]``). + ServiceReloadCommand *ServiceReloadCommandUnion `json:"service_reload_command,omitempty"` +} + +type ConfigConfig struct { + Content string `json:"content"` + Filename *string `json:"filename,omitempty"` +} + +type SSHClass struct { + // Set false to avoid printing SSH keys to system console. Default: ``true``. + EmitKeysToConsole bool `json:"emit_keys_to_console"` +} + +// A dictionary entries for the public and private host keys of each desired key type. +// Entries in the “ssh_keys“ config dict should have keys in the format “_private“, “_public“, and, optionally, “_certificate“, e.g. +// “rsa_private: “, “rsa_public: “, and “rsa_certificate: “. Not all +// key types have to be specified, ones left unspecified will not be used. If this config +// option is used, then separate keys will not be automatically generated. In order to +// specify multi-line private host keys and certificates, use YAML multi-line syntax. +// **Note:** Your ssh keys might possibly be visible to unprivileged users on your system, +// depending on your cloud's security model. +type SSHKeys struct { +} + +type SSHPublishHostkeys struct { + // The SSH key types to ignore when publishing. Default: ``[]`` to publish all SSH key types + Blacklist []string `json:"blacklist,omitempty"` + // If true, will read host keys from ``/etc/ssh/*.pub`` and publish them to the datasource + // (if supported). Default: ``true`` + Enabled *bool `json:"enabled,omitempty"` +} + +type SaltMinionClass struct { + // Configuration to be written to `config_dir`/minion + Conf map[string]interface{} `json:"conf,omitempty"` + // Directory to write config files to. Default: ``/etc/salt`` + ConfigDir *string `json:"config_dir,omitempty"` + // Configuration to be written to `config_dir`/grains + Grains map[string]interface{} `json:"grains,omitempty"` + // Package name to install. Default: ``salt-minion`` + PkgName *string `json:"pkg_name,omitempty"` + // Directory to write key files. Default: `config_dir`/pki/minion + PKIDir *string `json:"pki_dir,omitempty"` + // Private key to be used by salt minion + PrivateKey *string `json:"private_key,omitempty"` + // Public key to be used by the salt minion + PublicKey *string `json:"public_key,omitempty"` + // Service name to enable. Default: ``salt-minion`` + ServiceName *string `json:"service_name,omitempty"` +} + +type SnapClass struct { + // Properly-signed snap assertions which will run before and snap ``commands``. 
+ Assertions *Assertions `json:"assertions,omitempty"` + // Snap commands to run on the target system + Commands *Commands `json:"commands,omitempty"` +} + +type SpacewalkClass struct { + // The activation key to use when registering with Spacewalk + ActivationKey *string `json:"activation_key,omitempty"` + // The proxy to use when connecting to Spacewalk + Proxy *string `json:"proxy,omitempty"` + // The Spacewalk server to use + Server *string `json:"server,omitempty"` +} + +type Swap struct { + // Path to the swap file to create + Filename *string `json:"filename,omitempty"` + // The maxsize in bytes of the swap file + Maxsize *Size `json:"maxsize,omitempty"` + // The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the + // format where units are one of B, K, M, G or T. **WARNING: Attempts to + // use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in + // unexpected behavior. SI prefixes names (KB, MB) are required on pre-23.1 cloud-init, + // however IEC values are used. In summary, assume 1KB == 1024B, not 1000B** + Size *Size `json:"size,omitempty"` +} + +type UbuntuAdvantageClass struct { + // Configuration settings or override Ubuntu Advantage config. + Config *UbuntuAdvantageConfig `json:"config,omitempty"` + // Optional list of ubuntu-advantage services to enable. Any of: cc-eal, cis, esm-infra, + // fips, fips-updates, livepatch. By default, a given contract token will automatically + // enable a number of services, use this list to supplement which services should + // additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled + // in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, + // then only those services will be enabled, ignoring contract defaults. Passing beta + // services here will cause an error. + Enable []string `json:"enable,omitempty"` + // Optional list of ubuntu-advantage beta services to enable. By default, a given contract + // token will automatically enable a number of services, use this list to supplement which + // services should additionally be enabled. Any service unavailable on a given Ubuntu + // release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, + // if this list is given, then only those services will be enabled, ignoring contract + // defaults. + EnableBeta []string `json:"enable_beta,omitempty"` + // Ubuntu Advantage features. + Features *Features `json:"features,omitempty"` + // Contract token obtained from https://ubuntu.com/advantage to attach. Required for non-Pro + // instances. + Token *string `json:"token,omitempty"` +} + +// Configuration settings or override Ubuntu Advantage config. +type UbuntuAdvantageConfig struct { + // HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at + // ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy`` + GlobalAptHTTPProxy *string `json:"global_apt_http_proxy,omitempty"` + // HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at + // ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy`` + GlobalAptHTTPSProxy *string `json:"global_apt_https_proxy,omitempty"` + // Ubuntu Advantage HTTP Proxy URL or null to unset. + HTTPProxy *string `json:"http_proxy,omitempty"` + // Ubuntu Advantage HTTPS Proxy URL or null to unset. + HTTPSProxy *string `json:"https_proxy,omitempty"` + // HTTP Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. 
Stored + // at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy`` + UaAptHTTPProxy *string `json:"ua_apt_http_proxy,omitempty"` + // HTTPS Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. Stored + // at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy`` + UaAptHTTPSProxy *string `json:"ua_apt_https_proxy,omitempty"` +} + +// Ubuntu Advantage features. +type Features struct { + // Optional boolean for controlling if ua-auto-attach.service (in Ubuntu Pro instances) will + // be attempted each boot. Default: ``false`` + DisableAutoAttach *bool `json:"disable_auto_attach,omitempty"` +} + +type Updates struct { + Network *Network `json:"network,omitempty"` +} + +type Network struct { + When []When `json:"when"` +} + +type PurpleSchemaCloudConfigV1 struct { + // Boolean set ``false`` to disable creation of specified user ``groups``. Default: ``true``. + CreateGroups *bool `json:"create_groups,omitempty"` + // List of doas rules to add for a user. doas or opendoas must be installed for rules to + // take effect. + Doas []string `json:"doas,omitempty"` + // Optional. Date on which the user's account will be disabled. Default: ``null`` + Expiredate *string `json:"expiredate,omitempty"` + // Optional comment about the user, usually a comma-separated string of real name and + // contact information + Gecos *string `json:"gecos,omitempty"` + // Optional comma-separated string of groups to add the user to. + Groups *UserGroups `json:"groups,omitempty"` + // Hash of user password to be applied. This will be applied even if the user is + // preexisting. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. + // **Note:** Your password might possibly be visible to unprivileged users on your system, + // depending on your cloud's security model. Check if your cloud's IMDS server is visible + // from an unprivileged user to evaluate risk. + HashedPasswd *string `json:"hashed_passwd,omitempty"` + // Optional home dir for user. Default: ``/home/`` + Homedir *string `json:"homedir,omitempty"` + // Optional string representing the number of days until the user is disabled. + Inactive *string `json:"inactive,omitempty"` + // Default: ``true`` + LockPasswd *bool `json:"lock-passwd,omitempty"` + // Disable password login. Default: ``true`` + SchemaCloudConfigV1LockPasswd *bool `json:"lock_passwd,omitempty"` + // The user's login name. Required otherwise user creation will be skipped for this user. + Name *string `json:"name,omitempty"` + // Do not create home directory. Default: ``false`` + NoCreateHome *bool `json:"no_create_home,omitempty"` + // Do not initialize lastlog and faillog for user. Default: ``false`` + NoLogInit *bool `json:"no_log_init,omitempty"` + // Do not create group named after user. Default: ``false`` + NoUserGroup *bool `json:"no_user_group,omitempty"` + // Hash of user password applied when user does not exist. This will NOT be applied if the + // user already exists. To generate this hash, run: ``mkpasswd --method=SHA-512 + // --rounds=500000`` **Note:** Your password might possibly be visible to unprivileged users + // on your system, depending on your cloud's security model. Check if your cloud's IMDS + // server is visible from an unprivileged user to evaluate risk. + Passwd *string `json:"passwd,omitempty"` + // Clear text of user password to be applied. This will be applied even if the user is + // preexisting. **Note:** SSH keys or certificates are a safer choice for logging in to your + // system. 
For local escalation, supplying a hashed password is a safer choice than plain + // text. Your password might possibly be visible to unprivileged users on your system, + // depending on your cloud's security model. An exposed plain text password is an immediate + // security concern. Check if your cloud's IMDS server is visible from an unprivileged user + // to evaluate risk. + PlainTextPasswd *string `json:"plain_text_passwd,omitempty"` + // Primary group for user. Default: ```` + PrimaryGroup *string `json:"primary_group,omitempty"` + // SELinux user for user's login. Default: the default SELinux user. + SelinuxUser *string `json:"selinux_user,omitempty"` + // Path to the user's login shell. Default: the host system's default shell. + Shell *string `json:"shell,omitempty"` + // Specify an email address to create the user as a Snappy user through ``snap + // create-user``. If an Ubuntu SSO account is associated with the address, username and SSH + // keys will be requested from there. + Snapuser *string `json:"snapuser,omitempty"` + // List of SSH keys to add to user's authkeys file. Can not be combined with + // ``ssh_redirect_user`` + SSHAuthorizedKeys []string `json:"ssh_authorized_keys,omitempty"` + // List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. See + // the man page[1] for more details. [1] + // https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html + SSHImportID []string `json:"ssh_import_id,omitempty"` + // Boolean set to true to disable SSH logins for this user. When specified, all cloud + // meta-data public SSH keys will be set up in a disabled state for this username. Any SSH + // login as this username will timeout and prompt with a message to login instead as the + // ``default_username`` for this instance. Default: ``false``. This key can not be combined + // with ``ssh_import_id`` or ``ssh_authorized_keys``. + SSHRedirectUser *bool `json:"ssh_redirect_user,omitempty"` + Sudo *Sudo `json:"sudo,omitempty"` + // Optional. Create user as system user with no home directory. Default: ``false``. + System *bool `json:"system,omitempty"` + // The user's ID. Default value [system default] + Uid *Uid `json:"uid,omitempty"` +} + +type FluffySchemaCloudConfigV1 struct { + // Boolean set ``false`` to disable creation of specified user ``groups``. Default: ``true``. + CreateGroups *bool `json:"create_groups,omitempty"` + // List of doas rules to add for a user. doas or opendoas must be installed for rules to + // take effect. + Doas []string `json:"doas,omitempty"` + // Optional. Date on which the user's account will be disabled. Default: ``null`` + Expiredate *string `json:"expiredate,omitempty"` + // Optional comment about the user, usually a comma-separated string of real name and + // contact information + Gecos *string `json:"gecos,omitempty"` + // Optional comma-separated string of groups to add the user to. + Groups *UserGroups `json:"groups,omitempty"` + // Hash of user password to be applied. This will be applied even if the user is + // preexisting. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. + // **Note:** Your password might possibly be visible to unprivileged users on your system, + // depending on your cloud's security model. Check if your cloud's IMDS server is visible + // from an unprivileged user to evaluate risk. + HashedPasswd *string `json:"hashed_passwd,omitempty"` + // Optional home dir for user. 
Default: ``/home/`` + Homedir *string `json:"homedir,omitempty"` + // Optional string representing the number of days until the user is disabled. + Inactive *string `json:"inactive,omitempty"` + // Default: ``true`` + LockPasswd *bool `json:"lock-passwd,omitempty"` + // Disable password login. Default: ``true`` + SchemaCloudConfigV1LockPasswd *bool `json:"lock_passwd,omitempty"` + // The user's login name. Required otherwise user creation will be skipped for this user. + Name *string `json:"name,omitempty"` + // Do not create home directory. Default: ``false`` + NoCreateHome *bool `json:"no_create_home,omitempty"` + // Do not initialize lastlog and faillog for user. Default: ``false`` + NoLogInit *bool `json:"no_log_init,omitempty"` + // Do not create group named after user. Default: ``false`` + NoUserGroup *bool `json:"no_user_group,omitempty"` + // Hash of user password applied when user does not exist. This will NOT be applied if the + // user already exists. To generate this hash, run: ``mkpasswd --method=SHA-512 + // --rounds=500000`` **Note:** Your password might possibly be visible to unprivileged users + // on your system, depending on your cloud's security model. Check if your cloud's IMDS + // server is visible from an unprivileged user to evaluate risk. + Passwd *string `json:"passwd,omitempty"` + // Clear text of user password to be applied. This will be applied even if the user is + // preexisting. **Note:** SSH keys or certificates are a safer choice for logging in to your + // system. For local escalation, supplying a hashed password is a safer choice than plain + // text. Your password might possibly be visible to unprivileged users on your system, + // depending on your cloud's security model. An exposed plain text password is an immediate + // security concern. Check if your cloud's IMDS server is visible from an unprivileged user + // to evaluate risk. + PlainTextPasswd *string `json:"plain_text_passwd,omitempty"` + // Primary group for user. Default: ```` + PrimaryGroup *string `json:"primary_group,omitempty"` + // SELinux user for user's login. Default: the default SELinux user. + SelinuxUser *string `json:"selinux_user,omitempty"` + // Path to the user's login shell. Default: the host system's default shell. + Shell *string `json:"shell,omitempty"` + // Specify an email address to create the user as a Snappy user through ``snap + // create-user``. If an Ubuntu SSO account is associated with the address, username and SSH + // keys will be requested from there. + Snapuser *string `json:"snapuser,omitempty"` + // List of SSH keys to add to user's authkeys file. Can not be combined with + // ``ssh_redirect_user`` + SSHAuthorizedKeys []string `json:"ssh_authorized_keys,omitempty"` + // List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. See + // the man page[1] for more details. [1] + // https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html + SSHImportID []string `json:"ssh_import_id,omitempty"` + // Boolean set to true to disable SSH logins for this user. When specified, all cloud + // meta-data public SSH keys will be set up in a disabled state for this username. Any SSH + // login as this username will timeout and prompt with a message to login instead as the + // ``default_username`` for this instance. Default: ``false``. This key can not be combined + // with ``ssh_import_id`` or ``ssh_authorized_keys``. + SSHRedirectUser *bool `json:"ssh_redirect_user,omitempty"` + Sudo *Sudo `json:"sudo,omitempty"` + // Optional. 
Create user as system user with no home directory. Default: ``false``. + System *bool `json:"system,omitempty"` + // The user's ID. Default value [system default] + Uid *Uid `json:"uid,omitempty"` +} + +type VendorData struct { + // Whether vendor data is enabled or not. Default: ``true`` + Enabled *GrubPCInstallDevicesEmpty `json:"enabled,omitempty"` + // The command to run before any vendor scripts. Its primary use case is for profiling a + // script, not to prevent its run + Prefix *Prefix `json:"prefix,omitempty"` +} + +type WireguardClass struct { + Interfaces []Interface `json:"interfaces"` + // List of shell commands to be executed as probes. + Readinessprobe []string `json:"readinessprobe,omitempty"` +} + +type Interface struct { + // Path to configuration file of Wireguard interface + ConfigPath *string `json:"config_path,omitempty"` + // Wireguard interface configuration. Contains key, peer, ... + Content *string `json:"content,omitempty"` + // Name of the interface. Typically wgx (example: wg0) + Name *string `json:"name,omitempty"` +} + +type WriteFile struct { + // Whether to append ``content`` to existing file if ``path`` exists. Default: ``false``. + Append *bool `json:"append,omitempty"` + // Optional content to write to the provided ``path``. When content is present and encoding + // is not 'text/plain', decode the content prior to writing. Default: ``''`` + Content *string `json:"content,omitempty"` + // Defer writing the file until 'final' stage, after users were created, and packages were + // installed. Default: ``false``. + Defer *bool `json:"defer,omitempty"` + // Optional encoding type of the content. Default: ``text/plain``. No decoding is performed + // by default. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, + // gzip+b64, b64, base64 + Encoding *WriteFileEncoding `json:"encoding,omitempty"` + // Optional owner:group to chown on the file and new directories. Default: ``root:root`` + Owner *string `json:"owner,omitempty"` + // Path of the file to which ``content`` is decoded and written + Path string `json:"path"` + // Optional file permissions to set on ``path`` represented as an octal string '0###'. + // Default: ``0o644`` + Permissions *string `json:"permissions,omitempty"` +} + +type YumRepos struct { +} + +type Zypper struct { + // Any supported zypo.conf key is written to ``/etc/zypp/zypp.conf`` + Config map[string]interface{} `json:"config,omitempty"` + Repos []Repo `json:"repos,omitempty"` +} + +type Repo struct { + // The base repositoy URL + Baseurl string `json:"baseurl"` + // The unique id of the repo, used when writing /etc/zypp/repos.d/.repo. + ID string `json:"id"` +} + +// The type of installation for ansible. It can be one of the following values: +// +// - “distro“ +// - “pip“ +type InstallMethod string + +const ( + Distro InstallMethod = "distro" + Pip InstallMethod = "pip" +) + +// Optional command to run to create the filesystem. Can include string substitutions of the +// other “fs_setup“ config keys. This is only necessary if you need to override the +// default command. +// +// Optional options to pass to the filesystem creation command. Ignored if you using “cmd“ +// directly. +// +// Properly-signed snap assertions which will run before and snap “commands“. +// +// # The SSH public key to import +// +// A filepath operation configuration. This is a string containing a filepath and an +// optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to +// overwrite or append to the file. 
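A similar hedged sketch for the WriteFile type above, decoding a single write_files entry given in its JSON form; again the import path and package name schema are assumptions:

    // Sketch only: decode one write_files entry into the generated WriteFile
    // type and read back the typed fields.
    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit/schema"
    )

    func main() {
        entry := []byte(`{
            "path": "/etc/motd",
            "content": "SGVsbG8sIHdvcmxkLgo=",
            "encoding": "b64",
            "permissions": "0644"
        }`)

        var wf schema.WriteFile
        if err := json.Unmarshal(entry, &wf); err != nil {
            panic(err)
        }
        fmt.Println(wf.Path, *wf.Encoding, *wf.Permissions)
        // /etc/motd b64 0644
    }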
The operator '|' redirects content to the command +// arguments specified. +// +// A list specifying filepath operation configuration for stdout and stderror +type AptPipeliningEnum string + +const ( + CloudconfigNone AptPipeliningEnum = "none" + OS AptPipeliningEnum = "os" + Unchanged AptPipeliningEnum = "unchanged" +) + +type ByobuByDefault string + +const ( + Disable ByobuByDefault = "disable" + DisableSystem ByobuByDefault = "disable-system" + DisableUser ByobuByDefault = "disable-user" + Enable ByobuByDefault = "enable" + EnableSystem ByobuByDefault = "enable-system" + EnableUser ByobuByDefault = "enable-user" + System ByobuByDefault = "system" + User ByobuByDefault = "user" +) + +// The type of installation for chef. It can be one of the following values: +// +// - “packages“ +// - “gems“ +// - “omnibus“ +type ChefInstallType string + +const ( + Gems ChefInstallType = "gems" + Omnibus ChefInstallType = "omnibus" + PurplePackages ChefInstallType = "packages" +) + +type Type string + +const ( + Hash Type = "hash" + Random Type = "RANDOM" + Text Type = "text" +) + +type CloudConfigModuleEnum string + +const ( + Ansible CloudConfigModuleEnum = "ansible" + ApkConfigure CloudConfigModuleEnum = "apk-configure" + AptConfigure CloudConfigModuleEnum = "apt-configure" + AptPipelining CloudConfigModuleEnum = "apt-pipelining" + Bootcmd CloudConfigModuleEnum = "bootcmd" + Byobu CloudConfigModuleEnum = "byobu" + CACerts CloudConfigModuleEnum = "ca-certs" + Chef CloudConfigModuleEnum = "chef" + DisableEc2Metadata CloudConfigModuleEnum = "disable-ec2-metadata" + DiskSetup CloudConfigModuleEnum = "disk-setup" + Fan CloudConfigModuleEnum = "fan" + FinalMessage CloudConfigModuleEnum = "final-message" + GrubDpkg CloudConfigModuleEnum = "grub-dpkg" + InstallHotplug CloudConfigModuleEnum = "install-hotplug" + Keyboard CloudConfigModuleEnum = "keyboard" + KeysToConsole CloudConfigModuleEnum = "keys-to-console" + Landscape CloudConfigModuleEnum = "landscape" + Locale CloudConfigModuleEnum = "locale" + Lxd CloudConfigModuleEnum = "lxd" + Mcollective CloudConfigModuleEnum = "mcollective" + Migrator CloudConfigModuleEnum = "migrator" + Mounts CloudConfigModuleEnum = "mounts" + NTP CloudConfigModuleEnum = "ntp" + PackageUpdateUpgradeInstall CloudConfigModuleEnum = "package-update-upgrade-install" + PhoneHome CloudConfigModuleEnum = "phone-home" + PowerStateChange CloudConfigModuleEnum = "power-state-change" + Puppet CloudConfigModuleEnum = "puppet" + ResetRmc CloudConfigModuleEnum = "reset-rmc" + Resizefs CloudConfigModuleEnum = "resizefs" + ResolvConf CloudConfigModuleEnum = "resolv-conf" + RhSubscription CloudConfigModuleEnum = "rh-subscription" + RightscaleUserdata CloudConfigModuleEnum = "rightscale-userdata" + Rsyslog CloudConfigModuleEnum = "rsyslog" + Runcmd CloudConfigModuleEnum = "runcmd" + SSH CloudConfigModuleEnum = "ssh" + SSHAuthkeyFingerprints CloudConfigModuleEnum = "ssh-authkey-fingerprints" + SSHImportID CloudConfigModuleEnum = "ssh-import-id" + SaltMinion CloudConfigModuleEnum = "salt-minion" + SchemaCloudConfigV1ApkConfigure CloudConfigModuleEnum = "apk_configure" + SchemaCloudConfigV1AptConfigure CloudConfigModuleEnum = "apt_configure" + SchemaCloudConfigV1AptPipelining CloudConfigModuleEnum = "apt_pipelining" + SchemaCloudConfigV1CACerts CloudConfigModuleEnum = "ca_certs" + SchemaCloudConfigV1DisableEc2Metadata CloudConfigModuleEnum = "disable_ec2_metadata" + SchemaCloudConfigV1DiskSetup CloudConfigModuleEnum = "disk_setup" + SchemaCloudConfigV1FinalMessage CloudConfigModuleEnum = 
"final_message" + SchemaCloudConfigV1Growpart CloudConfigModuleEnum = "growpart" + SchemaCloudConfigV1GrubDpkg CloudConfigModuleEnum = "grub_dpkg" + SchemaCloudConfigV1InstallHotplug CloudConfigModuleEnum = "install_hotplug" + SchemaCloudConfigV1KeysToConsole CloudConfigModuleEnum = "keys_to_console" + SchemaCloudConfigV1PackageUpdateUpgradeInstall CloudConfigModuleEnum = "package_update_upgrade_install" + SchemaCloudConfigV1PhoneHome CloudConfigModuleEnum = "phone_home" + SchemaCloudConfigV1PowerStateChange CloudConfigModuleEnum = "power_state_change" + SchemaCloudConfigV1ResetRmc CloudConfigModuleEnum = "reset_rmc" + SchemaCloudConfigV1ResolvConf CloudConfigModuleEnum = "resolv_conf" + SchemaCloudConfigV1RhSubscription CloudConfigModuleEnum = "rh_subscription" + SchemaCloudConfigV1RightscaleUserdata CloudConfigModuleEnum = "rightscale_userdata" + SchemaCloudConfigV1SSHAuthkeyFingerprints CloudConfigModuleEnum = "ssh_authkey_fingerprints" + SchemaCloudConfigV1SSHImportID CloudConfigModuleEnum = "ssh_import_id" + SchemaCloudConfigV1SaltMinion CloudConfigModuleEnum = "salt_minion" + SchemaCloudConfigV1ScriptsPerBoot CloudConfigModuleEnum = "scripts_per_boot" + SchemaCloudConfigV1ScriptsPerInstance CloudConfigModuleEnum = "scripts_per_instance" + SchemaCloudConfigV1ScriptsPerOnce CloudConfigModuleEnum = "scripts_per_once" + SchemaCloudConfigV1ScriptsUser CloudConfigModuleEnum = "scripts_user" + SchemaCloudConfigV1ScriptsVendor CloudConfigModuleEnum = "scripts_vendor" + SchemaCloudConfigV1SeedRandom CloudConfigModuleEnum = "seed_random" + SchemaCloudConfigV1SetHostname CloudConfigModuleEnum = "set_hostname" + SchemaCloudConfigV1SetPasswords CloudConfigModuleEnum = "set_passwords" + SchemaCloudConfigV1UbuntuAdvantage CloudConfigModuleEnum = "ubuntu_advantage" + SchemaCloudConfigV1UbuntuAutoinstall CloudConfigModuleEnum = "ubuntu_autoinstall" + SchemaCloudConfigV1UbuntuDrivers CloudConfigModuleEnum = "ubuntu_drivers" + SchemaCloudConfigV1UpdateEtcHosts CloudConfigModuleEnum = "update_etc_hosts" + SchemaCloudConfigV1UpdateHostname CloudConfigModuleEnum = "update_hostname" + SchemaCloudConfigV1UsersGroups CloudConfigModuleEnum = "users_groups" + SchemaCloudConfigV1WriteFiles CloudConfigModuleEnum = "write_files" + SchemaCloudConfigV1WriteFilesDeferred CloudConfigModuleEnum = "write_files_deferred" + SchemaCloudConfigV1YumAddRepo CloudConfigModuleEnum = "yum_add_repo" + SchemaCloudConfigV1ZypperAddRepo CloudConfigModuleEnum = "zypper_add_repo" + ScriptsPerBoot CloudConfigModuleEnum = "scripts-per-boot" + ScriptsPerInstance CloudConfigModuleEnum = "scripts-per-instance" + ScriptsPerOnce CloudConfigModuleEnum = "scripts-per-once" + ScriptsUser CloudConfigModuleEnum = "scripts-user" + ScriptsVendor CloudConfigModuleEnum = "scripts-vendor" + SeedRandom CloudConfigModuleEnum = "seed-random" + SetHostname CloudConfigModuleEnum = "set-hostname" + SetPasswords CloudConfigModuleEnum = "set-passwords" + Snap CloudConfigModuleEnum = "snap" + Spacewalk CloudConfigModuleEnum = "spacewalk" + Timezone CloudConfigModuleEnum = "timezone" + UbuntuAdvantage CloudConfigModuleEnum = "ubuntu-advantage" + UbuntuAutoinstall CloudConfigModuleEnum = "ubuntu-autoinstall" + UbuntuDrivers CloudConfigModuleEnum = "ubuntu-drivers" + UpdateEtcHosts CloudConfigModuleEnum = "update-etc-hosts" + UpdateHostname CloudConfigModuleEnum = "update-hostname" + UsersGroups CloudConfigModuleEnum = "users-groups" + Wireguard CloudConfigModuleEnum = "wireguard" + WriteFiles CloudConfigModuleEnum = "write-files" + WriteFilesDeferred 
CloudConfigModuleEnum = "write-files-deferred" + YumAddRepo CloudConfigModuleEnum = "yum-add-repo" + ZypperAddRepo CloudConfigModuleEnum = "zypper-add-repo" +) + +// The partition can be specified by setting “partition“ to the desired partition number. +// The “partition“ option may also be set to “auto“, in which this module will search +// for the existence of a filesystem matching the “label“, “filesystem“ and “device“ +// of the “fs_setup“ entry and will skip creating the filesystem if one is found. The +// “partition“ option may also be set to “any“, in which case any filesystem that +// matches “filesystem“ and “device“ will cause this module to skip filesystem creation +// for the “fs_setup“ entry, regardless of “label“ matching or not. To write a +// filesystem directly to a device, use “partition: none“. “partition: none“ will +// **always** write the filesystem, even when the “label“ and “filesystem“ are matched, +// and “overwrite“ is “false“. +// +// Optional command to run to create the filesystem. Can include string substitutions of the +// other “fs_setup“ config keys. This is only necessary if you need to override the +// default command. +// +// Optional options to pass to the filesystem creation command. Ignored if you using “cmd“ +// directly. +// +// Properly-signed snap assertions which will run before and snap “commands“. +// +// # The SSH public key to import +// +// A filepath operation configuration. This is a string containing a filepath and an +// optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to +// overwrite or append to the file. The operator '|' redirects content to the command +// arguments specified. +// +// A list specifying filepath operation configuration for stdout and stderror +type Partition string + +const ( + Any Partition = "any" + PartitionAuto Partition = "auto" + PartitionNone Partition = "none" +) + +type ModeMode string + +const ( + Gpart ModeMode = "gpart" + ModeAuto ModeMode = "auto" + ModeGrowpart ModeMode = "growpart" + Off ModeMode = "off" +) + +// The log level for the client. Default: “info“. +type LogLevel string + +const ( + Critical LogLevel = "critical" + Debug LogLevel = "debug" + Error LogLevel = "error" + Info LogLevel = "info" + Warning LogLevel = "warning" +) + +// Whether to setup LXD bridge, use an existing bridge by “name“ or create a new bridge. +// `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching +// “name“ and `new` will create a new bridge. +type BridgeMode string + +const ( + Existing BridgeMode = "existing" + ModeNone BridgeMode = "none" + New BridgeMode = "new" +) + +// Storage backend to use. Default: “dir“. 
+type StorageBackend string + +const ( + Btrfs StorageBackend = "btrfs" + Dir StorageBackend = "dir" + LVM StorageBackend = "lvm" + Zfs StorageBackend = "zfs" +) + +type ManageEtcHostsEnum string + +const ( + Localhost ManageEtcHostsEnum = "localhost" + Template ManageEtcHostsEnum = "template" +) + +type Name string + +const ( + Dict Name = "dict" + List Name = "list" + Str Name = "str" +) + +type Setting string + +const ( + AllowDelete Setting = "allow_delete" + Append Setting = "append" + NoReplace Setting = "no_replace" + Prepend Setting = "prepend" + RecurseArray Setting = "recurse_array" + RecurseDict Setting = "recurse_dict" + RecurseList Setting = "recurse_list" + RecurseStr Setting = "recurse_str" + Replace Setting = "replace" +) + +type PostElement string + +const ( + FQDN PostElement = "fqdn" + Hostname PostElement = "hostname" + InstanceID PostElement = "instance_id" + PubKeyEcdsa PostElement = "pub_key_ecdsa" + PubKeyEd25519 PostElement = "pub_key_ed25519" + PubKeyRSA PostElement = "pub_key_rsa" +) + +type PurplePost string + +const ( + All PurplePost = "all" +) + +// Must be one of “poweroff“, “halt“, or “reboot“. +type PowerStateMode string + +const ( + Halt PowerStateMode = "halt" + Poweroff PowerStateMode = "poweroff" + Reboot PowerStateMode = "reboot" +) + +// Valid values are “packages“ and “aio“. Agent packages from the puppetlabs +// repositories can be installed by setting “aio“. Based on this setting, the default +// config/SSL/CSR paths will be adjusted accordingly. Default: “packages“ +type PuppetInstallType string + +const ( + Aio PuppetInstallType = "aio" + FluffyPackages PuppetInstallType = "packages" +) + +// Used to decode “data“ provided. Allowed values are “raw“, “base64“, “b64“, +// “gzip“, or “gz“. Default: “raw“ +type RandomSeedEncoding string + +const ( + PurpleB64 RandomSeedEncoding = "b64" + PurpleBase64 RandomSeedEncoding = "base64" + PurpleGz RandomSeedEncoding = "gz" + PurpleGzip RandomSeedEncoding = "gzip" + Raw RandomSeedEncoding = "raw" +) + +type ResizeRootfsEnum string + +const ( + Noblock ResizeRootfsEnum = "noblock" +) + +type ServiceReloadCommandEnum string + +const ( + ServiceReloadCommandAuto ServiceReloadCommandEnum = "auto" +) + +type SSHGenkeytype string + +const ( + Ecdsa SSHGenkeytype = "ecdsa" + Ed25519 SSHGenkeytype = "ed25519" + RSA SSHGenkeytype = "rsa" +) + +type When string + +const ( + Boot When = "boot" + BootLegacy When = "boot-legacy" + BootNewInstance When = "boot-new-instance" + Hotplug When = "hotplug" +) + +// Optional encoding type of the content. Default: “text/plain“. No decoding is performed +// by default. 
Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, +// gzip+b64, b64, base64 +type WriteFileEncoding string + +const ( + FluffyB64 WriteFileEncoding = "b64" + FluffyBase64 WriteFileEncoding = "base64" + FluffyGz WriteFileEncoding = "gz" + FluffyGzip WriteFileEncoding = "gzip" + GzB64 WriteFileEncoding = "gz+b64" + GzBase64 WriteFileEncoding = "gz+base64" + GzipB64 WriteFileEncoding = "gzip+b64" + GzipBase64 WriteFileEncoding = "gzip+base64" + TextPlain WriteFileEncoding = "text/plain" +) + +type RunAnsibleElement struct { + AnythingArray []interface{} + Bool *bool + Double *float64 + Integer *int64 + RunAnsibleClass *RunAnsibleClass + String *string +} + +func (x *RunAnsibleElement) UnmarshalJSON(data []byte) error { + x.AnythingArray = nil + x.RunAnsibleClass = nil + var c RunAnsibleClass + object, err := unmarshalUnion(data, &x.Integer, &x.Double, &x.Bool, &x.String, true, &x.AnythingArray, true, &c, false, nil, false, nil, true) + if err != nil { + return err + } + if object { + x.RunAnsibleClass = &c + } + return nil +} + +func (x *RunAnsibleElement) MarshalJSON() ([]byte, error) { + return marshalUnion(x.Integer, x.Double, x.Bool, x.String, x.AnythingArray != nil, x.AnythingArray, x.RunAnsibleClass != nil, x.RunAnsibleClass, false, nil, false, nil, true) +} + +type AptPipeliningUnion struct { + Bool *bool + Enum *AptPipeliningEnum + Integer *int64 +} + +func (x *AptPipeliningUnion) UnmarshalJSON(data []byte) error { + x.Enum = nil + object, err := unmarshalUnion(data, &x.Integer, nil, &x.Bool, nil, false, nil, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *AptPipeliningUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(x.Integer, nil, x.Bool, nil, false, nil, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +// Optional command to run to create the filesystem. Can include string substitutions of the +// other “fs_setup“ config keys. This is only necessary if you need to override the +// default command. +// +// Optional options to pass to the filesystem creation command. Ignored if you using “cmd“ +// directly. +// +// Snap commands to run on the target system +type Cmd struct { + String *string + StringArray []string +} + +func (x *Cmd) UnmarshalJSON(data []byte) error { + x.StringArray = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Cmd) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, false, nil, false, nil, false, nil, false) +} + +// List of “username:password“ pairs. Each user will have the corresponding password set. +// A password can be randomly generated by specifying “RANDOM“ or “R“ as a user's +// password. A hashed password, created by a tool like “mkpasswd“, can be specified. A +// regex (“r'\$(1|2a|2y|5|6)(\$.+){2}'“) is used to determine if a password value should +// be treated as a hash. 
+type ListUnion struct { + String *string + StringArray []string +} + +func (x *ListUnion) UnmarshalJSON(data []byte) error { + x.StringArray = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *ListUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, false, nil, false, nil, false, nil, false) +} + +type CloudConfigModuleElement struct { + AnythingArray []interface{} + Enum *CloudConfigModuleEnum +} + +func (x *CloudConfigModuleElement) UnmarshalJSON(data []byte) error { + x.AnythingArray = nil + x.Enum = nil + object, err := unmarshalUnion(data, nil, nil, nil, nil, true, &x.AnythingArray, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *CloudConfigModuleElement) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, nil, x.AnythingArray != nil, x.AnythingArray, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +type GrubPCInstallDevicesEmpty struct { + Bool *bool + String *string +} + +func (x *GrubPCInstallDevicesEmpty) UnmarshalJSON(data []byte) error { + object, err := unmarshalUnion(data, nil, nil, &x.Bool, &x.String, false, nil, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *GrubPCInstallDevicesEmpty) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, x.Bool, x.String, false, nil, false, nil, false, nil, false, nil, false) +} + +type CloudconfigGroups struct { + AnythingArray []interface{} + GroupsClass *GroupsClass + String *string +} + +func (x *CloudconfigGroups) UnmarshalJSON(data []byte) error { + x.AnythingArray = nil + x.GroupsClass = nil + var c GroupsClass + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.AnythingArray, true, &c, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + x.GroupsClass = &c + } + return nil +} + +func (x *CloudconfigGroups) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.AnythingArray != nil, x.AnythingArray, x.GroupsClass != nil, x.GroupsClass, false, nil, false, nil, false) +} + +// The utility to use for resizing. Default: ``auto`` +// +// Possible options: +// +// * ``auto`` - Use any available utility +// +// * ``growpart`` - Use growpart utility +// +// * ``gpart`` - Use BSD gpart utility +// +// * ``off`` - Take no action +type ModeUnion struct { + Bool *bool + Enum *ModeMode +} + +func (x *ModeUnion) UnmarshalJSON(data []byte) error { + x.Enum = nil + object, err := unmarshalUnion(data, nil, nil, &x.Bool, nil, false, nil, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *ModeUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, x.Bool, nil, false, nil, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +// Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using +// ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If +// ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every +// boot. 
Default: ``false`` +type ManageEtcHostsUnion struct { + Bool *bool + Enum *ManageEtcHostsEnum +} + +func (x *ManageEtcHostsUnion) UnmarshalJSON(data []byte) error { + x.Enum = nil + object, err := unmarshalUnion(data, nil, nil, &x.Bool, nil, false, nil, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *ManageEtcHostsUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, x.Bool, nil, false, nil, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +type MergeHow struct { + MergeHowElementArray []MergeHowElement + String *string +} + +func (x *MergeHow) UnmarshalJSON(data []byte) error { + x.MergeHowElementArray = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.MergeHowElementArray, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *MergeHow) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.MergeHowElementArray != nil, x.MergeHowElementArray, false, nil, false, nil, false, nil, false) +} + +type AllUnion struct { + AllClass *AllClass + String *string + StringArray []string +} + +func (x *AllUnion) UnmarshalJSON(data []byte) error { + x.StringArray = nil + x.AllClass = nil + var c AllClass + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, true, &c, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + x.AllClass = &c + } + return nil +} + +func (x *AllUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, x.AllClass != nil, x.AllClass, false, nil, false, nil, false) +} + +type PackageElement struct { + PackageClass *PackageClass + String *string + StringArray []string +} + +func (x *PackageElement) UnmarshalJSON(data []byte) error { + x.StringArray = nil + x.PackageClass = nil + var c PackageClass + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, true, &c, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + x.PackageClass = &c + } + return nil +} + +func (x *PackageElement) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, x.PackageClass != nil, x.PackageClass, false, nil, false, nil, false) +} + +// A list of keys to post or ``all``. Default: ``all`` +type PostUnion struct { + Enum *PurplePost + EnumArray []PostElement +} + +func (x *PostUnion) UnmarshalJSON(data []byte) error { + x.EnumArray = nil + x.Enum = nil + object, err := unmarshalUnion(data, nil, nil, nil, nil, true, &x.EnumArray, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *PostUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, nil, x.EnumArray != nil, x.EnumArray, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +// Apply state change only if condition is met. May be boolean true (always met), false +// (never met), or a command string or list to be executed. For command formatting, see the +// documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. 
+// Default: ``true`` +type Condition struct { + AnythingArray []interface{} + Bool *bool + String *string +} + +func (x *Condition) UnmarshalJSON(data []byte) error { + x.AnythingArray = nil + object, err := unmarshalUnion(data, nil, nil, &x.Bool, &x.String, true, &x.AnythingArray, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Condition) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, x.Bool, x.String, x.AnythingArray != nil, x.AnythingArray, false, nil, false, nil, false, nil, false) +} + +// Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer +// specifying the number of minutes to delay. Default: ``now`` +type Delay struct { + Integer *int64 + String *string +} + +func (x *Delay) UnmarshalJSON(data []byte) error { + object, err := unmarshalUnion(data, &x.Integer, nil, nil, &x.String, false, nil, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Delay) MarshalJSON() ([]byte, error) { + return marshalUnion(x.Integer, nil, nil, x.String, false, nil, false, nil, false, nil, false, nil, false) +} + +// Whether to resize the root partition. ``noblock`` will resize in the background. Default: +// ``true`` +type ResizeRootfsUnion struct { + Bool *bool + Enum *ResizeRootfsEnum +} + +func (x *ResizeRootfsUnion) UnmarshalJSON(data []byte) error { + x.Enum = nil + object, err := unmarshalUnion(data, nil, nil, &x.Bool, nil, false, nil, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *ResizeRootfsUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, x.Bool, nil, false, nil, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +type ConfigElement struct { + ConfigConfig *ConfigConfig + String *string +} + +func (x *ConfigElement) UnmarshalJSON(data []byte) error { + x.ConfigConfig = nil + var c ConfigConfig + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, false, nil, true, &c, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + x.ConfigConfig = &c + } + return nil +} + +func (x *ConfigElement) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, false, nil, x.ConfigConfig != nil, x.ConfigConfig, false, nil, false, nil, false) +} + +// The command to use to reload the rsyslog service after the config has been updated. If +// this is set to ``auto``, then an appropriate command for the distro will be used. This is +// the default behavior. To manually set the command, use a list of command args (e.g. +// ``[systemctl, restart, rsyslog]``). 
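+// ServiceReloadCommandUnion models that choice: the literal ``auto`` decodes into Enum, +// while a list of command arguments decodes into StringArray.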
+type ServiceReloadCommandUnion struct { + Enum *ServiceReloadCommandEnum + StringArray []string +} + +func (x *ServiceReloadCommandUnion) UnmarshalJSON(data []byte) error { + x.StringArray = nil + x.Enum = nil + object, err := unmarshalUnion(data, nil, nil, nil, nil, true, &x.StringArray, false, nil, false, nil, true, &x.Enum, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *ServiceReloadCommandUnion) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, nil, x.StringArray != nil, x.StringArray, false, nil, false, nil, x.Enum != nil, x.Enum, false) +} + +type RuncmdElement struct { + String *string + StringArray []string +} + +func (x *RuncmdElement) UnmarshalJSON(data []byte) error { + x.StringArray = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, false, nil, false, nil, false, nil, true) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *RuncmdElement) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, false, nil, false, nil, false, nil, true) +} + +// Properly-signed snap assertions which will run before and snap ``commands``. +type Assertions struct { + StringArray []string + StringMap map[string]string +} + +func (x *Assertions) UnmarshalJSON(data []byte) error { + x.StringArray = nil + x.StringMap = nil + object, err := unmarshalUnion(data, nil, nil, nil, nil, true, &x.StringArray, false, nil, true, &x.StringMap, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Assertions) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, nil, x.StringArray != nil, x.StringArray, false, nil, x.StringMap != nil, x.StringMap, false, nil, false) +} + +// Snap commands to run on the target system +type Commands struct { + UnionArray []Cmd + UnionMap map[string]*Cmd +} + +func (x *Commands) UnmarshalJSON(data []byte) error { + x.UnionArray = nil + x.UnionMap = nil + object, err := unmarshalUnion(data, nil, nil, nil, nil, true, &x.UnionArray, false, nil, true, &x.UnionMap, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Commands) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, nil, x.UnionArray != nil, x.UnionArray, false, nil, x.UnionMap != nil, x.UnionMap, false, nil, false) +} + +// The maxsize in bytes of the swap file +// +// The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the +// format where units are one of B, K, M, G or T. **WARNING: Attempts to +// use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in +// unexpected behavior. SI prefixes names (KB, MB) are required on pre-23.1 cloud-init, +// however IEC values are used. In summary, assume 1KB == 1024B, not 1000B** +type Size struct { + Integer *int64 + String *string +} + +func (x *Size) UnmarshalJSON(data []byte) error { + object, err := unmarshalUnion(data, &x.Integer, nil, nil, &x.String, false, nil, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Size) MarshalJSON() ([]byte, error) { + return marshalUnion(x.Integer, nil, nil, x.String, false, nil, false, nil, false, nil, false, nil, false) +} + +// The ``user`` dictionary values override the ``default_user`` configuration from +// ``/etc/cloud/cloud.cfg``. 
The `user` dictionary keys supported for the default_user are +// the same as the ``users`` schema. +type CloudconfigUser struct { + PurpleSchemaCloudConfigV1 *PurpleSchemaCloudConfigV1 + String *string +} + +func (x *CloudconfigUser) UnmarshalJSON(data []byte) error { + x.PurpleSchemaCloudConfigV1 = nil + var c PurpleSchemaCloudConfigV1 + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, false, nil, true, &c, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + x.PurpleSchemaCloudConfigV1 = &c + } + return nil +} + +func (x *CloudconfigUser) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, false, nil, x.PurpleSchemaCloudConfigV1 != nil, x.PurpleSchemaCloudConfigV1, false, nil, false, nil, false) +} + +// Optional comma-separated string of groups to add the user to. +type UserGroups struct { + AnythingMap map[string]interface{} + String *string + StringArray []string +} + +func (x *UserGroups) UnmarshalJSON(data []byte) error { + x.StringArray = nil + x.AnythingMap = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, false, nil, true, &x.AnythingMap, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *UserGroups) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, false, nil, x.AnythingMap != nil, x.AnythingMap, false, nil, false) +} + +type Sudo struct { + Bool *bool + String *string +} + +func (x *Sudo) UnmarshalJSON(data []byte) error { + object, err := unmarshalUnion(data, nil, nil, &x.Bool, &x.String, false, nil, false, nil, false, nil, false, nil, true) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Sudo) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, x.Bool, x.String, false, nil, false, nil, false, nil, false, nil, true) +} + +// The command to run before any vendor scripts. Its primary use case is for profiling a +// script, not to prevent its run +// +// The user's ID. 
Default value [system default] +type Uid struct { + Integer *int64 + String *string +} + +func (x *Uid) UnmarshalJSON(data []byte) error { + object, err := unmarshalUnion(data, &x.Integer, nil, nil, &x.String, false, nil, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Uid) MarshalJSON() ([]byte, error) { + return marshalUnion(x.Integer, nil, nil, x.String, false, nil, false, nil, false, nil, false, nil, false) +} + +type Users struct { + AnythingMap map[string]interface{} + String *string + UnionArray []UsersUser +} + +func (x *Users) UnmarshalJSON(data []byte) error { + x.UnionArray = nil + x.AnythingMap = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.UnionArray, false, nil, true, &x.AnythingMap, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Users) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.UnionArray != nil, x.UnionArray, false, nil, x.AnythingMap != nil, x.AnythingMap, false, nil, false) +} + +type UsersUser struct { + FluffySchemaCloudConfigV1 *FluffySchemaCloudConfigV1 + String *string + StringArray []string +} + +func (x *UsersUser) UnmarshalJSON(data []byte) error { + x.StringArray = nil + x.FluffySchemaCloudConfigV1 = nil + var c FluffySchemaCloudConfigV1 + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.StringArray, true, &c, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + x.FluffySchemaCloudConfigV1 = &c + } + return nil +} + +func (x *UsersUser) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.StringArray != nil, x.StringArray, x.FluffySchemaCloudConfigV1 != nil, x.FluffySchemaCloudConfigV1, false, nil, false, nil, false) +} + +// The command to run before any vendor scripts. 
Its primary use case is for profiling a +// script, not to prevent its run +type Prefix struct { + String *string + UnionArray []Uid +} + +func (x *Prefix) UnmarshalJSON(data []byte) error { + x.UnionArray = nil + object, err := unmarshalUnion(data, nil, nil, nil, &x.String, true, &x.UnionArray, false, nil, false, nil, false, nil, false) + if err != nil { + return err + } + if object { + } + return nil +} + +func (x *Prefix) MarshalJSON() ([]byte, error) { + return marshalUnion(nil, nil, nil, x.String, x.UnionArray != nil, x.UnionArray, false, nil, false, nil, false, nil, false) +} + +func unmarshalUnion(data []byte, pi **int64, pf **float64, pb **bool, ps **string, haveArray bool, pa interface{}, haveObject bool, pc interface{}, haveMap bool, pm interface{}, haveEnum bool, pe interface{}, nullable bool) (bool, error) { + if pi != nil { + *pi = nil + } + if pf != nil { + *pf = nil + } + if pb != nil { + *pb = nil + } + if ps != nil { + *ps = nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + tok, err := dec.Token() + if err != nil { + return false, err + } + + switch v := tok.(type) { + case json.Number: + if pi != nil { + i, err := v.Int64() + if err == nil { + *pi = &i + return false, nil + } + } + if pf != nil { + f, err := v.Float64() + if err == nil { + *pf = &f + return false, nil + } + return false, errors.New("Unparsable number") + } + return false, errors.New("Union does not contain number") + case float64: + return false, errors.New("Decoder should not return float64") + case bool: + if pb != nil { + *pb = &v + return false, nil + } + return false, errors.New("Union does not contain bool") + case string: + if haveEnum { + return false, json.Unmarshal(data, pe) + } + if ps != nil { + *ps = &v + return false, nil + } + return false, errors.New("Union does not contain string") + case nil: + if nullable { + return false, nil + } + return false, errors.New("Union does not contain null") + case json.Delim: + if v == '{' { + if haveObject { + return true, json.Unmarshal(data, pc) + } + if haveMap { + return false, json.Unmarshal(data, pm) + } + return false, errors.New("Union does not contain object") + } + if v == '[' { + if haveArray { + return false, json.Unmarshal(data, pa) + } + return false, errors.New("Union does not contain array") + } + return false, errors.New("Cannot handle delimiter") + } + return false, errors.New("Cannot unmarshal union") + +} + +func marshalUnion(pi *int64, pf *float64, pb *bool, ps *string, haveArray bool, pa interface{}, haveObject bool, pc interface{}, haveMap bool, pm interface{}, haveEnum bool, pe interface{}, nullable bool) ([]byte, error) { + if pi != nil { + return json.Marshal(*pi) + } + if pf != nil { + return json.Marshal(*pf) + } + if pb != nil { + return json.Marshal(*pb) + } + if ps != nil { + return json.Marshal(*ps) + } + if haveArray { + return json.Marshal(pa) + } + if haveObject { + return json.Marshal(pc) + } + if haveMap { + return json.Marshal(pm) + } + if haveEnum { + return json.Marshal(pe) + } + if nullable { + return json.Marshal(nil) + } + return nil, errors.New("Union must not be null") +} diff --git a/pkg/util/cloudinit/schema/schema-cloud-config-v1.json b/pkg/util/cloudinit/schema/schema-cloud-config-v1.json new file mode 100644 index 000000000..77ed7a3d8 --- /dev/null +++ b/pkg/util/cloudinit/schema/schema-cloud-config-v1.json @@ -0,0 +1,3892 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "$defs": { + "all_modules": { + "enum": [ + "ansible", + "apk-configure", + "apk_configure", + 
"apt-configure", + "apt_configure", + "apt-pipelining", + "apt_pipelining", + "bootcmd", + "byobu", + "ca-certs", + "ca_certs", + "chef", + "disable-ec2-metadata", + "disable_ec2_metadata", + "disk-setup", + "disk_setup", + "fan", + "final-message", + "final_message", + "growpart", + "grub-dpkg", + "grub_dpkg", + "install-hotplug", + "install_hotplug", + "keyboard", + "keys-to-console", + "keys_to_console", + "landscape", + "locale", + "lxd", + "mcollective", + "migrator", + "mounts", + "ntp", + "package-update-upgrade-install", + "package_update_upgrade_install", + "phone-home", + "phone_home", + "power-state-change", + "power_state_change", + "puppet", + "reset-rmc", + "reset_rmc", + "resizefs", + "resolv-conf", + "resolv_conf", + "rh-subscription", + "rh_subscription", + "rightscale-userdata", + "rightscale_userdata", + "rsyslog", + "runcmd", + "salt-minion", + "salt_minion", + "scripts-per-boot", + "scripts_per_boot", + "scripts-per-instance", + "scripts_per_instance", + "scripts-per-once", + "scripts_per_once", + "scripts-user", + "scripts_user", + "scripts-vendor", + "scripts_vendor", + "seed-random", + "seed_random", + "set-hostname", + "set_hostname", + "set-passwords", + "set_passwords", + "snap", + "spacewalk", + "ssh", + "ssh-authkey-fingerprints", + "ssh_authkey_fingerprints", + "ssh-import-id", + "ssh_import_id", + "timezone", + "ubuntu-advantage", + "ubuntu_advantage", + "ubuntu-autoinstall", + "ubuntu_autoinstall", + "ubuntu-drivers", + "ubuntu_drivers", + "update-etc-hosts", + "update_etc_hosts", + "update-hostname", + "update_hostname", + "users-groups", + "users_groups", + "wireguard", + "write-files", + "write_files", + "write-files-deferred", + "write_files_deferred", + "yum-add-repo", + "yum_add_repo", + "zypper-add-repo", + "zypper_add_repo" + ] + }, + "users_groups.groups_by_groupname": { + "additionalProperties": false, + "patternProperties": { + "^.+$": { + "label": "", + "description": "Optional string of single username or a list of usernames to add to the group", + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 1 + } + } + }, + "users_groups.user": { + "oneOf": [ + { + "required": [ + "name" + ] + }, + { + "required": [ + "snapuser" + ] + } + ], + "additionalProperties": false, + "properties": { + "name": { + "description": "The user's login name. Required otherwise user creation will be skipped for this user.", + "type": "string" + }, + "doas": { + "description": "List of doas rules to add for a user. doas or opendoas must be installed for rules to take effect.", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "expiredate": { + "default": null, + "description": "Optional. Date on which the user's account will be disabled. 
Default: ``null``", + "type": "string", + "format": "date" + }, + "gecos": { + "description": "Optional comment about the user, usually a comma-separated string of real name and contact information", + "type": "string" + }, + "groups": { + "description": "Optional comma-separated string of groups to add the user to.", + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": [ + "string" + ] + }, + "minItems": 1 + }, + { + "type": "object", + "patternProperties": { + "^.+$": { + "label": "", + "description": "When providing an object for users.groups the ```` keys are the groups to add this user to", + "deprecated": true, + "deprecated_version": "23.1", + "type": [ + "null" + ], + "minItems": 1 + } + }, + "hidden": [ + "patternProperties" + ] + } + ] + }, + "homedir": { + "description": "Optional home dir for user. Default: ``/home/``", + "default": "``/home/``", + "type": "string" + }, + "inactive": { + "description": "Optional string representing the number of days until the user is disabled. ", + "type": "string" + }, + "lock-passwd": { + "default": true, + "type": "boolean", + "description": "Default: ``true``", + "deprecated": true, + "deprecated_version": "22.3", + "deprecated_description": "Use ``lock_passwd`` instead." + }, + "lock_passwd": { + "default": true, + "description": "Disable password login. Default: ``true``", + "type": "boolean" + }, + "no_create_home": { + "default": false, + "description": "Do not create home directory. Default: ``false``", + "type": "boolean" + }, + "no_log_init": { + "default": false, + "description": "Do not initialize lastlog and faillog for user. Default: ``false``", + "type": "boolean" + }, + "no_user_group": { + "default": false, + "description": "Do not create group named after user. Default: ``false``", + "type": "boolean" + }, + "passwd": { + "description": "Hash of user password applied when user does not exist. This will NOT be applied if the user already exists. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000`` **Note:** Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", + "type": "string" + }, + "hashed_passwd": { + "description": "Hash of user password to be applied. This will be applied even if the user is preexisting. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. **Note:** Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", + "type": "string" + }, + "plain_text_passwd": { + "description": "Clear text of user password to be applied. This will be applied even if the user is preexisting. **Note:** SSH keys or certificates are a safer choice for logging in to your system. For local escalation, supplying a hashed password is a safer choice than plain text. Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. An exposed plain text password is an immediate security concern. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", + "type": "string" + }, + "create_groups": { + "default": true, + "description": "Boolean set ``false`` to disable creation of specified user ``groups``. 
Default: ``true``.", + "type": "boolean" + }, + "primary_group": { + "default": "````", + "description": "Primary group for user. Default: ````", + "type": "string" + }, + "selinux_user": { + "description": "SELinux user for user's login. Default: the default SELinux user.", + "type": "string" + }, + "shell": { + "description": "Path to the user's login shell. Default: the host system's default shell.", + "type": "string" + }, + "snapuser": { + "description": " Specify an email address to create the user as a Snappy user through ``snap create-user``. If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there.", + "type": "string" + }, + "ssh_authorized_keys": { + "description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "ssh_import_id": { + "description": "List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. See the man page[1] for more details. [1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "ssh_redirect_user": { + "type": "boolean", + "default": false, + "description": "Boolean set to true to disable SSH logins for this user. When specified, all cloud meta-data public SSH keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the ``default_username`` for this instance. Default: ``false``. This key can not be combined with ``ssh_import_id`` or ``ssh_authorized_keys``." + }, + "system": { + "description": "Optional. Create user as system user with no home directory. Default: ``false``.", + "type": "boolean", + "default": false + }, + "sudo": { + "oneOf": [ + { + "type": [ + "string", + "null" + ], + "description": "Sudo rule to use or false. Absence of a sudo value or ``null`` will result in no sudo rules added for this user." + }, + { + "type": "boolean", + "changed": true, + "changed_version": "22.2", + "changed_description": "The value ``false`` is deprecated for this key, use ``null`` instead." + } + ] + }, + "uid": { + "description": "The user's ID. Default value [system default]", + "oneOf": [ + { + "type": "integer" + }, + { + "type": "string", + "changed": true, + "changed_description": "The use of ``string`` type is deprecated. Use an ``integer`` instead.", + "changed_version": "22.3" + } + ] + } + } + }, + "apt_configure.mirror": { + "type": "array", + "items": { + "type": "object", + "required": [ + "arches" + ], + "additionalProperties": false, + "properties": { + "arches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "uri": { + "type": "string", + "format": "uri" + }, + "search": { + "type": "array", + "items": { + "type": "string", + "format": "uri" + }, + "minItems": 1 + }, + "search_dns": { + "type": "boolean" + }, + "keyid": { + "type": "string" + }, + "key": { + "type": "string" + }, + "keyserver": { + "type": "string" + } + } + }, + "minItems": 1 + }, + "ca_certs.properties": { + "type": "object", + "additionalProperties": false, + "properties": { + "remove-defaults": { + "type": "boolean", + "default": false, + "deprecated": true, + "deprecated_version": "22.3", + "deprecated_description": "Use ``remove_defaults`` instead." + }, + "remove_defaults": { + "description": "Remove default CA certificates if true. 
Default: ``false``", + "type": "boolean", + "default": false + }, + "trusted": { + "description": "List of trusted CA certificates to add.", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "minProperties": 1 + }, + "modules_definition": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/$defs/all_modules" + }, + { + "type": "array", + "prefixItems": [ + { + "enum": { + "$ref": "#/$defs/all_modules" + } + }, + { + "enum": [ + "always", + "once", + "once-per-instance" + ] + } + ] + } + ] + } + }, + "merge_defintion": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "settings" + ], + "properties": { + "name": { + "type": "string", + "enum": [ + "list", + "dict", + "str" + ] + }, + "settings": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "allow_delete", + "no_replace", + "replace", + "append", + "prepend", + "recurse_dict", + "recurse_list", + "recurse_array", + "recurse_str" + ] + } + } + } + } + } + ] + }, + "base_config": { + "type": "object", + "properties": { + "cloud_init_modules": { + "$ref": "#/$defs/modules_definition" + }, + "cloud_config_modules": { + "$ref": "#/$defs/modules_definition" + }, + "cloud_final_modules": { + "$ref": "#/$defs/modules_definition" + }, + "launch-index": { + "type": "integer", + "description": "The launch index for the specified cloud-config." + }, + "merge_how": { + "$ref": "#/$defs/merge_defintion" + }, + "merge_type": { + "$ref": "#/$defs/merge_defintion" + } + } + }, + "cc_ubuntu_autoinstall": { + "type": "object", + "properties": { + "autoinstall": { + "description": "Opaque autoinstall schema definition for Ubuntu autoinstall. Full schema processed by live-installer. See: https://ubuntu.com/server/docs/install/autoinstall-reference", + "type": "object", + "properties": { + "version": { + "type": "integer" + } + }, + "required": [ + "version" + ] + } + }, + "additionalProperties": true + }, + "package_item_definition": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 2, + "maxItems": 2 + }, + { + "type": "string" + } + ] + }, + "cc_ansible": { + "type": "object", + "properties": { + "ansible": { + "type": "object", + "additionalProperties": false, + "properties": { + "install_method": { + "type": "string", + "default": "distro", + "enum": [ + "distro", + "pip" + ], + "description": "The type of installation for ansible. It can be one of the following values:\n\n - ``distro``\n - ``pip``" + }, + "run_user": { + "type": "string", + "description": "User to run module commands as. If install_method: pip, the pip install runs as this user as well." + }, + "ansible_config": { + "description": "Sets the ANSIBLE_CONFIG environment variable. 
If set, overrides default config.", + "type": "string" + }, + "setup_controller": { + "type": "object", + "additionalProperties": false, + "properties": { + "repositories": { + "type": "array", + "items": { + "required": [ + "path", + "source" + ], + "type": "object", + "additionalProperties": false, + "properties": { + "path": { + "type": "string" + }, + "source": { + "type": "string" + } + } + } + }, + "run_ansible": { + "type": "array", + "items": { + "properties": { + "playbook_name": { + "type": "string" + }, + "playbook_dir": { + "type": "string" + }, + "become_password_file": { + "type": "string" + }, + "connection_password_file": { + "type": "string" + }, + "list_hosts": { + "type": "boolean", + "default": false + }, + "syntax_check": { + "type": "boolean", + "default": false + }, + "timeout": { + "type": "number", + "minimum": 0 + }, + "vault_id": { + "type": "string" + }, + "vault_password_file": { + "type": "string" + }, + "background": { + "type": "number", + "minimum": 0 + }, + "check": { + "type": "boolean", + "default": false + }, + "diff": { + "type": "boolean", + "default": false + }, + "module_path": { + "type": "string" + }, + "poll": { + "type": "number", + "minimum": 0 + }, + "args": { + "type": "string" + }, + "extra_vars": { + "type": "string" + }, + "forks": { + "type": "number", + "minimum": 0 + }, + "inventory": { + "type": "string" + }, + "scp_extra_args": { + "type": "string" + }, + "sftp_extra_args": { + "type": "string" + }, + "private_key": { + "type": "string" + }, + "connection": { + "type": "string" + }, + "module_name": { + "type": "string" + }, + "sleep": { + "type": "string" + }, + "tags": { + "type": "string" + }, + "skip_tags": { + "type": "string" + } + } + } + } + } + }, + "galaxy": { + "required": [ + "actions" + ], + "type": "object", + "additionalProperties": false, + "properties": { + "actions": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string", + "pattern": "^.*$" + } + } + } + } + }, + "package_name": { + "type": "string", + "default": "ansible" + }, + "pull": { + "required": [ + "url", + "playbook_name" + ], + "type": "object", + "additionalProperties": false, + "properties": { + "accept_host_key": { + "type": "boolean", + "default": false + }, + "clean": { + "type": "boolean", + "default": false + }, + "full": { + "type": "boolean", + "default": false + }, + "diff": { + "type": "boolean", + "default": false + }, + "ssh_common_args": { + "type": "string" + }, + "scp_extra_args": { + "type": "string" + }, + "sftp_extra_args": { + "type": "string" + }, + "private_key": { + "type": "string" + }, + "checkout": { + "type": "string" + }, + "module_path": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "url": { + "type": "string" + }, + "connection": { + "type": "string" + }, + "vault_id": { + "type": "string" + }, + "vault_password_file": { + "type": "string" + }, + "module_name": { + "type": "string" + }, + "sleep": { + "type": "string" + }, + "tags": { + "type": "string" + }, + "skip_tags": { + "type": "string" + }, + "playbook_name": { + "type": "string" + } + } + } + } + } + } + }, + "cc_apk_configure": { + "type": "object", + "properties": { + "apk_repos": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "preserve_repositories": { + "type": "boolean", + "default": false, + "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos 
section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``." + }, + "alpine_repo": { + "type": [ + "object", + "null" + ], + "additionalProperties": false, + "properties": { + "base_url": { + "type": "string", + "default": "https://alpine.global.ssl.fastly.net/alpine", + "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``" + }, + "community_enabled": { + "type": "boolean", + "default": false, + "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included." + }, + "testing_enabled": { + "type": "boolean", + "default": false, + "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos." + }, + "version": { + "type": "string", + "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)" + } + }, + "required": [ + "version" + ], + "minProperties": 1 + }, + "local_repo_base_url": { + "type": "string", + "description": "The base URL of an Alpine repository containing unofficial packages" + } + } + } + } + }, + "cc_apt_configure": { + "properties": { + "apt": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "preserve_sources_list": { + "type": "boolean", + "default": false, + "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``." + }, + "disable_suites": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out." + }, + "primary": { + "$ref": "#/$defs/apt_configure.mirror", + "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. 
Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``-mirror``, then it is assumed that there is a distro mirror at ``http://-mirror./``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``" + }, + "security": { + "$ref": "#/$defs/apt_configure.mirror", + "description": "Please refer to the primary config documentation" + }, + "add_apt_repo_match": { + "type": "string", + "default": "^[\\w-]+:\\w", + "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``" + }, + "debconf_selections": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^.+$": { + "type": "string" + } + }, + "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``" + }, + "sources_list": { + "type": "string", + "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. 
Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``" + }, + "conf": { + "type": "string", + "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multi-line APT configuration, make sure to follow YAML syntax." + }, + "https_proxy": { + "type": "string", + "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``." + }, + "http_proxy": { + "type": "string", + "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``." + }, + "proxy": { + "type": "string", + "description": "Alias for defining a http APT proxy." + }, + "ftp_proxy": { + "type": "string", + "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``." + }, + "sources": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.+$": { + "type": "object", + "additionalProperties": false, + "properties": { + "source": { + "type": "string" + }, + "keyid": { + "type": "string" + }, + "key": { + "type": "string" + }, + "keyserver": { + "type": "string" + }, + "filename": { + "type": "string" + }, + "append": { + "type": "boolean", + "default": true + } + }, + "minProperties": 1 + } + }, + "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file.\n - ``append``: If ``true``, append to sources file, otherwise overwrite it. 
Default: ``true``.\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``" + } + } + } + } + }, + "cc_apt_pipelining": { + "type": "object", + "properties": { + "apt_pipelining": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "boolean" + }, + { + "type": "string", + "oneOf": [ + { + "enum": [ + "os" + ] + }, + { + "deprecated": true, + "deprecated_version": "22.4", + "deprecated_description": "Use ``os`` instead.", + "enum": [ + "none", + "unchanged" + ] + } + ] + } + ] + } + } + }, + "cc_bootcmd": { + "type": "object", + "properties": { + "bootcmd": { + "type": "array", + "items": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ] + }, + "additionalItems": false, + "minItems": 1 + } + } + }, + "cc_byobu": { + "type": "object", + "properties": { + "byobu_by_default": { + "type": "string", + "enum": [ + "enable-system", + "enable-user", + "disable-system", + "disable-user", + "enable", + "disable", + "user", + "system" + ] + } + } + }, + "cc_ca_certs": { + "type": "object", + "properties": { + "ca_certs": { + "$ref": "#/$defs/ca_certs.properties" + }, + "ca-certs": { + "allOf": [ + { + "$ref": "#/$defs/ca_certs.properties" + }, + { + "deprecated": true, + "deprecated_version": "22.3", + "deprecated_description": "Use ``ca_certs`` instead." + } + ] + } + } + }, + "cc_chef": { + "type": "object", + "properties": { + "chef": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "directories": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``" + }, + "validation_cert": { + "type": "string", + "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file." + }, + "validation_key": { + "type": "string", + "default": "/etc/chef/validation.pem", + "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``" + }, + "firstboot_path": { + "type": "string", + "default": "/etc/chef/firstboot.json", + "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``" + }, + "exec": { + "type": "boolean", + "default": false, + "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)." + }, + "client_key": { + "type": "string", + "default": "/etc/chef/client.pem", + "description": "Optional path for client_cert. Default: ``/etc/chef/client.pem``." + }, + "encrypted_data_bag_secret": { + "type": "string", + "default": null, + "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it." + }, + "environment": { + "type": "string", + "default": "_default", + "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration." 
+ }, + "file_backup_path": { + "type": "string", + "default": "/var/backups/chef", + "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location." + }, + "file_cache_path": { + "type": "string", + "default": "/var/cache/chef", + "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location." + }, + "json_attribs": { + "type": "string", + "default": "/etc/chef/firstboot.json", + "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location." + }, + "log_level": { + "type": "string", + "default": ":info", + "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``." + }, + "log_location": { + "type": "string", + "default": "/var/log/chef/client.log", + "description": "Specifies the location of the chef log file. By default, the location is specified at ``/var/log/chef/client.log``." + }, + "node_name": { + "type": "string", + "description": "The name of the node to run. By default, we will use th instance id as the node name." + }, + "omnibus_url": { + "type": "string", + "default": "https://www.chef.io/chef/install.sh", + "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``." + }, + "omnibus_url_retries": { + "type": "integer", + "default": 5, + "description": "The number of retries that will be attempted to reach the Omnibus URL. Default: ``5``." + }, + "omnibus_version": { + "type": "string", + "description": "Optional version string to require for omnibus install." + }, + "pid_file": { + "type": "string", + "default": "/var/run/chef/client.pid", + "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location." + }, + "server_url": { + "type": "string", + "description": "The URL for the chef server" + }, + "show_time": { + "type": "boolean", + "default": true, + "description": "Show time in chef logs" + }, + "ssl_verify_mode": { + "type": "string", + "default": ":verify_none", + "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``." + }, + "validation_name": { + "type": "string", + "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run." + }, + "force_install": { + "type": "boolean", + "default": false, + "description": "If set to ``true``, forces chef installation, even if it is already installed." + }, + "initial_attributes": { + "type": "object", + "items": { + "type": "string" + }, + "description": "Specify a list of initial attributes used by the cookbooks." + }, + "install_type": { + "type": "string", + "default": "packages", + "enum": [ + "packages", + "gems", + "omnibus" + ], + "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``" + }, + "run_list": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A run list for a first boot json." 
+ }, + "chef_license": { + "type": "string", + "description": "string that indicates if user accepts or not license related to some of chef products" + } + } + } + } + }, + "cc_disable_ec2_metadata": { + "type": "object", + "properties": { + "disable_ec2_metadata": { + "default": false, + "description": "Set true to disable IPv4 routes to EC2 metadata. Default: ``false``", + "type": "boolean" + } + } + }, + "cc_disk_setup": { + "type": "object", + "properties": { + "device_aliases": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.+$": { + "label": "", + "type": "string", + "description": "Path to disk to be aliased by this name." + } + } + }, + "disk_setup": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.+$": { + "label": "", + "type": "object", + "additionalProperties": false, + "properties": { + "table_type": { + "type": "string", + "default": "mbr", + "enum": [ + "mbr", + "gpt" + ], + "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``." + }, + "layout": { + "default": false, + "oneOf": [ + { + "type": "string", + "enum": [ + "remove" + ] + }, + { + "type": "boolean" + }, + { + "type": "array", + "items": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "array", + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "minItems": 2, + "maxItems": 2 + } + ] + } + } + ], + "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). The partition type defaults to '83' (Linux partition), for other types of partition, such as Linux swap, the type must be passed as part of a list along with the size. Default: ``false``." + }, + "overwrite": { + "type": "boolean", + "default": false, + "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``" + } + } + } + } + }, + "fs_setup": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "label": { + "type": "string", + "description": "Label for the filesystem." + }, + "filesystem": { + "type": "string", + "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``" + }, + "device": { + "type": "string", + "description": "Specified either as a path or as an alias in the format ``.`` where ```` denotes the partition number on the device. If specifying device using the ``.`` format, the value of ``partition`` will be overwritten." 
+ }, + "partition": { + "type": [ + "string", + "integer" + ], + "oneOf": [ + { + "type": "string", + "enum": [ + "auto", + "any", + "none" + ] + } + ], + "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``filesystem`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any filesystem that matches ``filesystem`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``." + }, + "overwrite": { + "type": "boolean", + "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``" + }, + "replace_fs": { + "type": "string", + "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``." + }, + "extra_opts": { + "type": [ + "array", + "string" + ], + "items": { + "type": "string" + }, + "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly." + }, + "cmd": { + "type": [ + "array", + "string" + ], + "items": { + "type": "string" + }, + "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command." + } + } + } + } + } + }, + "cc_fan": { + "type": "object", + "properties": { + "fan": { + "type": "object", + "required": [ + "config" + ], + "additionalProperties": false, + "properties": { + "config": { + "type": "string", + "description": "The fan configuration to use as a single multi-line string" + }, + "config_path": { + "type": "string", + "default": "/etc/network/fan", + "description": "The path to write the fan configuration to. Default: ``/etc/network/fan``" + } + } + } + } + }, + "cc_final_message": { + "type": "object", + "properties": { + "final_message": { + "type": "string", + "description": "The message to display at the end of the run" + } + } + }, + "cc_growpart": { + "type": "object", + "properties": { + "growpart": { + "type": "object", + "additionalProperties": false, + "properties": { + "mode": { + "default": "auto", + "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action", + "oneOf": [ + { + "enum": [ + "auto", + "growpart", + "gpart", + "off" + ] + }, + { + "enum": [ + false + ], + "changed": true, + "changed_version": "22.3", + "changed_description": "Specifying a boolean ``false`` value for ``mode`` is deprecated. Use ``off`` instead." + } + ] + }, + "devices": { + "type": "array", + "default": [ + "/" + ], + "items": { + "type": "string" + }, + "description": "The devices to resize. Each entry can either be the path to the device's mountpoint in the filesystem or a path to the block device in '/dev'. 
Default: ``[/]``" + }, + "ignore_growroot_disabled": { + "type": "boolean", + "default": false, + "description": "If ``true``, ignore the presence of ``/etc/growroot-disabled``. If ``false`` and the file exists, then don't resize. Default: ``false``" + } + } + } + } + }, + "cc_grub_dpkg": { + "type": "object", + "properties": { + "grub_dpkg": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "description": "Whether to configure which device is used as the target for grub installation. Default: ``true``" + }, + "grub-pc/install_devices": { + "type": "string", + "description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device" + }, + "grub-pc/install_devices_empty": { + "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``", + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "changed": true, + "changed_version": "22.3", + "changed_description": "Use a boolean value instead." + } + ] + }, + "grub-efi/install_devices": { + "type": "string", + "description": "Partition to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot/efi`` will be used to find the partition" + } + } + }, + "grub-dpkg": { + "type": "object", + "description": "An alias for ``grub_dpkg``", + "deprecated": true, + "deprecated_version": "22.2", + "deprecated_description": "Use ``grub_dpkg`` instead." + } + } + }, + "cc_install_hotplug": { + "type": "object", + "properties": { + "updates": { + "type": "object", + "additionalProperties": false, + "properties": { + "network": { + "type": "object", + "required": [ + "when" + ], + "additionalProperties": false, + "properties": { + "when": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "boot-new-instance", + "boot-legacy", + "boot", + "hotplug" + ] + } + } + } + } + } + } + } + }, + "cc_keyboard": { + "type": "object", + "properties": { + "keyboard": { + "type": "object", + "additionalProperties": false, + "properties": { + "layout": { + "type": "string", + "description": "Required. Keyboard layout. Corresponds to XKBLAYOUT." + }, + "model": { + "type": "string", + "default": "pc105", + "description": "Optional. Keyboard model. Corresponds to XKBMODEL. Default: ``pc105``." + }, + "variant": { + "type": "string", + "description": "Required for Alpine Linux, optional otherwise. Keyboard variant. Corresponds to XKBVARIANT." + }, + "options": { + "type": "string", + "description": "Optional. Keyboard options. Corresponds to XKBOPTIONS." + } + }, + "required": [ + "layout" + ] + } + } + }, + "cc_keys_to_console": { + "type": "object", + "properties": { + "ssh": { + "type": "object", + "additionalProperties": false, + "properties": { + "emit_keys_to_console": { + "type": "boolean", + "default": true, + "description": "Set false to avoid printing SSH keys to system console. Default: ``true``." 
+ } + }, + "required": [ + "emit_keys_to_console" + ] + }, + "ssh_key_console_blacklist": { + "type": "array", + "default": [], + "description": "Avoid printing matching SSH key types to the system console.", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "ssh_fp_console_blacklist": { + "type": "array", + "description": "Avoid printing matching SSH fingerprints to the system console.", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + }, + "cc_landscape": { + "type": "object", + "properties": { + "landscape": { + "type": "object", + "required": [ + "client" + ], + "additionalProperties": false, + "properties": { + "client": { + "type": "object", + "additionalProperties": true, + "required": [ + "account_name", + "computer_title" + ], + "properties": { + "url": { + "type": "string", + "default": "https://landscape.canonical.com/message-system", + "description": "The Landscape server URL to connect to. Default: ``https://landscape.canonical.com/message-system``." + }, + "ping_url": { + "type": "string", + "default": "https://landscape.canonical.com/ping", + "description": "The URL to perform lightweight exchange initiation with. Default: ``https://landscape.canonical.com/ping``." + }, + "data_path": { + "type": "string", + "default": "/var/lib/landscape/client", + "description": "The directory to store data files in. Default: ``/var/lib/land\u2010scape/client/``." + }, + "log_level": { + "type": "string", + "default": "info", + "enum": [ + "debug", + "info", + "warning", + "error", + "critical" + ], + "description": "The log level for the client. Default: ``info``." + }, + "computer_title": { + "type": "string", + "description": "The title of this computer." + }, + "account_name": { + "type": "string", + "description": "The account this computer belongs to." + }, + "registration_key": { + "type": "string", + "description": "The account-wide key used for registering clients." + }, + "tags": { + "type": "string", + "pattern": "^[-_0-9a-zA-Z]+(,[-_0-9a-zA-Z]+)*$", + "description": "Comma separated list of tag names to be sent to the server." + }, + "http_proxy": { + "type": "string", + "description": "The URL of the HTTP proxy, if one is needed." + }, + "https_proxy": { + "type": "string", + "description": "The URL of the HTTPS proxy, if one is needed." + } + } + } + } + } + } + }, + "cc_locale": { + "properties": { + "locale": { + "type": "string", + "description": "The locale to set as the system's locale (e.g. ar_PS)" + }, + "locale_configfile": { + "type": "string", + "description": "The file in which to write the locale configuration (defaults to the distro's default location)" + } + } + }, + "cc_lxd": { + "type": "object", + "properties": { + "lxd": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "init": { + "type": "object", + "additionalProperties": false, + "description": "LXD init configuration values to provide to `lxd init --auto` command. Can not be combined with ``lxd.preseed``.", + "properties": { + "network_address": { + "type": "string", + "description": "IP address for LXD to listen on" + }, + "network_port": { + "type": "integer", + "description": "Network port to bind LXD to." + }, + "storage_backend": { + "type": "string", + "enum": [ + "zfs", + "dir", + "lvm", + "btrfs" + ], + "default": "dir", + "description": "Storage backend to use. Default: ``dir``." 
+ }, + "storage_create_device": { + "type": "string", + "description": "Setup device based storage using DEVICE" + }, + "storage_create_loop": { + "type": "integer", + "description": "Setup loop based storage with SIZE in GB" + }, + "storage_pool": { + "type": "string", + "description": "Name of storage pool to use or create" + }, + "trust_password": { + "type": "string", + "description": "The password required to add new clients" + } + } + }, + "bridge": { + "type": "object", + "required": [ + "mode" + ], + "additionalProperties": false, + "description": "LXD bridge configuration provided to setup the host lxd bridge. Can not be combined with ``lxd.preseed``.", + "properties": { + "mode": { + "type": "string", + "description": "Whether to setup LXD bridge, use an existing bridge by ``name`` or create a new bridge. `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching ``name`` and `new` will create a new bridge.", + "enum": [ + "none", + "existing", + "new" + ] + }, + "name": { + "type": "string", + "description": "Name of the LXD network bridge to attach or create. Default: ``lxdbr0``.", + "default": "lxdbr0" + }, + "mtu": { + "type": "integer", + "description": "Bridge MTU, defaults to LXD's default value", + "default": -1, + "minimum": -1 + }, + "ipv4_address": { + "type": "string", + "description": "IPv4 address for the bridge. If set, ``ipv4_netmask`` key required." + }, + "ipv4_netmask": { + "type": "integer", + "description": "Prefix length for the ``ipv4_address`` key. Required when ``ipv4_address`` is set." + }, + "ipv4_dhcp_first": { + "type": "string", + "description": "First IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_last`` key to set LXC ``ipv4.dhcp.ranges``." + }, + "ipv4_dhcp_last": { + "type": "string", + "description": "Last IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_first`` key to set LXC ``ipv4.dhcp.ranges``." + }, + "ipv4_dhcp_leases": { + "type": "integer", + "description": "Number of DHCP leases to allocate within the range. Automatically calculated based on `ipv4_dhcp_first` and `ipv4_dchp_last` when unset." + }, + "ipv4_nat": { + "type": "boolean", + "default": false, + "description": "Set ``true`` to NAT the IPv4 traffic allowing for a routed IPv4 network. Default: ``false``." + }, + "ipv6_address": { + "type": "string", + "description": "IPv6 address for the bridge (CIDR notation). When set, ``ipv6_netmask`` key is required. When absent, no IPv6 will be configured." + }, + "ipv6_netmask": { + "type": "integer", + "description": "Prefix length for ``ipv6_address`` provided. Required when ``ipv6_address`` is set." + }, + "ipv6_nat": { + "type": "boolean", + "default": false, + "description": "Whether to NAT. Default: ``false``." + }, + "domain": { + "type": "string", + "description": "Domain to advertise to DHCP clients and use for DNS resolution." + } + } + }, + "preseed": { + "type": "string", + "description": "Opaque LXD preseed YAML config passed via stdin to the command: lxd init --preseed. See: https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#non-interactive-configuration or lxd init --dump for viable config. Can not be combined with either ``lxd.init`` or ``lxd.bridge``." 
+ } + } + } + } + }, + "cc_mcollective": { + "type": "object", + "properties": { + "mcollective": { + "type": "object", + "additionalProperties": false, + "properties": { + "conf": { + "type": "object", + "additionalProperties": false, + "properties": { + "public-cert": { + "type": "string", + "description": "Optional value of server public certificate which will be written to ``/etc/mcollective/ssl/server-public.pem``" + }, + "private-cert": { + "type": "string", + "description": "Optional value of server private certificate which will be written to ``/etc/mcollective/ssl/server-private.pem``" + } + }, + "patternProperties": { + "^.+$": { + "description": "Optional config key: value pairs which will be appended to ``/etc/mcollective/server.cfg``.", + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "string" + } + ] + } + } + } + } + } + } + }, + "cc_migrator": { + "type": "object", + "properties": { + "migrate": { + "type": "boolean", + "default": true, + "description": "Whether to migrate legacy cloud-init semaphores to new format. Default: ``true``" + } + } + }, + "cc_mounts": { + "type": "object", + "properties": { + "mounts": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "maxItems": 6 + }, + "description": "List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount declaration with less than 6 items will get remaining values from ``mount_default_fields``. A mount declaration with only `fs_spec` and no `fs_file` mountpoint will be skipped.", + "minItems": 1 + }, + "mount_default_fields": { + "type": "array", + "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.requires=cloud-init.service,_netdev``", + "default": [ + null, + null, + "auto", + "defaults,nofail,x-systemd.requires=cloud-init.service", + "0", + "2" + ], + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "minItems": 6, + "maxItems": 6 + }, + "swap": { + "type": "object", + "additionalProperties": false, + "properties": { + "filename": { + "type": "string", + "description": "Path to the swap file to create" + }, + "size": { + "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format where units are one of B, K, M, G or T. **WARNING: Attempts to use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in unexpected behavior. SI prefixes names (KB, MB) are required on pre-23.1 cloud-init, however IEC values are used. In summary, assume 1KB == 1024B, not 1000B**", + "oneOf": [ + { + "enum": [ + "auto" + ] + }, + { + "type": "integer" + }, + { + "type": "string", + "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$" + } + ] + }, + "maxsize": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "string", + "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$" + } + ], + "description": "The maxsize in bytes of the swap file" + } + } + } + } + }, + "cc_ntp": { + "type": "object", + "properties": { + "ntp": { + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "properties": { + "pools": { + "type": "array", + "items": { + "type": "string", + "format": "hostname" + }, + "uniqueItems": true, + "description": "List of ntp pools. 
If both pools and servers are\nempty, 4 default pool servers will be provided of\nthe format ``{0-3}.{distro}.pool.ntp.org``. NOTE:\nfor Alpine Linux when using the Busybox NTP client\nthis setting will be ignored due to the limited\nfunctionality of Busybox's ntpd." + }, + "servers": { + "type": "array", + "items": { + "type": "string", + "format": "hostname" + }, + "uniqueItems": true, + "description": "List of ntp servers. If both pools and servers are\nempty, 4 default pool servers will be provided with\nthe format ``{0-3}.{distro}.pool.ntp.org``." + }, + "peers": { + "type": "array", + "items": { + "type": "string", + "format": "hostname" + }, + "uniqueItems": true, + "description": "List of ntp peers." + }, + "allow": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "List of CIDRs to allow" + }, + "ntp_client": { + "type": "string", + "default": "auto", + "description": "Name of an NTP client to use to configure system NTP.\nWhen unprovided or 'auto' the default client preferred\nby the distribution will be used. The following\nbuilt-in client names can be used to override existing\nconfiguration defaults: chrony, ntp, openntpd,\nntpdate, systemd-timesyncd." + }, + "enabled": { + "type": "boolean", + "default": true, + "description": "Attempt to enable ntp clients if set to True. If set\nto False, ntp client will not be configured or\ninstalled" + }, + "config": { + "description": "Configuration settings or overrides for the\n``ntp_client`` specified.", + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "confpath": { + "type": "string", + "description": "The path to where the ``ntp_client``\nconfiguration is written." + }, + "check_exe": { + "type": "string", + "description": "The executable name for the ``ntp_client``.\nFor example, ntp service ``check_exe`` is\n'ntpd' because it runs the ntpd binary." + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "List of packages needed to be installed for the\nselected ``ntp_client``." + }, + "service_name": { + "type": "string", + "description": "The systemd or sysvinit service name used to\nstart and stop the ``ntp_client``\nservice." + }, + "template": { + "type": "string", + "description": "Inline template allowing users to customize their ``ntp_client`` configuration with the use of the Jinja templating engine.\nThe template content should start with ``## template:jinja``.\nWithin the template, you can utilize any of the following ntp module config keys: ``servers``, ``pools``, ``allow``, and ``peers``.\nEach cc_ntp schema config key and expected value type is defined above." + } + } + } + } + } + } + }, + "cc_package_update_upgrade_install": { + "type": "object", + "properties": { + "packages": { + "type": "array", + "description": "An array containing either a package specification, or an object consisting of a package manager key having a package specification value . 
A package specification can be either a package name or a list with two entries, the first being the package name and the second being the specific package version to install.", + "items": { + "oneOf": [ + { + "type": "object", + "properties": { + "apt": { + "type": "array", + "items": { + "$ref": "#/$defs/package_item_definition" + } + }, + "snap": { + "type": "array", + "items": { + "$ref": "#/$defs/package_item_definition" + } + } + }, + "additionalProperties": false + }, + { + "$ref": "#/$defs/package_item_definition" + } + ] + }, + "minItems": 1 + }, + "package_update": { + "type": "boolean", + "default": false, + "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``" + }, + "package_upgrade": { + "type": "boolean", + "default": false, + "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``" + }, + "package_reboot_if_required": { + "type": "boolean", + "default": false, + "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``" + }, + "apt_update": { + "type": "boolean", + "default": false, + "description": "Default: ``false``.", + "deprecated": true, + "deprecated_version": "22.2", + "deprecated_description": "Use ``package_update`` instead." + }, + "apt_upgrade": { + "type": "boolean", + "default": false, + "description": "Default: ``false``.", + "deprecated": true, + "deprecated_version": "22.2", + "deprecated_description": "Use ``package_upgrade`` instead." + }, + "apt_reboot_if_required": { + "type": "boolean", + "default": false, + "description": "Default: ``false``.", + "deprecated": true, + "deprecated_version": "22.2", + "deprecated_description": "Use ``package_reboot_if_required`` instead." + } + } + }, + "cc_phone_home": { + "type": "object", + "properties": { + "phone_home": { + "type": "object", + "required": [ + "url" + ], + "additionalProperties": false, + "properties": { + "url": { + "type": "string", + "format": "uri", + "description": "The URL to send the phone home data to." + }, + "post": { + "description": "A list of keys to post or ``all``. Default: ``all``", + "oneOf": [ + { + "enum": [ + "all" + ] + }, + { + "type": "array", + "items": { + "type": "string", + "enum": [ + "pub_key_rsa", + "pub_key_ecdsa", + "pub_key_ed25519", + "instance_id", + "hostname", + "fqdn" + ] + } + } + ] + }, + "tries": { + "type": "integer", + "description": "The number of times to try sending the phone home data. Default: ``10``", + "default": 10 + } + } + } + } + }, + "cc_power_state_change": { + "type": "object", + "properties": { + "power_state": { + "type": "object", + "required": [ + "mode" + ], + "additionalProperties": false, + "properties": { + "delay": { + "description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``", + "default": "now", + "oneOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "string", + "pattern": "^\\+?[0-9]+$", + "changed": true, + "changed_version": "22.3", + "changed_description": "Use of type string for this value is deprecated. Use ``now`` or integer type." 
+ }, + { + "enum": [ + "now" + ] + } + ] + }, + "mode": { + "description": "Must be one of ``poweroff``, ``halt``, or ``reboot``.", + "type": "string", + "enum": [ + "poweroff", + "reboot", + "halt" + ] + }, + "message": { + "description": "Optional message to display to the user when the system is powering off or rebooting.", + "type": "string" + }, + "timeout": { + "description": "Time in seconds to wait for the cloud-init process to finish before executing shutdown. Default: ``30``", + "type": "integer", + "default": 30 + }, + "condition": { + "description": "Apply state change only if condition is met. May be boolean true (always met), false (never met), or a command string or list to be executed. For command formatting, see the documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. Default: ``true``", + "default": true, + "oneOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "array" + } + ] + } + } + } + } + }, + "cc_puppet": { + "type": "object", + "properties": { + "puppet": { + "type": "object", + "additionalProperties": false, + "properties": { + "install": { + "type": "boolean", + "default": true, + "description": "Whether or not to install puppet. Setting to ``false`` will result in an error if puppet is not already present on the system. Default: ``true``" + }, + "version": { + "type": "string", + "description": "Optional version to pass to the installer script or package manager. If unset, the latest version from the repos will be installed." + }, + "install_type": { + "type": "string", + "description": "Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs repositories can be installed by setting ``aio``. Based on this setting, the default config/SSL/CSR paths will be adjusted accordingly. Default: ``packages``", + "enum": [ + "packages", + "aio" + ], + "default": "packages" + }, + "collection": { + "type": "string", + "description": "Puppet collection to install if ``install_type`` is ``aio``. This can be set to one of ``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly counterparts) in order to install specific release streams." + }, + "aio_install_url": { + "type": "string", + "description": "If ``install_type`` is ``aio``, change the url of the install script." + }, + "cleanup": { + "type": "boolean", + "default": true, + "description": "Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio`` Default: ``true``" + }, + "conf_file": { + "type": "string", + "description": "The path to the puppet config file. Default depends on ``install_type``" + }, + "ssl_dir": { + "type": "string", + "description": "The path to the puppet SSL directory. Default depends on ``install_type``" + }, + "csr_attributes_path": { + "type": "string", + "description": "The path to the puppet csr attributes file. Default depends on ``install_type``" + }, + "package_name": { + "type": "string", + "description": "Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet``" + }, + "exec": { + "type": "boolean", + "default": false, + "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting ``exec`` to ``true``, and additional arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the ``--test`` flag). 
Default: ``false``" + }, + "exec_args": { + "type": "array", + "description": "A list of arguments to pass to 'puppet agent' if 'exec' is true Default: ``['--test']``", + "items": { + "type": "string" + } + }, + "start_service": { + "type": "boolean", + "default": true, + "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set ``start_service`` to ``false``" + }, + "conf": { + "type": "object", + "description": "Every key present in the conf object will be added to puppet.conf. As such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The configuration is specified as a dictionary containing high-level ``
`` keys and lists of ``=`` pairs within each section. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively.\n\n``ca_cert`` is a special case. It won't be added to puppet.conf. It holds the puppetserver certificate in pem format. It should be a multi-line string (using the | YAML notation for multi-line strings).", + "additionalProperties": false, + "properties": { + "main": { + "type": "object" + }, + "server": { + "type": "object" + }, + "agent": { + "type": "object" + }, + "user": { + "type": "object" + }, + "ca_cert": { + "type": "string" + } + } + }, + "csr_attributes": { + "type": "object", + "description": "create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html", + "additionalProperties": false, + "properties": { + "custom_attributes": { + "type": "object" + }, + "extension_requests": { + "type": "object" + } + } + } + } + } + } + }, + "cc_resizefs": { + "type": "object", + "properties": { + "resize_rootfs": { + "enum": [ + true, + false, + "noblock" + ], + "description": "Whether to resize the root partition. ``noblock`` will resize in the background. Default: ``true``" + } + } + }, + "cc_resolv_conf": { + "type": "object", + "properties": { + "manage_resolv_conf": { + "type": "boolean", + "default": false, + "description": "Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this is set to ``true``. Default: ``false``" + }, + "resolv_conf": { + "type": "object", + "additionalProperties": false, + "properties": { + "nameservers": { + "type": "array", + "description": "A list of nameservers to use to be added as ``nameserver`` lines" + }, + "searchdomains": { + "type": "array", + "description": "A list of domains to be added ``search`` line" + }, + "domain": { + "type": "string", + "description": "The domain to be added as ``domain`` line" + }, + "sortlist": { + "type": "array", + "description": "A list of IP addresses to be added to ``sortlist`` line" + }, + "options": { + "type": "object", + "description": "Key/value pairs of options to go under ``options`` heading. A unary option should be specified as ``true``" + } + } + } + } + }, + "cc_rh_subscription": { + "type": "object", + "properties": { + "rh_subscription": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username to use. Must be used with password. Should not be used with ``activation-key`` or ``org``" + }, + "password": { + "type": "string", + "description": "The password to use. Must be used with username. Should not be used with ``activation-key`` or ``org``" + }, + "activation-key": { + "type": "string", + "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``" + }, + "org": { + "type": "integer", + "description": "The organization number to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``" + }, + "auto-attach": { + "type": "boolean", + "description": "Whether to attach subscriptions automatically" + }, + "service-level": { + "type": "string", + "description": "The service level to use when subscribing to RH repositories. 
``auto-attach`` must be true for this to be used" + }, + "add-pool": { + "type": "array", + "description": "A list of pools ids add to the subscription", + "items": { + "type": "string" + } + }, + "enable-repo": { + "type": "array", + "description": "A list of repositories to enable", + "items": { + "type": "string" + } + }, + "disable-repo": { + "type": "array", + "description": "A list of repositories to disable", + "items": { + "type": "string" + } + }, + "rhsm-baseurl": { + "type": "string", + "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``" + }, + "server-hostname": { + "type": "string", + "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``" + } + } + } + } + }, + "cc_rsyslog": { + "type": "object", + "properties": { + "rsyslog": { + "type": "object", + "additionalProperties": false, + "properties": { + "config_dir": { + "type": "string", + "description": "The directory where rsyslog configuration files will be written. Default: ``/etc/rsyslog.d``" + }, + "config_filename": { + "type": "string", + "description": "The name of the rsyslog configuration file. Default: ``20-cloud-config.conf``" + }, + "configs": { + "type": "array", + "description": "Each entry in ``configs`` is either a string or an object. Each config entry contains a configuration string and a file to write it to. For config entries that are an object, ``filename`` sets the target filename and ``content`` specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of the ``config_filename`` key is used. A file with the selected filename will be written inside the directory specified by ``config_dir``.", + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + "required": [ + "content" + ], + "additionalProperties": false, + "properties": { + "filename": { + "type": "string" + }, + "content": { + "type": "string" + } + } + } + ] + } + }, + "remotes": { + "type": "object", + "description": "Each key is the name for an rsyslog remote entry. Each value holds the contents of the remote config for rsyslog. The config consists of the following parts:\n\n- filter for log messages (defaults to ``*.*``)\n\n- optional leading ``@`` or ``@@``, indicating udp and tcp respectively (defaults to ``@``, for udp)\n\n- ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]`` format, (e.g. ``@[fd00::1]:514``)\n\n- optional port number (defaults to ``514``)\n\nThis module will provide sane defaults for any part of the remote entry that is not specified, so in most cases remote hosts can be specified just using ``:
``." + }, + "service_reload_command": { + "description": "The command to use to reload the rsyslog service after the config has been updated. If this is set to ``auto``, then an appropriate command for the distro will be used. This is the default behavior. To manually set the command, use a list of command args (e.g. ``[systemctl, restart, rsyslog]``).", + "oneOf": [ + { + "enum": [ + "auto" + ] + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + }, + "install_rsyslog": { + "default": false, + "description": "Install rsyslog. Default: ``false``", + "type": "boolean" + }, + "check_exe": { + "type": "string", + "description": "The executable name for the rsyslog daemon.\nFor example, ``rsyslogd``, or ``/opt/sbin/rsyslogd`` if the rsyslog binary is in an unusual path. This is only used if ``install_rsyslog`` is ``true``. Default: ``rsyslogd``" + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "List of packages needed to be installed for rsyslog. This is only used if ``install_rsyslog`` is ``true``. Default: ``[rsyslog]``" + } + } + } + } + }, + "cc_runcmd": { + "type": "object", + "properties": { + "runcmd": { + "type": "array", + "items": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "minItems": 1 + } + } + }, + "cc_salt_minion": { + "type": "object", + "properties": { + "salt_minion": { + "type": "object", + "additionalProperties": false, + "properties": { + "pkg_name": { + "type": "string", + "description": "Package name to install. Default: ``salt-minion``" + }, + "service_name": { + "type": "string", + "description": "Service name to enable. Default: ``salt-minion``" + }, + "config_dir": { + "type": "string", + "description": "Directory to write config files to. Default: ``/etc/salt``" + }, + "conf": { + "type": "object", + "description": "Configuration to be written to `config_dir`/minion" + }, + "grains": { + "type": "object", + "description": "Configuration to be written to `config_dir`/grains" + }, + "public_key": { + "type": "string", + "description": "Public key to be used by the salt minion" + }, + "private_key": { + "type": "string", + "description": "Private key to be used by salt minion" + }, + "pki_dir": { + "type": "string", + "description": "Directory to write key files. Default: `config_dir`/pki/minion" + } + } + } + } + }, + "cc_scripts_vendor": { + "type": "object", + "properties": { + "vendor_data": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "description": "Whether vendor data is enabled or not. Default: ``true``", + "oneOf": [ + { + "type": "boolean", + "default": true + }, + { + "type": "string", + "deprecated": true, + "deprecated_version": "22.3", + "deprecated_description": "Use of type string for this value is deprecated. Use a boolean instead." + } + ] + }, + "prefix": { + "type": [ + "array", + "string" + ], + "items": { + "type": [ + "string", + "integer" + ] + }, + "description": "The command to run before any vendor scripts. Its primary use case is for profiling a script, not to prevent its run" + } + } + } + } + }, + "cc_seed_random": { + "type": "object", + "properties": { + "random_seed": { + "type": "object", + "additionalProperties": false, + "properties": { + "file": { + "type": "string", + "default": "/dev/urandom", + "description": "File to write random data to. 
Default: ``/dev/urandom``" + }, + "data": { + "type": "string", + "description": "This data will be written to ``file`` before data from the datasource. When using a multi-line value or specifying binary data, be sure to follow YAML syntax and use the ``|`` and ``!binary`` YAML format specifiers when appropriate" + }, + "encoding": { + "type": "string", + "default": "raw", + "enum": [ + "raw", + "base64", + "b64", + "gzip", + "gz" + ], + "description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``" + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of ``file`` above." + }, + "command_required": { + "type": "boolean", + "default": false, + "description": "If true, and ``command`` is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only debug error is mentioned. Default: ``false``" + } + } + } + } + }, + "cc_set_hostname": { + "type": "object", + "properties": { + "preserve_hostname": { + "type": "boolean", + "default": false, + "description": "If true, the hostname will not be changed. Default: ``false``" + }, + "hostname": { + "type": "string", + "description": "The hostname to set" + }, + "fqdn": { + "type": "string", + "description": "The fully qualified domain name to set" + }, + "prefer_fqdn_over_hostname": { + "type": "boolean", + "description": "If true, the fqdn will be used if it is set. If false, the hostname will be used. If unset, the result is distro-dependent" + }, + "create_hostname_file": { + "type": "boolean", + "default": true, + "description": "If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not exist. On systems that use systemd, setting create_hostname_file to ``false`` will set the hostname transiently. If ``true``, the hostname file will always be created and the hostname will be set statically on systemd systems. Default: ``true``" + } + } + }, + "cc_set_passwords": { + "type": "object", + "properties": { + "ssh_pwauth": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "changed": true, + "changed_version": "22.3", + "changed_description": "Use of non-boolean values for this field is deprecated." + } + ], + "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default: leave the value unchanged. In order for this config to be applied, SSH may need to be restarted. On systemd systems, this restart will only happen if the SSH service has already been started. On non-systemd systems, a restart will be attempted regardless of the service state." + }, + "chpasswd": { + "type": "object", + "additionalProperties": false, + "properties": { + "expire": { + "type": "boolean", + "default": true, + "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``" + }, + "users": { + "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``. 
Randomly generated passwords may be insecure, use at your own risk.", + "type": "array", + "items": { + "minItems": 1, + "type": "object", + "anyOf": [ + { + "required": [ + "name", + "type" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "type": { + "enum": [ + "RANDOM" + ], + "type": "string" + } + } + }, + { + "required": [ + "name", + "password" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "type": { + "enum": [ + "hash", + "text" + ], + "default": "hash", + "type": "string" + }, + "password": { + "type": "string" + } + } + } + ] + } + }, + "list": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string", + "pattern": "^.+:.+$" + } + } + ], + "minItems": 1, + "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.", + "deprecated": true, + "deprecated_version": "22.2", + "deprecated_description": "Use ``users`` instead." + } + } + }, + "password": { + "type": "string", + "description": "Set the default user's password. Ignored if ``chpasswd`` ``list`` is used" + } + } + }, + "cc_snap": { + "type": "object", + "properties": { + "snap": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "properties": { + "assertions": { + "description": "Properly-signed snap assertions which will run before and snap ``commands``.", + "type": [ + "object", + "array" + ], + "items": { + "type": "string" + }, + "additionalItems": false, + "minItems": 1, + "minProperties": 1, + "uniqueItems": true, + "additionalProperties": { + "type": "string" + } + }, + "commands": { + "type": [ + "object", + "array" + ], + "description": "Snap commands to run on the target system", + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + }, + "additionalItems": false, + "minItems": 1, + "minProperties": 1, + "additionalProperties": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + } + } + } + } + } + }, + "cc_spacewalk": { + "type": "object", + "properties": { + "spacewalk": { + "type": "object", + "additionalProperties": false, + "properties": { + "server": { + "type": "string", + "description": "The Spacewalk server to use" + }, + "proxy": { + "type": "string", + "description": "The proxy to use when connecting to Spacewalk" + }, + "activation_key": { + "type": "string", + "description": "The activation key to use when registering with Spacewalk" + } + } + } + } + }, + "cc_ssh_authkey_fingerprints": { + "type": "object", + "properties": { + "no_ssh_fingerprints": { + "type": "boolean", + "default": false, + "description": "If true, SSH fingerprints will not be written. Default: ``false``" + }, + "authkey_hash": { + "type": "string", + "default": "sha256", + "description": "The hash type to use when generating SSH fingerprints. 
Default: ``sha256``" + } + } + }, + "cc_ssh_import_id": { + "type": "object", + "properties": { + "ssh_import_id": { + "type": "array", + "items": { + "type": "string", + "description": "The SSH public key to import" + } + } + } + }, + "cc_ssh": { + "type": "object", + "properties": { + "ssh_keys": { + "type": "object", + "description": "A dictionary entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``_private``, ``_public``, and, optionally, ``_certificate``, e.g. ``rsa_private: ``, ``rsa_public: ``, and ``rsa_certificate: ``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multi-line private host keys and certificates, use YAML multi-line syntax. **Note:** Your ssh keys might possibly be visible to unprivileged users on your system, depending on your cloud's security model.", + "additionalProperties": false, + "patternProperties": { + "^(ecdsa|ed25519|rsa)_(public|private|certificate)$": { + "label": "", + "type": "string" + } + } + }, + "ssh_authorized_keys": { + "type": "array", + "minItems": 1, + "description": "The SSH public keys to add ``.ssh/authorized_keys`` in the default user's home directory", + "items": { + "type": "string" + } + }, + "ssh_deletekeys": { + "type": "boolean", + "default": true, + "description": "Remove host SSH keys. This prevents re-use of a private host key from an image with default host SSH keys. Default: ``true``" + }, + "ssh_genkeytypes": { + "type": "array", + "description": "The SSH key types to generate. Default: ``[rsa, ecdsa, ed25519]``", + "default": [ + "ecdsa", + "ed25519", + "rsa" + ], + "minItems": 1, + "items": { + "type": "string", + "enum": [ + "ecdsa", + "ed25519", + "rsa" + ] + } + }, + "disable_root": { + "type": "boolean", + "default": true, + "description": "Disable root login. Default: ``true``" + }, + "disable_root_opts": { + "type": "string", + "default": "``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``", + "description": "Disable root login options. If ``disable_root_opts`` is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``" + }, + "allow_public_ssh_keys": { + "type": "boolean", + "default": true, + "description": "If ``true``, will import the public SSH keys from the datasource's metadata to the user's ``.ssh/authorized_keys`` file. Default: ``true``" + }, + "ssh_quiet_keygen": { + "type": "boolean", + "default": false, + "description": "If ``true``, will suppress the output of key generation to the console. Default: ``false``" + }, + "ssh_publish_hostkeys": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "description": "If true, will read host keys from ``/etc/ssh/*.pub`` and publish them to the datasource (if supported). Default: ``true``" + }, + "blacklist": { + "type": "array", + "description": "The SSH key types to ignore when publishing. 
Default: ``[]`` to publish all SSH key types", + "items": { + "type": "string" + } + } + } + } + } + }, + "cc_timezone": { + "type": "object", + "properties": { + "timezone": { + "type": "string", + "description": "The timezone to use as represented in /usr/share/zoneinfo" + } + } + }, + "cc_ubuntu_advantage": { + "type": "object", + "properties": { + "ubuntu_advantage": { + "type": "object", + "additionalProperties": false, + "properties": { + "enable": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional list of ubuntu-advantage services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults. Passing beta services here will cause an error." + }, + "enable_beta": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional list of ubuntu-advantage beta services to enable. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults." + }, + "token": { + "type": "string", + "description": "Contract token obtained from https://ubuntu.com/advantage to attach. Required for non-Pro instances." + }, + "features": { + "type": "object", + "description": "Ubuntu Advantage features.", + "additionalProperties": false, + "properties": { + "disable_auto_attach": { + "type": "boolean", + "description": "Optional boolean for controlling if ua-auto-attach.service (in Ubuntu Pro instances) will be attempted each boot. Default: ``false``", + "default": false + } + } + }, + "config": { + "type": "object", + "description": "Configuration settings or override Ubuntu Advantage config.", + "additionalProperties": true, + "properties": { + "http_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "Ubuntu Advantage HTTP Proxy URL or null to unset." + }, + "https_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "Ubuntu Advantage HTTPS Proxy URL or null to unset." + }, + "global_apt_http_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + }, + "global_apt_https_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + }, + "ua_apt_http_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTP Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + }, + "ua_apt_https_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTPS Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. 
Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + } + } + } + } + } + } + }, + "cc_ubuntu_drivers": { + "type": "object", + "properties": { + "drivers": { + "type": "object", + "additionalProperties": false, + "properties": { + "nvidia": { + "type": "object", + "required": [ + "license-accepted" + ], + "additionalProperties": false, + "properties": { + "license-accepted": { + "type": "boolean", + "description": "Do you accept the NVIDIA driver license?" + }, + "version": { + "type": "string", + "description": "The version of the driver to install (e.g. \"390\", \"410\"). Default: latest version." + } + } + } + } + } + } + }, + "cc_update_etc_hosts": { + "type": "object", + "properties": { + "manage_etc_hosts": { + "default": false, + "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``", + "oneOf": [ + { + "enum": [ + true, + false, + "localhost" + ] + }, + { + "enum": [ + "template" + ], + "changed_description": "Use of ``template`` is deprecated, use ``true`` instead.", + "changed": true, + "changed_version": "22.3" + } + ] + }, + "fqdn": { + "type": "string", + "description": "Optional fully qualified domain name to use when updating ``/etc/hosts``. Preferred over ``hostname`` if both are provided. In absence of ``hostname`` and ``fqdn`` in cloud-config, the ``local-hostname`` value will be used from datasource metadata." + }, + "hostname": { + "type": "string", + "description": "Hostname to set when rendering ``/etc/hosts``. If ``fqdn`` is set, the hostname extracted from ``fqdn`` overrides ``hostname``." + } + } + }, + "cc_update_hostname": { + "type": "object", + "properties": { + "preserve_hostname": { + "type": "boolean", + "default": false, + "description": "Do not update system hostname when ``true``. Default: ``false``." + }, + "prefer_fqdn_over_hostname": { + "type": "boolean", + "default": null, + "description": "By default, it is distro-dependent whether cloud-init uses the short hostname or fully qualified domain name when both ``local-hostname` and ``fqdn`` are both present in instance metadata. When set ``true``, use fully qualified domain name if present as hostname instead of short hostname. When set ``false``, use ``hostname`` config value if present, otherwise fallback to ``fqdn``." + }, + "create_hostname_file": { + "type": "boolean", + "default": true, + "description": "If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not exist. On systems that use systemd, setting create_hostname_file to ``false`` will set the hostname transiently. If ``true``, the hostname file will always be created and the hostname will be set statically on systemd systems. Default: ``true``" + } + } + }, + "cc_users_groups": { + "type": "object", + "properties": { + "groups": { + "type": [ + "string", + "object", + "array" + ], + "hidden": [ + "patternProperties" + ], + "$ref": "#/$defs/users_groups.groups_by_groupname", + "items": { + "type": [ + "string", + "object" + ], + "$ref": "#/$defs/users_groups.groups_by_groupname" + }, + "minItems": 1 + }, + "user": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + "$ref": "#/$defs/users_groups.user" + } + ], + "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. 
The `user` dictionary keys supported for the default_user are the same as the ``users`` schema." + }, + "users": { + "type": [ + "string", + "array", + "object" + ], + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "object", + "$ref": "#/$defs/users_groups.user" + } + ] + } + } + } + }, + "cc_wireguard": { + "type": "object", + "properties": { + "wireguard": { + "type": [ + "null", + "object" + ], + "properties": { + "interfaces": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the interface. Typically wgx (example: wg0)" + }, + "config_path": { + "type": "string", + "description": "Path to configuration file of Wireguard interface" + }, + "content": { + "type": "string", + "description": "Wireguard interface configuration. Contains key, peer, ..." + } + }, + "additionalProperties": false + }, + "minItems": 1 + }, + "readinessprobe": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "List of shell commands to be executed as probes." + } + }, + "required": [ + "interfaces" + ], + "minProperties": 1, + "additionalProperties": false + } + } + }, + "cc_write_files": { + "type": "object", + "properties": { + "write_files": { + "type": "array", + "items": { + "type": "object", + "required": [ + "path" + ], + "additionalProperties": false, + "properties": { + "path": { + "type": "string", + "description": "Path of the file to which ``content`` is decoded and written" + }, + "content": { + "type": "string", + "default": "''", + "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``" + }, + "owner": { + "type": "string", + "default": "root:root", + "description": "Optional owner:group to chown on the file and new directories. Default: ``root:root``" + }, + "permissions": { + "type": "string", + "default": "'0o644'", + "description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``" + }, + "encoding": { + "type": "string", + "default": "text/plain", + "enum": [ + "gz", + "gzip", + "gz+base64", + "gzip+base64", + "gz+b64", + "gzip+b64", + "b64", + "base64", + "text/plain" + ], + "description": "Optional encoding type of the content. Default: ``text/plain``. No decoding is performed by default. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64" + }, + "append": { + "type": "boolean", + "default": false, + "description": "Whether to append ``content`` to existing file if ``path`` exists. Default: ``false``." + }, + "defer": { + "type": "boolean", + "default": false, + "description": "Defer writing the file until 'final' stage, after users were created, and packages were installed. Default: ``false``." + } + } + }, + "minItems": 1 + } + } + }, + "cc_yum_add_repo": { + "type": "object", + "properties": { + "yum_repo_dir": { + "type": "string", + "default": "/etc/yum.repos.d", + "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``" + }, + "yum_repos": { + "type": "object", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^[0-9a-zA-Z -_]+$": { + "label": "", + "type": "object", + "description": "Object keyed on unique yum repo IDs. 
The key used will be used to write yum repo config files in ``yum_repo_dir``/.repo.", + "additionalProperties": false, + "properties": { + "baseurl": { + "type": "string", + "format": "uri", + "description": "URL to the directory where the yum repository's 'repodata' directory lives" + }, + "name": { + "type": "string", + "description": "Optional human-readable name of the yum repo." + }, + "enabled": { + "type": "boolean", + "default": true, + "description": "Whether to enable the repo. Default: ``true``." + } + }, + "patternProperties": { + "^[0-9a-zA-Z_]+$": { + "label": "", + "oneOf": [ + { + "type": "integer" + }, + { + "type": "boolean" + }, + { + "type": "string" + } + ], + "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf" + } + }, + "required": [ + "baseurl" + ] + } + } + } + } + }, + "cc_zypper_add_repo": { + "type": "object", + "properties": { + "zypper": { + "type": "object", + "minProperties": 1, + "additionalProperties": true, + "properties": { + "repos": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "id": { + "type": "string", + "description": "The unique id of the repo, used when writing /etc/zypp/repos.d/.repo." + }, + "baseurl": { + "type": "string", + "format": "uri", + "description": "The base repositoy URL" + } + }, + "required": [ + "id", + "baseurl" + ] + }, + "minItems": 1 + }, + "config": { + "type": "object", + "description": "Any supported zypo.conf key is written to ``/etc/zypp/zypp.conf``" + } + } + } + } + }, + "output_log_operator": { + "oneOf": [ + { + "type": "string", + "description": "A filepath operation configuration. This is a string containing a filepath and an optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite or append to the file. The operator '|' redirects content to the command arguments specified." + }, + { + "type": "array", + "description": "A list specifying filepath operation configuration for stdout and stderror", + "items": { + "type": [ + "string" + ] + }, + "minItems": 2, + "maxItems": 2 + }, + { + "type": "object", + "additionalProperties": false, + "properties": { + "output": { + "type": "string", + "description": "A filepath operation configuration. This is a string containing a filepath and an optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite or append to the file. The operator '|' redirects content to the command arguments specified." + }, + "error": { + "type": "string", + "description": "A filepath operation configuration. A string containing a filepath and an optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite or append to the file. The operator '|' redirects content to the command arguments specified." 
+ } + } + } + ] + }, + "output_config": { + "type": "object", + "properties": { + "output": { + "type": "object", + "additionalProperties": false, + "properties": { + "all": { + "$ref": "#/$defs/output_log_operator" + }, + "init": { + "$ref": "#/$defs/output_log_operator" + }, + "config": { + "$ref": "#/$defs/output_log_operator" + }, + "final": { + "$ref": "#/$defs/output_log_operator" + } + } + } + } + }, + "reporting_config": { + "type": "object", + "properties": { + "reporting": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.+$": { + "label": "", + "type": "object", + "oneOf": [ + { + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "log" + ] + }, + "level": { + "type": "string", + "enum": [ + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL" + ], + "default": "DEBUG" + } + } + }, + { + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "print" + ] + } + } + }, + { + "additionalProperties": false, + "required": [ + "type", + "endpoint" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "webhook" + ] + }, + "endpoint": { + "type": "string", + "format": "uri", + "description": "The URL to send the event to." + }, + "consumer_key": { + "type": "string", + "description": "The consumer key to use for the webhook." + }, + "token_key": { + "type": "string", + "description": "The token key to use for the webhook." + }, + "token_secret": { + "type": "string", + "description": "The token secret to use for the webhook." + }, + "consumer_secret": { + "type": "string", + "description": "The consumer secret to use for the webhook." + }, + "timeout": { + "type": "number", + "minimum": 0, + "description": "The timeout in seconds to wait for a response from the webhook." + }, + "retries": { + "type": "integer", + "minimum": 0, + "description": "The number of times to retry sending the webhook." 
+ } + } + }, + { + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "hyperv" + ] + }, + "kvp_file_path": { + "type": "string", + "description": "The path to the KVP file to use for the hyperv reporter.", + "default": "/var/lib/hyperv/.kvp_pool_1" + }, + "event_types": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + ] + } + } + } + } + } + }, + "allOf": [ + { + "$ref": "#/$defs/base_config" + }, + { + "$ref": "#/$defs/cc_ansible" + }, + { + "$ref": "#/$defs/cc_apk_configure" + }, + { + "$ref": "#/$defs/cc_apt_configure" + }, + { + "$ref": "#/$defs/cc_apt_pipelining" + }, + { + "$ref": "#/$defs/cc_ubuntu_autoinstall" + }, + { + "$ref": "#/$defs/cc_bootcmd" + }, + { + "$ref": "#/$defs/cc_byobu" + }, + { + "$ref": "#/$defs/cc_ca_certs" + }, + { + "$ref": "#/$defs/cc_chef" + }, + { + "$ref": "#/$defs/cc_disable_ec2_metadata" + }, + { + "$ref": "#/$defs/cc_disk_setup" + }, + { + "$ref": "#/$defs/cc_fan" + }, + { + "$ref": "#/$defs/cc_final_message" + }, + { + "$ref": "#/$defs/cc_growpart" + }, + { + "$ref": "#/$defs/cc_grub_dpkg" + }, + { + "$ref": "#/$defs/cc_install_hotplug" + }, + { + "$ref": "#/$defs/cc_keyboard" + }, + { + "$ref": "#/$defs/cc_keys_to_console" + }, + { + "$ref": "#/$defs/cc_landscape" + }, + { + "$ref": "#/$defs/cc_locale" + }, + { + "$ref": "#/$defs/cc_lxd" + }, + { + "$ref": "#/$defs/cc_mcollective" + }, + { + "$ref": "#/$defs/cc_migrator" + }, + { + "$ref": "#/$defs/cc_mounts" + }, + { + "$ref": "#/$defs/cc_ntp" + }, + { + "$ref": "#/$defs/cc_package_update_upgrade_install" + }, + { + "$ref": "#/$defs/cc_phone_home" + }, + { + "$ref": "#/$defs/cc_power_state_change" + }, + { + "$ref": "#/$defs/cc_puppet" + }, + { + "$ref": "#/$defs/cc_resizefs" + }, + { + "$ref": "#/$defs/cc_resolv_conf" + }, + { + "$ref": "#/$defs/cc_rh_subscription" + }, + { + "$ref": "#/$defs/cc_rsyslog" + }, + { + "$ref": "#/$defs/cc_runcmd" + }, + { + "$ref": "#/$defs/cc_salt_minion" + }, + { + "$ref": "#/$defs/cc_scripts_vendor" + }, + { + "$ref": "#/$defs/cc_seed_random" + }, + { + "$ref": "#/$defs/cc_set_hostname" + }, + { + "$ref": "#/$defs/cc_set_passwords" + }, + { + "$ref": "#/$defs/cc_snap" + }, + { + "$ref": "#/$defs/cc_spacewalk" + }, + { + "$ref": "#/$defs/cc_ssh_authkey_fingerprints" + }, + { + "$ref": "#/$defs/cc_ssh_import_id" + }, + { + "$ref": "#/$defs/cc_ssh" + }, + { + "$ref": "#/$defs/cc_timezone" + }, + { + "$ref": "#/$defs/cc_ubuntu_advantage" + }, + { + "$ref": "#/$defs/cc_ubuntu_drivers" + }, + { + "$ref": "#/$defs/cc_update_etc_hosts" + }, + { + "$ref": "#/$defs/cc_update_hostname" + }, + { + "$ref": "#/$defs/cc_users_groups" + }, + { + "$ref": "#/$defs/cc_wireguard" + }, + { + "$ref": "#/$defs/cc_write_files" + }, + { + "$ref": "#/$defs/cc_yum_add_repo" + }, + { + "$ref": "#/$defs/cc_zypper_add_repo" + }, + { + "$ref": "#/$defs/reporting_config" + }, + { + "$ref": "#/$defs/output_config" + } + ], + "properties": { + "allow_public_ssh_keys": {}, + "ansible": {}, + "apk_repos": {}, + "apt": {}, + "apt_pipelining": {}, + "apt_reboot_if_required": {}, + "apt_update": {}, + "apt_upgrade": {}, + "authkey_hash": {}, + "autoinstall": {}, + "bootcmd": {}, + "byobu_by_default": {}, + "ca-certs": {}, + "ca_certs": {}, + "chef": {}, + "chpasswd": {}, + "cloud_config_modules": {}, + "cloud_final_modules": {}, + "cloud_init_modules": {}, + "create_hostname_file": {}, + "device_aliases": {}, + "disable_ec2_metadata": {}, + "disable_root": {}, + "disable_root_opts": {}, + "disk_setup": {}, + 
"drivers": {}, + "fan": {}, + "final_message": {}, + "fqdn": {}, + "fs_setup": {}, + "groups": {}, + "growpart": {}, + "grub-dpkg": {}, + "grub_dpkg": {}, + "hostname": {}, + "keyboard": {}, + "landscape": {}, + "launch-index": {}, + "locale": {}, + "locale_configfile": {}, + "lxd": {}, + "manage_etc_hosts": {}, + "manage_resolv_conf": {}, + "mcollective": {}, + "merge_how": {}, + "merge_type": {}, + "migrate": {}, + "mount_default_fields": {}, + "mounts": {}, + "no_ssh_fingerprints": {}, + "ntp": {}, + "output": {}, + "package_reboot_if_required": {}, + "package_update": {}, + "package_upgrade": {}, + "packages": {}, + "password": {}, + "phone_home": {}, + "power_state": {}, + "prefer_fqdn_over_hostname": {}, + "preserve_hostname": {}, + "puppet": {}, + "random_seed": {}, + "reporting": {}, + "resize_rootfs": {}, + "resolv_conf": {}, + "rh_subscription": {}, + "rsyslog": {}, + "runcmd": {}, + "salt_minion": {}, + "snap": {}, + "spacewalk": {}, + "ssh": {}, + "ssh_authorized_keys": {}, + "ssh_deletekeys": {}, + "ssh_fp_console_blacklist": {}, + "ssh_genkeytypes": {}, + "ssh_import_id": {}, + "ssh_key_console_blacklist": {}, + "ssh_keys": {}, + "ssh_publish_hostkeys": {}, + "ssh_pwauth": {}, + "ssh_quiet_keygen": {}, + "swap": {}, + "timezone": {}, + "ubuntu_advantage": {}, + "updates": {}, + "user": {}, + "users": {}, + "vendor_data": {}, + "version": {}, + "wireguard": {}, + "write_files": {}, + "yum_repo_dir": {}, + "yum_repos": {}, + "zypper": {} + }, + "additionalProperties": false + } \ No newline at end of file diff --git a/pkg/util/cloudinit/validate/testdata/README.md b/pkg/util/cloudinit/validate/testdata/README.md new file mode 100644 index 000000000..456a012c5 --- /dev/null +++ b/pkg/util/cloudinit/validate/testdata/README.md @@ -0,0 +1,20 @@ +# Cloud-Init test data for validation + +## Overview + +This directory contains test data files relates to Cloud-Init: + +## Valid test data + +* [`valid-cloud-config-1.yaml`](./valid-cloud-config-1.yaml) + + * **`Copied`** `2023/12/14` + * **`Source`** https://cloudinit.readthedocs.io/en/latest/reference/examples.html#including-users-and-groups + * **`--help`** An example, valid CloudConfig that includes several users and groups + +## Invalid test data + +* [`invalid-cloud-config-1.yaml`](./valid-cloud-config-1.yaml) + + * **`--help`** A copy of `valid-cloud-config-1.yaml`, where the first, non-default user has a name that is an array rather than a string. + diff --git a/pkg/util/cloudinit/validate/testdata/invalid-cloud-config-1.yaml b/pkg/util/cloudinit/validate/testdata/invalid-cloud-config-1.yaml new file mode 100644 index 000000000..f670797b6 --- /dev/null +++ b/pkg/util/cloudinit/validate/testdata/invalid-cloud-config-1.yaml @@ -0,0 +1,150 @@ +#cloud-config +# Add groups to the system +# The following example adds the 'admingroup' group with members 'root' and 'sys' +# and the empty group cloud-users. +groups: + - admingroup: [root,sys] + - cloud-users + +# Add users to the system. Users are added after groups are added. +# Note: Most of these configuration options will not be honored if the user +# already exists. Following options are the exceptions and they are +# applicable on already-existing users: +# - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo', +# 'ssh_authorized_keys', 'ssh_redirect_user'. +users: + - default + - name: # THIS IS INVALID, NAME CANNOT BE A LIST + - foobar + gecos: Foo B. 
Bar + primary_group: foobar + groups: users + selinux_user: staff_u + expiredate: '2032-09-01' + ssh_import_id: + - lp:falcojr + - gh:TheRealFalcon + lock_passwd: false + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + - name: barfoo + gecos: Bar B. Foo + sudo: ALL=(ALL) NOPASSWD:ALL + groups: users, admin + ssh_import_id: + - lp:falcojr + - gh:TheRealFalcon + lock_passwd: true + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe + - name: cloudy + gecos: Magic Cloud App Daemon User + inactive: '5' + system: true + - name: fizzbuzz + sudo: false + shell: /bin/bash + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe + - snapuser: joe@joeuser.io + - name: nosshlogins + ssh_redirect_user: true + +# Valid Values: +# name: The user's login name +# expiredate: Date on which the user's account will be disabled. +# gecos: The user name's real name, i.e. "Bob B. Smith" +# homedir: Optional. Set to the local path you want to use. Defaults to +# /home/ +# primary_group: define the primary group. Defaults to a new group created +# named after the user. +# groups: Optional. Additional groups to add the user to. Defaults to none +# selinux_user: Optional. The SELinux user for the user's login, such as +# "staff_u". When this is omitted the system will select the default +# SELinux user. +# lock_passwd: Defaults to true. Lock the password to disable password login +# inactive: Number of days after password expires until account is disabled +# passwd: The hash -- not the password itself -- of the password you want +# to use for this user. You can generate a hash via: +# mkpasswd --method=SHA-512 --rounds=4096 +# (the above command would create from stdin an SHA-512 password hash +# with 4096 salt rounds) +# +# Please note: while the use of a hashed password is better than +# plain text, the use of this feature is not ideal. Also, +# using a high number of salting rounds will help, but it should +# not be relied upon. +# +# To highlight this risk, running John the Ripper against the +# example hash above, with a readily available wordlist, revealed +# the true password in 12 seconds on a i7-2620QM. +# +# In other words, this feature is a potential security risk and is +# provided for your convenience only. If you do not fully trust the +# medium over which your cloud-config will be transmitted, then you +# should not use this feature. +# +# no_create_home: When set to true, do not create home directory. +# no_user_group: When set to true, do not create a group named after the user. +# no_log_init: When set to true, do not initialize lastlog and faillog database. +# ssh_import_id: Optional. Import SSH ids +# ssh_authorized_keys: Optional. 
[list] Add keys to user's authorized keys file +# An error will be raised if no_create_home or system is +# also set. +# ssh_redirect_user: Optional. [bool] Set true to block ssh logins for cloud +# ssh public keys and emit a message redirecting logins to +# use instead. This option only disables cloud +# provided public-keys. An error will be raised if ssh_authorized_keys +# or ssh_import_id is provided for the same user. +# +# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule +# strings or False to explicitly deny sudo usage. Examples: +# +# Allow a user unrestricted sudo access. +# sudo: ALL=(ALL) NOPASSWD:ALL +# +# Adding multiple sudo rule strings. +# sudo: +# - ALL=(ALL) NOPASSWD:/bin/mysql +# - ALL=(ALL) ALL +# +# Prevent sudo access for a user. +# sudo: False +# +# Note: Please double check your syntax and make sure it is valid. +# cloud-init does not parse/check the syntax of the sudo +# directive. +# system: Create the user as a system user. This means no home directory. +# snapuser: Create a Snappy (Ubuntu-Core) user via the snap create-user +# command available on Ubuntu systems. If the user has an account +# on the Ubuntu SSO, specifying the email will allow snap to +# request a username and any public ssh keys and will import +# these into the system with username specified by SSO account. +# If 'username' is not set in SSO, then username will be the +# shortname before the email domain. +# + +# Default user creation: +# +# Unless you define users, you will get a 'ubuntu' user on Ubuntu systems with the +# legacy permission (no password sudo, locked user, etc). If however, you want +# to have the 'ubuntu' user in addition to other users, you need to instruct +# cloud-init that you also want the default user. To do this use the following +# syntax: +# users: +# - default +# - bob +# - .... +# foobar: ... +# +# users[0] (the first user in users) overrides the user directive. +# +# The 'default' user above references the distro's config: +# system_info: +# default_user: +# name: Ubuntu +# plain_text_passwd: 'ubuntu' +# home: /home/ubuntu +# shell: /bin/bash +# lock_passwd: True +# gecos: Ubuntu +# groups: [adm, cdrom, dip, lxd, sudo] \ No newline at end of file diff --git a/pkg/util/cloudinit/validate/testdata/valid-cloud-config-1.yaml b/pkg/util/cloudinit/validate/testdata/valid-cloud-config-1.yaml new file mode 100644 index 000000000..6350f33ce --- /dev/null +++ b/pkg/util/cloudinit/validate/testdata/valid-cloud-config-1.yaml @@ -0,0 +1,149 @@ +#cloud-config +# Add groups to the system +# The following example adds the 'admingroup' group with members 'root' and 'sys' +# and the empty group cloud-users. +groups: + - admingroup: [root,sys] + - cloud-users + +# Add users to the system. Users are added after groups are added. +# Note: Most of these configuration options will not be honored if the user +# already exists. Following options are the exceptions and they are +# applicable on already-existing users: +# - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo', +# 'ssh_authorized_keys', 'ssh_redirect_user'. +users: + - default + - name: foobar + gecos: Foo B. Bar + primary_group: foobar + groups: users + selinux_user: staff_u + expiredate: '2032-09-01' + ssh_import_id: + - lp:falcojr + - gh:TheRealFalcon + lock_passwd: false + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + - name: barfoo + gecos: Bar B. 
Foo + sudo: ALL=(ALL) NOPASSWD:ALL + groups: users, admin + ssh_import_id: + - lp:falcojr + - gh:TheRealFalcon + lock_passwd: true + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe + - name: cloudy + gecos: Magic Cloud App Daemon User + inactive: '5' + system: true + - name: fizzbuzz + sudo: false + shell: /bin/bash + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe + - snapuser: joe@joeuser.io + - name: nosshlogins + ssh_redirect_user: true + +# Valid Values: +# name: The user's login name +# expiredate: Date on which the user's account will be disabled. +# gecos: The user name's real name, i.e. "Bob B. Smith" +# homedir: Optional. Set to the local path you want to use. Defaults to +# /home/ +# primary_group: define the primary group. Defaults to a new group created +# named after the user. +# groups: Optional. Additional groups to add the user to. Defaults to none +# selinux_user: Optional. The SELinux user for the user's login, such as +# "staff_u". When this is omitted the system will select the default +# SELinux user. +# lock_passwd: Defaults to true. Lock the password to disable password login +# inactive: Number of days after password expires until account is disabled +# passwd: The hash -- not the password itself -- of the password you want +# to use for this user. You can generate a hash via: +# mkpasswd --method=SHA-512 --rounds=4096 +# (the above command would create from stdin an SHA-512 password hash +# with 4096 salt rounds) +# +# Please note: while the use of a hashed password is better than +# plain text, the use of this feature is not ideal. Also, +# using a high number of salting rounds will help, but it should +# not be relied upon. +# +# To highlight this risk, running John the Ripper against the +# example hash above, with a readily available wordlist, revealed +# the true password in 12 seconds on a i7-2620QM. +# +# In other words, this feature is a potential security risk and is +# provided for your convenience only. If you do not fully trust the +# medium over which your cloud-config will be transmitted, then you +# should not use this feature. +# +# no_create_home: When set to true, do not create home directory. +# no_user_group: When set to true, do not create a group named after the user. +# no_log_init: When set to true, do not initialize lastlog and faillog database. +# ssh_import_id: Optional. Import SSH ids +# ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file +# An error will be raised if no_create_home or system is +# also set. +# ssh_redirect_user: Optional. [bool] Set true to block ssh logins for cloud +# ssh public keys and emit a message redirecting logins to +# use instead. This option only disables cloud +# provided public-keys. 
An error will be raised if ssh_authorized_keys +# or ssh_import_id is provided for the same user. +# +# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule +# strings or False to explicitly deny sudo usage. Examples: +# +# Allow a user unrestricted sudo access. +# sudo: ALL=(ALL) NOPASSWD:ALL +# +# Adding multiple sudo rule strings. +# sudo: +# - ALL=(ALL) NOPASSWD:/bin/mysql +# - ALL=(ALL) ALL +# +# Prevent sudo access for a user. +# sudo: False +# +# Note: Please double check your syntax and make sure it is valid. +# cloud-init does not parse/check the syntax of the sudo +# directive. +# system: Create the user as a system user. This means no home directory. +# snapuser: Create a Snappy (Ubuntu-Core) user via the snap create-user +# command available on Ubuntu systems. If the user has an account +# on the Ubuntu SSO, specifying the email will allow snap to +# request a username and any public ssh keys and will import +# these into the system with username specified by SSO account. +# If 'username' is not set in SSO, then username will be the +# shortname before the email domain. +# + +# Default user creation: +# +# Unless you define users, you will get a 'ubuntu' user on Ubuntu systems with the +# legacy permission (no password sudo, locked user, etc). If however, you want +# to have the 'ubuntu' user in addition to other users, you need to instruct +# cloud-init that you also want the default user. To do this use the following +# syntax: +# users: +# - default +# - bob +# - .... +# foobar: ... +# +# users[0] (the first user in users) overrides the user directive. +# +# The 'default' user above references the distro's config: +# system_info: +# default_user: +# name: Ubuntu +# plain_text_passwd: 'ubuntu' +# home: /home/ubuntu +# shell: /bin/bash +# lock_passwd: True +# gecos: Ubuntu +# groups: [adm, cdrom, dip, lxd, sudo] \ No newline at end of file diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate.go b/pkg/util/cloudinit/validate/validate.go similarity index 70% rename from pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate.go rename to pkg/util/cloudinit/validate/validate.go index 151a54db9..fd2932897 100644 --- a/pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate.go +++ b/pkg/util/cloudinit/validate/validate.go @@ -11,6 +11,7 @@ import ( "github.com/vmware-tanzu/vm-operator/api/v1alpha2/cloudinit" "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" + cloudinitschema "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit/schema" ) const ( @@ -18,9 +19,9 @@ const ( invalidWriteFileContent = "value must be a string, multi-line string, or SecretKeySelector" ) -// CloudConfig returns any errors encountered when validating the raw JSON -// portions of a CloudConfig. -func CloudConfig( +// CloudConfigJSONRawMessage returns any errors encountered when validating the +// json.RawMessage portions of a CloudConfig. +func CloudConfigJSONRawMessage( fieldPath *field.Path, in cloudinit.CloudConfig) field.ErrorList { @@ -109,3 +110,28 @@ func validateWriteFiles( return allErrs } + +// CloudConfigYAML returns an error if the provided CloudConfig YAML is not +// valid according to the CloudConfig schema. +// +// Please note the up-to-date schemas related to Cloud-Init may be found at +// https://github.com/canonical/cloud-init/tree/main/cloudinit/config/schemas. 
+func CloudConfigYAML(in string) error { + // The cloudinitschema.UnmarshalCloudconfig function only supports JSON + // input, so first we need to convert the CloudConfig YAML to JSON. + data := map[string]any{} + if err := yaml.Unmarshal([]byte(in), &data); err != nil { + return err + } + out, err := json.Marshal(data) + if err != nil { + return err + } + + // Validate the JSON CloudConfig. + if _, err := cloudinitschema.UnmarshalCloudconfig(out); err != nil { + return err + } + + return nil +} diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate_suite_test.go b/pkg/util/cloudinit/validate/validate_suite_test.go similarity index 100% rename from pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate_suite_test.go rename to pkg/util/cloudinit/validate/validate_suite_test.go diff --git a/pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate_test.go b/pkg/util/cloudinit/validate/validate_test.go similarity index 70% rename from pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate_test.go rename to pkg/util/cloudinit/validate/validate_test.go index 77d5be0f3..c6b9bfd1b 100644 --- a/pkg/vmprovider/providers/vsphere2/cloudinit/validate/validate_test.go +++ b/pkg/util/cloudinit/validate/validate_test.go @@ -5,6 +5,7 @@ package validate_test import ( "encoding/json" + "os" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -13,10 +14,10 @@ import ( vmopv1cloudinit "github.com/vmware-tanzu/vm-operator/api/v1alpha2/cloudinit" "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit/validate" + cloudinitvalidate "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit/validate" ) -var _ = Describe("CloudConfig ValidateCloudConfig", func() { +var _ = Describe("Validate CloudConfigJSONRawMessage", func() { var ( cloudConfig vmopv1cloudinit.CloudConfig errs field.ErrorList @@ -57,7 +58,7 @@ var _ = Describe("CloudConfig ValidateCloudConfig", func() { }) JustBeforeEach(func() { - errs = validate.CloudConfig( + errs = cloudinitvalidate.CloudConfigJSONRawMessage( field.NewPath("spec").Child("bootstrap").Child("cloudInit"), cloudConfig) }) @@ -91,3 +92,38 @@ var _ = Describe("CloudConfig ValidateCloudConfig", func() { }) }) }) + +var _ = Describe("Validate CloudConfigYAML", func() { + var ( + err error + cloudConfigYAML string + ) + + JustBeforeEach(func() { + err = cloudinitvalidate.CloudConfigYAML(cloudConfigYAML) + }) + + When("The CloudConfig is valid", func() { + BeforeEach(func() { + data, err := os.ReadFile("./testdata/valid-cloud-config-1.yaml") + Expect(err).ToNot(HaveOccurred()) + Expect(data).ToNot(HaveLen(0)) + cloudConfigYAML = string(data) + }) + It("Should not return an error", func() { + Expect(err).ToNot(HaveOccurred()) + }) + }) + + When("The CloudConfig is invalid", func() { + BeforeEach(func() { + data, err := os.ReadFile("./testdata/invalid-cloud-config-1.yaml") + Expect(err).ToNot(HaveOccurred()) + Expect(data).ToNot(HaveLen(0)) + cloudConfigYAML = string(data) + }) + It("Should return an error", func() { + Expect(err).To(HaveOccurred()) + }) + }) +}) diff --git a/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap.go b/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap.go index 0ab2f31b9..6e9e087c1 100644 --- a/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap.go +++ b/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap.go @@ -14,7 +14,7 @@ import ( vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" 
"github.com/vmware-tanzu/vm-operator/pkg/context" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit" + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/config" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/constants" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/network" diff --git a/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit.go b/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit.go index e7043283b..f5d5d07e3 100644 --- a/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit.go +++ b/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit.go @@ -13,7 +13,7 @@ import ( vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" "github.com/vmware-tanzu/vm-operator/pkg/context" "github.com/vmware-tanzu/vm-operator/pkg/util" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit" + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/constants" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/internal" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/network" @@ -53,7 +53,7 @@ func BootStrapCloudInit( if bsArgs.CloudConfig == nil { return nil, nil, fmt.Errorf("cloudConfigSecretData is nil") } - data, err := cloudinit.MarshalYAML(vmCtx, *cooked, *bsArgs.CloudConfig) + data, err := cloudinit.MarshalYAML(*cooked, *bsArgs.CloudConfig) if err != nil { return nil, nil, err } diff --git a/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit_test.go b/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit_test.go index 66519d2fb..d161617ac 100644 --- a/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit_test.go +++ b/pkg/vmprovider/providers/vsphere2/vmlifecycle/bootstrap_cloudinit_test.go @@ -20,7 +20,7 @@ import ( "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" "github.com/vmware-tanzu/vm-operator/pkg/context" "github.com/vmware-tanzu/vm-operator/pkg/util" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit" + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/constants" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/internal" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/network" diff --git a/pkg/vmprovider/providers/vsphere2/vmprovider_vm_utils.go b/pkg/vmprovider/providers/vsphere2/vmprovider_vm_utils.go index a3a074c59..823135bc7 100644 --- a/pkg/vmprovider/providers/vsphere2/vmprovider_vm_utils.go +++ b/pkg/vmprovider/providers/vsphere2/vmprovider_vm_utils.go @@ -17,7 +17,7 @@ import ( conditions "github.com/vmware-tanzu/vm-operator/pkg/conditions2" "github.com/vmware-tanzu/vm-operator/pkg/context" "github.com/vmware-tanzu/vm-operator/pkg/util" - "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit" + "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/constants" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/instancestorage" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/vmlifecycle" diff --git a/webhooks/virtualmachine/v1alpha2/validation/virtualmachine_validator.go 
b/webhooks/virtualmachine/v1alpha2/validation/virtualmachine_validator.go index 59abb9ddc..2da73b5f5 100644 --- a/webhooks/virtualmachine/v1alpha2/validation/virtualmachine_validator.go +++ b/webhooks/virtualmachine/v1alpha2/validation/virtualmachine_validator.go @@ -34,7 +34,7 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/context" "github.com/vmware-tanzu/vm-operator/pkg/lib" "github.com/vmware-tanzu/vm-operator/pkg/topology" - cloudinitvalidate "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/cloudinit/validate" + cloudinitvalidate "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit/validate" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/config" "github.com/vmware-tanzu/vm-operator/pkg/vmprovider/providers/vsphere2/instancestorage" "github.com/vmware-tanzu/vm-operator/webhooks/common" @@ -215,7 +215,7 @@ func (v validator) validateBootstrap( allErrs = append(allErrs, field.Invalid(p, "cloudInit", "cloudConfig and rawCloudConfig are mutually exclusive")) } - allErrs = append(allErrs, cloudinitvalidate.CloudConfig(p, *v)...) + allErrs = append(allErrs, cloudinitvalidate.CloudConfigJSONRawMessage(p, *v)...) } }
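Usage note (not part of the patch above): a minimal sketch of how the relocated `cloudinitvalidate.CloudConfigYAML` helper can be exercised against the new testdata; the `main` wrapper and the hard-coded file path are illustrative assumptions, not code from this change.

```go
package main

import (
	"fmt"
	"os"

	cloudinitvalidate "github.com/vmware-tanzu/vm-operator/pkg/util/cloudinit/validate"
)

func main() {
	// Illustrative path; any #cloud-config YAML document can be supplied here.
	data, err := os.ReadFile("pkg/util/cloudinit/validate/testdata/valid-cloud-config-1.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// CloudConfigYAML converts the YAML document to JSON internally and then
	// checks it against the generated CloudConfig schema; a nil error means
	// the document is considered valid.
	if err := cloudinitvalidate.CloudConfigYAML(string(data)); err != nil {
		fmt.Fprintln(os.Stderr, "invalid cloud-config:", err)
		os.Exit(1)
	}
	fmt.Println("cloud-config is valid")
}
```

Running the same sketch against `testdata/invalid-cloud-config-1.yaml`, where the first non-default user's `name` is a list rather than a string, is expected to return a schema error instead.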