diff --git a/incus/storage/drivers/driver_btrfs.go b/incus/storage/drivers/driver_btrfs.go index 091f4cf33ab..c96b50693a3 100644 --- a/incus/storage/drivers/driver_btrfs.go +++ b/incus/storage/drivers/driver_btrfs.go @@ -207,7 +207,7 @@ func (d *btrfs) Create() error { } else { // New btrfs subvolume on existing btrfs filesystem. cleanSource := filepath.Clean(hostPath) - lxdDir := shared.VarPath() + daemonDir := shared.VarPath() if shared.PathExists(hostPath) { hostPathFS, _ := filesystem.Detect(hostPath) @@ -216,7 +216,7 @@ func (d *btrfs) Create() error { } } - if strings.HasPrefix(cleanSource, lxdDir) { + if strings.HasPrefix(cleanSource, daemonDir) { if cleanSource != GetPoolMountPath(d.name) { return fmt.Errorf("Only allowed source path under %q is %q", shared.VarPath(), GetPoolMountPath(d.name)) } diff --git a/incus/storage/drivers/driver_btrfs_volumes.go b/incus/storage/drivers/driver_btrfs_volumes.go index b56af53cb3a..054be9b7d56 100644 --- a/incus/storage/drivers/driver_btrfs_volumes.go +++ b/incus/storage/drivers/driver_btrfs_volumes.go @@ -262,7 +262,7 @@ func (d *btrfs) CreateVolumeFromBackup(vol Volume, srcBackup backup.Info, srcDat var copyOps []btrfsCopyOp - // unpackVolume unpacks all subvolumes in a LXD volume from a backup tarball file. + // unpackVolume unpacks all subvolumes in a volume from a backup tarball file. unpackVolume := func(v Volume, srcFilePrefix string) error { _, snapName, _ := api.GetParentAndSnapshotName(v.name) @@ -515,7 +515,7 @@ func (d *btrfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, v d.logger.Debug("Received BTRFS migration meta data header", logger.Ctx{"name": vol.name}) } else { - // Populate the migrationHeader subvolumes with root volumes only to support older LXD sources. + // Populate the migrationHeader subvolumes with root volumes only to support older sources. 
for _, snapName := range volTargetArgs.Snapshots { migrationHeader.Subvolumes = append(migrationHeader.Subvolumes, BTRFSSubVolume{ Snapshot: snapName, @@ -608,7 +608,7 @@ func (d *btrfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWrite // received. We don't use a map as the order should be kept. copyOps := []btrfsCopyOp{} - // receiveVolume receives all subvolumes in a LXD volume from the source. + // receiveVolume receives all subvolumes in a volume from the source. receiveVolume := func(v Volume, receivePath string) error { _, snapName, _ := api.GetParentAndSnapshotName(v.name) @@ -653,7 +653,7 @@ func (d *btrfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWrite return nil } - // Get instances directory (e.g. /var/lib/lxd/storage-pools/btrfs/containers). + // Get instances directory (e.g. /var/lib/incus/storage-pools/btrfs/containers). instancesPath := GetVolumeMountPath(d.name, vol.volType, "") // Create a temporary directory which will act as the parent directory of the received ro snapshot. @@ -1127,7 +1127,7 @@ func (d *btrfs) GetVolumeDiskPath(vol Volume) (string, error) { return genericVFSGetVolumeDiskPath(vol) } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. func (d *btrfs) ListVolumes() ([]Volume, error) { return genericVFSListVolumes(d) } @@ -1434,7 +1434,7 @@ func (d *btrfs) migrateVolumeOptimized(vol Volume, conn io.ReadWriteCloser, volS } } - // Get instances directory (e.g. /var/lib/lxd/storage-pools/btrfs/containers). + // Get instances directory (e.g. /var/lib/incus/storage-pools/btrfs/containers). instancesPath := GetVolumeMountPath(d.name, vol.volType, "") // Create a temporary directory which will act as the parent directory of the read-only snapshot. 
diff --git a/incus/storage/drivers/driver_ceph.go b/incus/storage/drivers/driver_ceph.go index e2bde9b6120..888c5521625 100644 --- a/incus/storage/drivers/driver_ceph.go +++ b/incus/storage/drivers/driver_ceph.go @@ -92,9 +92,9 @@ func (d *ceph) Info() Info { } } -// getPlaceholderVolume returns the volume used to indicate if the pool is used by LXD. +// getPlaceholderVolume returns the volume used to indicate if the pool is in use. func (d *ceph) getPlaceholderVolume() Volume { - return NewVolume(d, d.name, VolumeType("lxd"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil) + return NewVolume(d, d.name, VolumeType("incus"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil) } // FillConfig populates the storage pool's configuration file with the default values. @@ -177,8 +177,8 @@ func (d *ceph) Create() error { d.logger.Warn("Failed to initialize pool", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]}) } - // Create placeholder storage volume. Other LXD instances will use this to detect whether this osd - // pool is already in use by another LXD instance. + // Create placeholder storage volume. Other instances will use this to detect whether this osd + // pool is already in use by another instance. err = d.rbdCreateVolume(placeholderVol, "0") if err != nil { return err @@ -193,15 +193,15 @@ func (d *ceph) Create() error { if volExists { // ceph.osd.force_reuse is deprecated and should not be used. OSD pools are a logical - // construct there is no good reason not to create one for dedicated use by LXD. + // construct there is no good reason not to create one for dedicated use by the daemon. if shared.IsFalseOrEmpty(d.config["ceph.osd.force_reuse"]) { - return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another LXD instance. 
Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"]) + return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another Incus instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"]) } d.config["volatile.pool.pristine"] = "false" } else { - // Create placeholder storage volume. Other LXD instances will use this to detect whether this osd - // pool is already in use by another LXD instance. + // Create placeholder storage volume. Other instances will use this to detect whether this osd + // pool is already in use by another instance. err := d.rbdCreateVolume(placeholderVol, "0") if err != nil { return err diff --git a/incus/storage/drivers/driver_ceph_utils.go b/incus/storage/drivers/driver_ceph_utils.go index 245af548472..8f58c0adb41 100644 --- a/incus/storage/drivers/driver_ceph_utils.go +++ b/incus/storage/drivers/driver_ceph_utils.go @@ -396,11 +396,11 @@ func (d *ceph) rbdListSnapshotClones(vol Volume, snapshotName string) ([]string, } // rbdMarkVolumeDeleted marks an RBD storage volume as being in "zombie" state. -// An RBD storage volume that is in zombie state is not tracked in LXD's +// An RBD storage volume that is in zombie state is not tracked in the // database anymore but still needs to be kept around for the sake of any // dependent storage entities in the storage pool. This usually happens when an // RBD storage volume has protected snapshots; a scenario most common when -// creating a sparse copy of a container or when LXD updated an image and the +// creating a sparse copy of a container or when it updated an image and the // image still has dependent container clones. 
func (d *ceph) rbdMarkVolumeDeleted(vol Volume, newVolumeName string) error { // Ensure that new volume contains the config from the source volume to maintain filesystem suffix on @@ -706,7 +706,7 @@ func (d *ceph) deleteVolume(vol Volume) (int, error) { // Only delete the parent snapshot of the instance if it is a zombie. // This includes both if the parent volume itself is a zombie, or if the just the snapshot - // is a zombie. If it is not we know that LXD is still using it. + // is a zombie. If it is not we know that Incus is still using it. if strings.HasPrefix(string(parentVol.volType), "zombie_") || strings.HasPrefix(parentSnapshotName, "zombie_") { ret, err := d.deleteVolumeSnapshot(parentVol, parentSnapshotName) if ret < 0 { @@ -777,7 +777,7 @@ func (d *ceph) deleteVolumeSnapshot(vol Volume, snapshotName string) (int, error return -1, err } - // Only delete the parent image if it is a zombie. If it is not we know that LXD is still using it. + // Only delete the parent image if it is a zombie. If it is not we know that Incus is still using it. if strings.HasPrefix(string(vol.volType), "zombie_") { ret, err := d.deleteVolume(vol) if ret < 0 { @@ -831,7 +831,7 @@ func (d *ceph) deleteVolumeSnapshot(vol Volume, snapshotName string) (int, error } // Only delete the parent image if it is a zombie. If it - // is not we know that LXD is still using it. + // is not we know that Incus is still using it. if strings.HasPrefix(string(vol.volType), "zombie_") { ret, err := d.deleteVolume(vol) if ret < 0 { @@ -859,7 +859,7 @@ func (d *ceph) deleteVolumeSnapshot(vol Volume, snapshotName string) (int, error } // parseParent splits a string describing a RBD storage entity into its components. -// This can be used on strings like: <osd-pool-name>/<lxd-specific-prefix>_<volume-name>@<snapshot-name> +// This can be used on strings like: <osd-pool-name>/<incus-specific-prefix>_<volume-name>@<snapshot-name> // and will return a Volume and snapshot name. 
func (d *ceph) parseParent(parent string) (Volume, string, error) { vol := Volume{} @@ -930,9 +930,9 @@ func (d *ceph) parseParent(parent string) (Volume, string, error) { // parseClone splits a strings describing an RBD storage volume. // For example a string like -// <osd-pool-name>/<lxd-specific-prefix>_<volume-name> +// <osd-pool-name>/<incus-specific-prefix>_<volume-name> // will be split into -// <osd-pool-name>, <lxd-specific-prefix>, <volume-name>. +// <osd-pool-name>, <incus-specific-prefix>, <volume-name>. func (d *ceph) parseClone(clone string) (string, string, string, error) { idx := strings.Index(clone, "/") if idx == -1 { @@ -1101,7 +1101,7 @@ func (d *ceph) getRBDVolumeName(vol Volume, snapName string, zombie bool, withPo } // Let's say we want to send the a container "a" including snapshots "snap0" and -// "snap1" on storage pool "pool1" from LXD "l1" to LXD "l2" on storage pool +// "snap1" on storage pool "pool1" from Incus "l1" to Incus "l2" on storage pool // "pool2": // // The pool layout on "l1" would be: @@ -1114,7 +1114,7 @@ func (d *ceph) getRBDVolumeName(vol Volume, snapName string, zombie bool, withPo // // rbd export-diff pool1/container_a@snapshot_snap0 - | rbd import-diff - pool2/container_a // -// (Note that pool2/container_a must have been created by the receiving LXD +// (Note that pool2/container_a must have been created by the receiving Incus // instance before.) // // rbd export-diff pool1/container_a@snapshot_snap1 --from-snap snapshot_snap0 - | rbd import-diff - pool2/container_a diff --git a/incus/storage/drivers/driver_ceph_volumes.go b/incus/storage/drivers/driver_ceph_volumes.go index 3b1fa13fe9e..7fcfea0af1e 100644 --- a/incus/storage/drivers/driver_ceph_volumes.go +++ b/incus/storage/drivers/driver_ceph_volumes.go @@ -1060,7 +1060,7 @@ func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) { return "", ErrNotSupported } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. 
func (d *ceph) ListVolumes() ([]Volume, error) { vols := make(map[string]Volume) diff --git a/incus/storage/drivers/driver_cephfs.go b/incus/storage/drivers/driver_cephfs.go index fafd9cf853b..8bcd430e32a 100644 --- a/incus/storage/drivers/driver_cephfs.go +++ b/incus/storage/drivers/driver_cephfs.go @@ -135,7 +135,7 @@ func (d *cephfs) Create() error { } // Create a temporary mountpoint. - mountPath, err := os.MkdirTemp("", "lxd_cephfs_") + mountPath, err := os.MkdirTemp("", "incus_cephfs_") if err != nil { return fmt.Errorf("Failed to create temporary directory under: %w", err) } @@ -178,7 +178,7 @@ func (d *cephfs) Create() error { // Check that the existing path is empty. ok, _ := shared.PathIsEmpty(filepath.Join(mountPoint, fsPath)) if !ok { - return fmt.Errorf("Only empty CEPHFS paths can be used as a LXD storage pool") + return fmt.Errorf("Only empty CEPHFS paths can be used as a storage pool") } return nil @@ -195,7 +195,7 @@ func (d *cephfs) Delete(op *operations.Operation) error { } // Create a temporary mountpoint. - mountPath, err := os.MkdirTemp("", "lxd_cephfs_") + mountPath, err := os.MkdirTemp("", "incus_cephfs_") if err != nil { return fmt.Errorf("Failed to create temporary directory under: %w", err) } diff --git a/incus/storage/drivers/driver_cephfs_volumes.go b/incus/storage/drivers/driver_cephfs_volumes.go index ccf20bb8aef..3bc8d3826c8 100644 --- a/incus/storage/drivers/driver_cephfs_volumes.go +++ b/incus/storage/drivers/driver_cephfs_volumes.go @@ -338,7 +338,7 @@ func (d *cephfs) GetVolumeDiskPath(vol Volume) (string, error) { return "", ErrNotSupported } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. 
func (d *cephfs) ListVolumes() ([]Volume, error) { return genericVFSListVolumes(d) } diff --git a/incus/storage/drivers/driver_cephobject.go b/incus/storage/drivers/driver_cephobject.go index 556115f625d..75194e4262c 100644 --- a/incus/storage/drivers/driver_cephobject.go +++ b/incus/storage/drivers/driver_cephobject.go @@ -18,7 +18,7 @@ var cephobjectVersion string var cephobjectLoaded bool // cephobjectRadosgwAdminUser admin user in radosgw. -const cephobjectRadosgwAdminUser = "lxd-admin" +const cephobjectRadosgwAdminUser = "incus-admin" type cephobject struct { common diff --git a/incus/storage/drivers/driver_common.go b/incus/storage/drivers/driver_common.go index f9509110d3a..d77e2c7ba57 100644 --- a/incus/storage/drivers/driver_common.go +++ b/incus/storage/drivers/driver_common.go @@ -258,7 +258,7 @@ func (d *common) ApplyPatch(name string) error { // moveGPTAltHeader moves the GPT alternative header to the end of the disk device supplied. // If the device supplied is not detected as not being a GPT disk then no action is taken and nil is returned. // If the required sgdisk command is not available a warning is logged, but no error is returned, as really it is -// the job of the VM quest to ensure the partitions are resized to the size of the disk (as LXD does not dicatate +// the job of the VM guest to ensure the partitions are resized to the size of the disk (as Incus does not dictate // what partition structure (if any) the disk should have. However we do attempt to move the GPT alternative // header where possible so that the backup header is where it is expected in case of any corruption with the // primary header. @@ -367,7 +367,7 @@ func (d *common) GetVolumeDiskPath(vol Volume) (string, error) { return "", ErrNotSupported } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. 
func (d *common) ListVolumes() ([]Volume, error) { return nil, ErrNotSupported } diff --git a/incus/storage/drivers/driver_dir.go b/incus/storage/drivers/driver_dir.go index a616e0d9bd9..6954967bc39 100644 --- a/incus/storage/drivers/driver_dir.go +++ b/incus/storage/drivers/driver_dir.go @@ -75,7 +75,7 @@ func (d *dir) Create() error { // Check that if within INCUS_DIR, we're at our expected spot. cleanSource := filepath.Clean(sourcePath) if strings.HasPrefix(cleanSource, shared.VarPath()) && cleanSource != GetPoolMountPath(d.name) { - return fmt.Errorf("Source path '%s' is within the LXD directory", cleanSource) + return fmt.Errorf("Source path '%s' is within the Incus directory", cleanSource) } // Check that the path is currently empty. diff --git a/incus/storage/drivers/driver_dir_volumes.go b/incus/storage/drivers/driver_dir_volumes.go index f4eac48849e..f548242b69e 100644 --- a/incus/storage/drivers/driver_dir_volumes.go +++ b/incus/storage/drivers/driver_dir_volumes.go @@ -361,7 +361,7 @@ func (d *dir) GetVolumeDiskPath(vol Volume) (string, error) { return genericVFSGetVolumeDiskPath(vol) } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. func (d *dir) ListVolumes() ([]Volume, error) { return genericVFSListVolumes(d) } diff --git a/incus/storage/drivers/driver_lvm.go b/incus/storage/drivers/driver_lvm.go index cfb3212717c..230a1ae12e6 100644 --- a/incus/storage/drivers/driver_lvm.go +++ b/incus/storage/drivers/driver_lvm.go @@ -19,7 +19,7 @@ import ( "github.com/cyphar/incus/shared/validate" ) -const lvmVgPoolMarker = "lxd_pool" // Indicator tag used to mark volume groups as in use by LXD. +const lvmVgPoolMarker = "incus_pool" // Indicator tag used to mark volume groups as in use. 
var lvmLoaded bool var lvmVersion string @@ -132,7 +132,7 @@ func (d *lvm) Create() error { if d.config["source"] == "" || d.config["source"] == defaultSource { usingLoopFile = true - // We are using a LXD internal loopback file. + // We are using an internal loopback file. d.config["source"] = defaultSource if d.config["lvm.vg_name"] == "" { d.config["lvm.vg_name"] = d.name @@ -302,18 +302,18 @@ func (d *lvm) Create() error { } // Skip the in use checks if the force reuse option is enabled. This allows a storage pool to be - // backed by an existing non-empty volume group. Note: This option should be used with care, as LXD - // can then not guarantee that volume name conflicts won't occur with non-LXD created volumes in - // the same volume group. This could also potentially lead to LXD deleting a non-LXD volume should + // backed by an existing non-empty volume group. Note: This option should be used with care, as Incus + // can then not guarantee that volume name conflicts won't occur with non-Incus created volumes in + // the same volume group. This could also potentially lead to Incus deleting a non-Incus volume should // name conflicts occur. if shared.IsFalseOrEmpty(d.config["lvm.vg.force_reuse"]) { if !empty { return fmt.Errorf("Volume group %q is not empty", d.config["lvm.vg_name"]) } - // Check the tags on the volume group to check it is not already being used by LXD. + // Check the tags on the volume group to check it is not already being used. if shared.StringInSlice(lvmVgPoolMarker, vgTags) { - return fmt.Errorf("Volume group %q is already used by LXD", d.config["lvm.vg_name"]) + return fmt.Errorf("Volume group %q is already used by Incus", d.config["lvm.vg_name"]) } } } else { @@ -370,13 +370,13 @@ func (d *lvm) Create() error { } } - // Mark the volume group with the lvmVgPoolMarker tag to indicate it is now in use by LXD. + // Mark the volume group with the lvmVgPoolMarker tag to indicate it is now in use by Incus. 
_, err = shared.TryRunCommand("vgchange", "--addtag", lvmVgPoolMarker, d.config["lvm.vg_name"]) if err != nil { return err } - d.logger.Debug("LXD marker tag added to volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]}) + d.logger.Debug("Incus marker tag added to volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]}) revert.Success() return nil @@ -457,14 +457,14 @@ func (d *lvm) Delete(op *operations.Operation) error { d.logger.Debug("Volume group removed", logger.Ctx{"vg_name": d.config["lvm.vg_name"]}) } else { - // Otherwise just remove the lvmVgPoolMarker tag to indicate LXD no longer uses this VG. + // Otherwise just remove the lvmVgPoolMarker tag to indicate Incus no longer uses this VG. if shared.StringInSlice(lvmVgPoolMarker, vgTags) { _, err = shared.TryRunCommand("vgchange", "--deltag", lvmVgPoolMarker, d.config["lvm.vg_name"]) if err != nil { return fmt.Errorf("Failed to remove marker tag on volume group for the lvm storage pool: %w", err) } - d.logger.Debug("LXD marker tag removed from volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]}) + d.logger.Debug("Incus marker tag removed from volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]}) } } } diff --git a/incus/storage/drivers/driver_lvm_patches.go b/incus/storage/drivers/driver_lvm_patches.go index 93299bc7090..d3a2df66ebf 100644 --- a/incus/storage/drivers/driver_lvm_patches.go +++ b/incus/storage/drivers/driver_lvm_patches.go @@ -8,7 +8,7 @@ import ( "github.com/cyphar/incus/shared/logger" ) -// patchStorageSkipActivation set skipactivation=y on all LXD LVM logical volumes (excluding thin pool volumes). +// patchStorageSkipActivation sets skipactivation=y on all Incus LVM logical volumes (excluding thin pool volumes). 
func (d *lvm) patchStorageSkipActivation() error { out, err := shared.RunCommand("lvs", "--noheadings", "-o", "lv_name,lv_attr", d.config["lvm.vg_name"]) if err != nil { @@ -24,7 +24,7 @@ func (d *lvm) patchStorageSkipActivation() error { volName := fields[0] volAttr := fields[1] - // Ignore non-LXD prefixes, and thinpool volumes (these should remain auto activated). + // Ignore non-Incus prefixes, and thinpool volumes (these should remain auto activated). if !strings.HasPrefix(volName, "images_") && !strings.HasPrefix(volName, "containers_") && !strings.HasPrefix(volName, "virtual-machines_") && !strings.HasPrefix(volName, "custom_") { continue } diff --git a/incus/storage/drivers/driver_lvm_utils.go b/incus/storage/drivers/driver_lvm_utils.go index 9fe9d42448d..6292a01f207 100644 --- a/incus/storage/drivers/driver_lvm_utils.go +++ b/incus/storage/drivers/driver_lvm_utils.go @@ -35,7 +35,7 @@ const lvmSnapshotSeparator = "-" const lvmEscapedHyphen = "--" // lvmThinpoolDefaultName is the default name for the thinpool volume. -const lvmThinpoolDefaultName = "LXDThinPool" +const lvmThinpoolDefaultName = "IncusThinPool" // usesThinpool indicates whether the config specifies to use a thin pool or not. func (d *lvm) usesThinpool() bool { @@ -721,7 +721,7 @@ func (d *lvm) thinPoolVolumeUsage(volDevPath string) (uint64, uint64, error) { // parseLogicalVolumeSnapshot parses a raw logical volume name (from lvs command) and checks whether it is a // snapshot of the supplied parent volume. Returns unescaped parsed snapshot name if snapshot volume recognised, -// empty string if not. The parent is required due to limitations in the naming scheme that LXD has historically +// empty string if not. The parent is required due to limitations in the naming scheme that Incus has historically // been used for naming logical volumes meaning that additional context of the parent is required to accurately // recognise snapshot volumes that belong to the parent. 
func (d *lvm) parseLogicalVolumeSnapshot(parent Volume, lvmVolName string) string { diff --git a/incus/storage/drivers/driver_lvm_volumes.go b/incus/storage/drivers/driver_lvm_volumes.go index 769e16b5208..f5307a6f24b 100644 --- a/incus/storage/drivers/driver_lvm_volumes.go +++ b/incus/storage/drivers/driver_lvm_volumes.go @@ -512,7 +512,7 @@ func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) { return "", ErrNotSupported } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. func (d *lvm) ListVolumes() ([]Volume, error) { vols := make(map[string]Volume) @@ -565,7 +565,7 @@ func (d *lvm) ListVolumes() ([]Volume, error) { continue // Ignore VM filesystem volumes as we will just return the VM's block volume. } - // Unescape raw LVM name to LXD storage volume name. Safe to do now we know we are not dealing + // Unescape raw LVM name to storage volume name. Safe to do now we know we are not dealing // with snapshot volumes. volName = strings.Replace(volName, lvmEscapedHyphen, "-", -1) diff --git a/incus/storage/drivers/driver_mock.go b/incus/storage/drivers/driver_mock.go index a4e95bf3eae..72c7e27f30c 100644 --- a/incus/storage/drivers/driver_mock.go +++ b/incus/storage/drivers/driver_mock.go @@ -146,7 +146,7 @@ func (d *mock) GetVolumeDiskPath(vol Volume) (string, error) { return "", nil } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. 
func (d *mock) ListVolumes() ([]Volume, error) { return nil, nil } diff --git a/incus/storage/drivers/driver_zfs_volumes.go b/incus/storage/drivers/driver_zfs_volumes.go index ac1e60b7a64..db9c01795a1 100644 --- a/incus/storage/drivers/driver_zfs_volumes.go +++ b/incus/storage/drivers/driver_zfs_volumes.go @@ -174,9 +174,9 @@ func (d *zfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper opts = []string{"volmode=none"} } - // Add custom property lxd:content_type which allows distinguishing between regular volumes, block_mode enabled volumes, and ISO volumes. + // Add custom property incus:content_type which allows distinguishing between regular volumes, block_mode enabled volumes, and ISO volumes. if vol.volType == VolumeTypeCustom { - opts = append(opts, fmt.Sprintf("lxd:content_type=%s", vol.contentType)) + opts = append(opts, fmt.Sprintf("incus:content_type=%s", vol.contentType)) } // Avoid double caching in the ARC cache and in the guest OS filesystem cache. @@ -1087,7 +1087,7 @@ func (d *zfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWriteCl // snapshots that match the requested snapshots in volTargetArgs.Snapshots are kept. Any other snapshot // data sets should be removed. keepDataset := func(dataSetName string) bool { - // Keep non-snapshot data sets and snapshots that don't have the LXD snapshot prefix indicator. + // Keep non-snapshot data sets and snapshots that don't have the snapshot prefix indicator. dataSetSnapshotPrefix := "@snapshot-" if !strings.HasPrefix(dataSetName, "@") || !strings.HasPrefix(dataSetName, dataSetSnapshotPrefix) { return false @@ -1792,7 +1792,7 @@ func (d *zfs) GetVolumeDiskPath(vol Volume) (string, error) { return d.getVolumeDiskPathFromDataset(d.dataset(vol, false)) } -// ListVolumes returns a list of LXD volumes in storage pool. +// ListVolumes returns a list of volumes in storage pool. 
func (d *zfs) ListVolumes() ([]Volume, error) { vols := make(map[string]Volume) @@ -1802,7 +1802,7 @@ func (d *zfs) ListVolumes() ([]Volume, error) { // However for custom block volumes it does not also end the volume name in zfsBlockVolSuffix (unlike the // LVM and Ceph drivers), so we must also retrieve the dataset type here and look for "volume" types // which also indicate this is a block volume. - cmd := exec.Command("zfs", "list", "-H", "-o", "name,type,lxd:content_type", "-r", "-t", "filesystem,volume", d.config["zfs.pool_name"]) + cmd := exec.Command("zfs", "list", "-H", "-o", "name,type,incus:content_type", "-r", "-t", "filesystem,volume", d.config["zfs.pool_name"]) stdout, err := cmd.StdoutPipe() if err != nil { return nil, err @@ -1830,7 +1830,7 @@ func (d *zfs) ListVolumes() ([]Volume, error) { zfsVolName := parts[0] zfsContentType := parts[1] - lxdContentType := parts[2] + incusContentType := parts[2] var volType VolumeType var volName string @@ -1876,9 +1876,9 @@ func (d *zfs) ListVolumes() ([]Volume, error) { v := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config) if isBlock { - // Get correct content type from lxd:content_type property. - if lxdContentType != "-" { - v.contentType = ContentType(lxdContentType) + // Get correct content type from incus:content_type property. + if incusContentType != "-" { + v.contentType = ContentType(incusContentType) } if v.contentType == ContentTypeBlock { diff --git a/incus/storage/drivers/generic_vfs.go b/incus/storage/drivers/generic_vfs.go index ed7ea3321cc..4c9fd2e823e 100644 --- a/incus/storage/drivers/generic_vfs.go +++ b/incus/storage/drivers/generic_vfs.go @@ -1081,7 +1081,7 @@ func genericVFSCopyVolume(d Driver, initVolume func(vol Volume) (revert.Hook, er return nil } -// genericVFSListVolumes returns a list of LXD volumes in storage pool. +// genericVFSListVolumes returns a list of volumes in storage pool. 
func genericVFSListVolumes(d Driver) ([]Volume, error) { var vols []Volume poolName := d.Name() diff --git a/incus/storage/drivers/utils.go b/incus/storage/drivers/utils.go index 46a4bbf01ed..fa895d03847 100644 --- a/incus/storage/drivers/utils.go +++ b/incus/storage/drivers/utils.go @@ -795,7 +795,7 @@ func OperationLockName(operationName string, poolName string, volType VolumeType } // loopFileSizeDefault returns the size in GiB to use as the default size for a pool loop file. -// This is based on the size of the filesystem of LXD's VarPath(). +// This is based on the size of the filesystem of the daemon's VarPath(). func loopFileSizeDefault() (uint64, error) { st := unix.Statfs_t{} err := unix.Statfs(shared.VarPath(), &st) diff --git a/incus/storage/drivers/utils_ceph.go b/incus/storage/drivers/utils_ceph.go index f520443197d..cc7cfa0ade1 100644 --- a/incus/storage/drivers/utils_ceph.go +++ b/incus/storage/drivers/utils_ceph.go @@ -49,7 +49,7 @@ func CephGetRBDImageName(vol Volume, snapName string, zombie bool) string { } } - // If the volume is to be in zombie state (i.e. not tracked by the LXD database), + // If the volume is to be in zombie state (i.e. not tracked in the database), // prefix the output with "zombie_". if zombie { out = fmt.Sprintf("zombie_%s", out) } diff --git a/incus/storage/drivers/volume.go b/incus/storage/drivers/volume.go index c7cf8b0f3fa..89fa28efee6 100644 --- a/incus/storage/drivers/volume.go +++ b/incus/storage/drivers/volume.go @@ -16,8 +16,8 @@ import ( "github.com/cyphar/incus/shared/units" ) -// tmpVolSuffix Suffix to use for any temporary volumes created by LXD. -const tmpVolSuffix = ".lxdtmp" +// tmpVolSuffix Suffix to use for any temporary volumes created by Incus. +const tmpVolSuffix = ".incustmp" // isoVolSuffix suffix used for iso content type volumes. const isoVolSuffix = ".iso"