add(default)/delete root_mnt_pt(pool)", or returns
+ None if the disk member sanity check fails: ie if all the supplied devices
+ are either not pool members (when deleting) or are already pool members
+ (when adding). If every device in the supplied dev_list fails this test
+ then no command is generated and None is returned.
:param pool: btrfs pool object
- :param dev_list_byid: list of devices to add/delete in by-id (without
- path).
- :param add: when true (default) or not specified then attempt to add
- dev_list devices to pool, or when specified as True attempt to delete
+ :param dev_list_byid: by-id device list to add/delete (without paths).
+ :param add: when True (default) or not specified then 'device add'
+ dev_list devices to pool, when False 'device delete'
dev_list devices from pool.
- :return: Tuple of results from run_command(generated command) or None if
- the device member/pool sanity check fails.
+ :return: appropriate btrfs command, or None if member sanity checks failed.
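+ Illustrative return value (a sketch only; member and pool names
+ hypothetical):
+ [BTRFS, 'device', 'add', 'ata-QEMU_HARDDISK_QM00007', '/mnt2/rock-pool']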
"""
if pool.has_missing_dev and not add:
if dev_list_byid == []:
@@ -345,24 +429,27 @@ def resize_pool(pool, dev_list_byid, add=True):
root_mnt_pt = mount_root(pool)
cur_dev = cur_devices(root_mnt_pt)
resize_flag = 'add'
- if (not add):
+ if not add:
resize_flag = 'delete'
resize_cmd = [BTRFS, 'device', resize_flag, ]
# Until we verify that all devices are or are not already members of the
# given pool, depending on if we are adding (default) or removing
# (add=False), we set our resize flag to false.
resize = False
+ # TODO: This test passes if just one member passes. Revisit,
+ # TODO: as we are after a fail if just one member fails.
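+ # A stricter all-members variant (a sketch only, not yet enacted):
+ # resize = dev_list_byid != [] and all(
+ #     (d not in cur_dev) if add else (d in cur_dev or d == 'missing')
+ #     for d in dev_list_byid)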
for d in dev_list_byid:
if (resize_flag == 'add' and (d not in cur_dev)) or \
(resize_flag == 'delete' and ((d in cur_dev) or
d == 'missing')):
resize = True # Basic disk member of pool sanity check passed.
resize_cmd.append(d)
- if (not resize):
- logger.debug('Note: resize_pool() taking no action.')
+ if not resize:
+ logger.debug('Resize pool - member sanity check failed. '
+ 'Returning None as btrfs add/delete command.')
return None
resize_cmd.append(root_mnt_pt)
- return run_command(resize_cmd)
+ return resize_cmd
def mount_root(pool):
@@ -1505,6 +1592,21 @@ def scrub_status(pool):
stats[fields[0]] = int(fields[1])
return stats
+@task()
+def start_resize_pool(cmd):
+ """
+ Note: for 'device add', which is almost instantaneous, we are currently
+ called without the async function extension (start_resize_pool.async());
+ this bypasses our @task() decorator and we are called directly.
+
+ From https://github.com/dmgctrl/django-ztask we have:
+ "It is a recommended best practice that instead of passing a Django model
+ object to a task, you instead pass along the model's
+ ID or primary key, and re-get the object in the task function."
+ :param cmd: btrfs dev add/delete command in run_command() format (ie list).
+ """
+ logger.debug('Resize pool command ({}).'.format(cmd))
+ run_command(cmd)
@task()
def start_balance(mnt_pt, force=False, convert=None):
@@ -1576,6 +1678,99 @@ def balance_status(pool):
return stats
+def get_devid_usage(mnt_pt):
+ """
+ Extracts device usage information for a given mount point; includes
+ detached devices, where the devid is preserved but the device name is
+ replaced by 'missing': there can be multiple 'missing' entries.
+ Wraps 'btrfs device usage -b mnt_pt'.
+ Used by _update_disk_state() to retrieve detached disk size/allocated info.
+ :return: btrfs devid indexed dict with DevUsageInfo values
+ """
+ ALLOCATION_TYPES = ['Data', 'Metadata', 'System']
+ devid_usage_info = {}
+ cmd = [BTRFS, 'device', 'usage', '-b', mnt_pt]
+ o, e, rc = run_command(cmd)
+ devid = None # None allows for fast comparison for flag use.
+ temp_name = 'missing'
+ size = 0
+ allocated = 0
+ for line in o:
+ if line == '':
+ continue
+ fields = line.replace(',', ' ').split()
+ if fields[1] == 'slack:':
+ continue # We are not interested currently so skip for speed.
+ if fields[1] == 'ID:': # New device section: set devid index
+ devid = int(fields[2])
+ temp_name = fields[0]
+ elif fields[1] == 'size:':
+ size = int(fields[2]) / 1024 # Bytes to KB
+ elif fields[0] in ALLOCATION_TYPES:
+ allocated += int(fields[2]) / 1024 # Bytes to KB
+ elif fields[0] == 'Unallocated:':
+ # End of a legitimate device entry so record our tally so far:
+ devid_usage_info[devid] = DevUsageInfo(temp_name=temp_name,
+ size=size,
+ allocated=allocated)
+ allocated = 0 # Reset our per device tally prior to next entry.
+ # logger.debug('get_devid_usage() returning {}.'.format(devid_usage_info))
+ return devid_usage_info
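+ # e.g. get_devid_usage('/mnt2/rock-pool') might return (hypothetical):
+ # {1: DevUsageInfo(temp_name='/dev/sda', size=5242880, allocated=1343488)}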
+
+
+def balance_status_internal(pool):
+ """
+ As internal balance events, such as are initiated by btrfs dev remove, are
+ not reported by 'btrfs balance status', we have to devise our own system;
+ at least until these events, which can last hours, are surfaced otherwise.
+ Here we parse the output of 'btrfs dev usage -b mnt_pt' and look for a
+ negative unallocated value. This negative value progressively approaches
+ zero, whereupon the task is complete and the associated device disappears:
+ having had all of its data removed.
+ Note that when more than one disk is removed btrfs internally removes them
+ one at a time, so we need only find a single negative instance.
+
+ Until we get a better option this function serves a minimal subset of the
+ functions provided for regular balances by balance_status(pool) but for
+ 'internal' balances (our name) that are auto initiated on disk removal.
+ A future enhancement could be to ascertain partial percent done, which may
+ be viable via get_devid_usage(); but since a detached device's size can be
+ zero, and both allocated and the negative unallocated value approach zero,
+ this may be tricky as we have no start state datum: leaving only a whole
+ pool analysis of the indicated disks, which the serial nature of removal
+ in turn hampers.
+ :param pool: Pool db object.
+ :return: dictionary containing parsed info about the balance status,
+ ie indexed by 'status' and 'percent_done'.
+ """
+ stats = {'status': 'unknown', }
+ try:
+ mnt_pt = mount_root(pool)
+ except Exception as e:
+ logger.error('Exception while refreshing internal balance status '
+ 'for Pool({}). Returning '
+ '"unknown": {}'.format(pool.name, e.__str__()))
+ return stats
+ cmd = [BTRFS, 'dev', 'usage', '-b', mnt_pt]
+ o, err, rc = run_command(cmd, throw=False)
+ unallocated = None
+ for line in o:
+ if line == '':
+ continue
+ fields = line.replace(',', ' ').split()
+ if fields[0] == 'Unallocated:':
+ unallocated = int(fields[1])
+ if unallocated < 0:
+ stats['status'] = 'running'
+ break
+ if unallocated >= 0:
+ # We have no 'tell', so we report a finished balance as there is no
+ # evidence of one happening.
+ stats['status'] = 'finished'
+ stats['percent_done'] = 100
+ return stats
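+ # e.g. returns {'status': 'running'} while a negative unallocated value
+ # persists, or {'status': 'finished', 'percent_done': 100} otherwise.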
+
+
def device_scan(dev_byid_list=['all']):
"""
When called with no parameters a 'btrfs device scan' is executed, ie a
diff --git a/src/rockstor/scripts/flash_optimize.py b/src/rockstor/scripts/flash_optimize.py
index ee222a9a5..f0a1efdf5 100644
--- a/src/rockstor/scripts/flash_optimize.py
+++ b/src/rockstor/scripts/flash_optimize.py
@@ -73,7 +73,7 @@ def trim_support(disk):
run_command([YUM, 'install', '-y', 'hdparm'])
logging.info('Installed hdparm successfully')
- o, e, rc = run_command(['hdparm', '-I', '/dev/%s' % disk])
+ o, e, rc = run_command(['hdparm', '-I', disk])
for l in o:
if (re.search('Data Set Management TRIM supported', l) is not None):
logging.debug('TRIM supported. info: %s' % l)
@@ -84,7 +84,7 @@ def trim_support(disk):
def is_flash(disk):
flash = False
- o, e, rc = run_command(['udevadm', 'info', '--path=/sys/block/%s' % disk])
+ o, e, rc = run_command(['udevadm', 'info', '--name', disk])
for l in o:
if (re.search('ID_BUS=', l) is not None):
if (l.strip().split()[1].split('=')[1] != 'usb'):
@@ -98,6 +98,8 @@ def is_flash(disk):
# /sys/block/disk/queue/rotational is not reliable, but if [deadline] is in
# /sys/block/disk/queue/scheduler, it's fair to assume flash
logging.debug('Checking if scheduler is set to [deadline] for %s' % disk)
+ disk = disk.split('/')[-1] # strip off the path
+ # Note that the following may fail for sys on luks dev.
with open('/sys/block/%s/queue/scheduler' % disk) as sfo:
for l in sfo.readlines():
if (re.search('\[deadline\]', l) is not None):
diff --git a/src/rockstor/storageadmin/migrations/0008_auto_20190115_1637.py b/src/rockstor/storageadmin/migrations/0008_auto_20190115_1637.py
new file mode 100644
index 000000000..4d473da45
--- /dev/null
+++ b/src/rockstor/storageadmin/migrations/0008_auto_20190115_1637.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('storageadmin', '0007_auto_20181210_0740'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='disk',
+ name='allocated',
+ field=models.BigIntegerField(default=0),
+ ),
+ migrations.AddField(
+ model_name='disk',
+ name='devid',
+ field=models.PositiveSmallIntegerField(default=0),
+ ),
+ migrations.AddField(
+ model_name='poolbalance',
+ name='internal',
+ field=models.BooleanField(default=False),
+ ),
+ ]
diff --git a/src/rockstor/storageadmin/models/disk.py b/src/rockstor/storageadmin/models/disk.py
index 34dd0fa6a..68831e579 100644
--- a/src/rockstor/storageadmin/models/disk.py
+++ b/src/rockstor/storageadmin/models/disk.py
@@ -44,8 +44,14 @@ class Disk(models.Model):
mostly derived from model and serial number.
"""
name = models.CharField(max_length=128, unique=True)
- """total size in KB"""
+ """btrfs devid 0 is place holder as real devids start from 1"""
+ devid = models.PositiveSmallIntegerField(default=0) # 0 to 32767
+ """total size in KB. Zero if btrfs device detached/last stage of delete."""
size = models.BigIntegerField(default=0)
+ """allocated in KB: ie per device 'used' in 'btrfs fi show' and total
+ listed per device in 'btrfs fi usage /mnt_pt'.
+ """
+ allocated = models.BigIntegerField(default=0)
"""true if disk went offline"""
offline = models.BooleanField(default=False)
"""whether the disk is partitioned at the moment. relevent for root disks
diff --git a/src/rockstor/storageadmin/models/pool_balance.py b/src/rockstor/storageadmin/models/pool_balance.py
index bd873c628..99fb97fbf 100644
--- a/src/rockstor/storageadmin/models/pool_balance.py
+++ b/src/rockstor/storageadmin/models/pool_balance.py
@@ -30,6 +30,8 @@ class PoolBalance(models.Model):
start_time = models.DateTimeField(auto_now=True)
end_time = models.DateTimeField(null=True)
percent_done = models.IntegerField(default=0)
+ # Flag to denote internal auto initiated balance ie during dev delete.
+ internal = models.BooleanField(default=False)
class Meta:
app_label = 'storageadmin'
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst
index 78e7d9859..5a06738bc 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst
+++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst
@@ -30,6 +30,7 @@
{{else}}
{{model.mount_status}}
{{/if}}
-
+
+ UUID: {{model.uuid}}
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/poolrebalance_table_template.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/poolrebalance_table_template.jst
index 6dfcf4e17..4f5fe4692 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/poolrebalance_table_template.jst
+++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/poolrebalance_table_template.jst
@@ -26,9 +26,11 @@
Id |
- Status |
+ Status |
+ Type |
Start Time |
- Percent finished |
+
+ Percent finished |
Errors or Notes |
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/remove_disks_complete.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/remove_disks_complete.jst
index 598de90cd..5cb9b48e8 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/remove_disks_complete.jst
+++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/remove_disks_complete.jst
@@ -6,6 +6,17 @@
-
Resize initiated - disk addition or raid change entails a subsequent Rockstor visible balance which may take several hours. Check status in the Balances tab. Disk delete progress is currently unmonitored.
+ Resize initiated - the associated balance can take several hours to complete and usually negatively impacts system performance.
+
Check the Balances tab for status. A page refresh will be required.
+
+
+
+
+ Expect reduced Web-UI responsiveness until this balance has finished.
+
Removed disks will have progressively smaller Allocated GB until they are finally removed.
+
Please note: a detached disk removal can fail with status ending:
+
"... no missing devices found to remove"
+
If you encounter this error see the Maintenance required section for guidance then try again after a reboot.
+
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize_pool_info.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize_pool_info.jst
index e8c81f1fd..b16a2c45d 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize_pool_info.jst
+++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize_pool_info.jst
@@ -42,7 +42,9 @@
Name |
Temp Name |
+ Btrfs DevID |
Capacity |
+ Allocated (%) |
Write I/O errors |
Read I/O errors |
Flush I/O errors |
@@ -66,7 +68,11 @@
{{this.temp_name}}
|
+
+ {{btrfsDevID this.devid}}
+ |
{{humanReadableSize this.size}} |
+ {{humanReadableAllocatedPercent this.allocated this.size}} |
{{ioErrorStatsTableData this.io_error_stats}}
{{/each}}
@@ -91,7 +97,11 @@
{{this.temp_name}}
|
+
+ {{btrfsDevID this.devid}}
+ |
{{humanReadableSize this.size}} |
+ {{humanReadableAllocatedPercent this.allocated this.size}} |
{{ioErrorStatsTableData this.io_error_stats}}
{{/each}}
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js
index ec3639cf5..075ca1bc7 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js
@@ -263,7 +263,7 @@ PoolDetailsLayoutView = RockstorLayoutView.extend({
if (confirm('If any detached members are listed use the Resize/ReRaid button - "Remove disks" option instead. Click OK only if "(Some Missing)" and no "detached-..." appear in the Pool page Disks sub-section?')) {
var raid_level = _this.pool.get('raid');
var disk_names = ['missing'];
- var delete_missing_msg = ('Delete missing is initiated (can take several hours), a progress report is currently unavailable. Balance attempts are blocked for this period.');
+ var delete_missing_msg = ('Delete missing initiated - associated balance can take several hours and negatively impact system performance. Check Balances tab for status.');
$.ajax({
url: url,
type: 'PUT',
@@ -487,6 +487,29 @@ PoolDetailsLayoutView = RockstorLayoutView.extend({
return humanize.filesize(size * 1024);
});
+ Handlebars.registerHelper('humanReadableAllocatedPercent', function(allocated, size) {
+ var html = '';
+ html += humanize.filesize(allocated * 1024);
+ // One decimal place of % = 1 GB per TB = normal allocation unit.
+ if (size == 0) {
+ // we likely have a disk delete/removal in operation or a
+ // missing / detached device so flag.
+ html += ' Missing or removal in progress ';
+ } else {
+ html += ' (' + ((allocated / size) * 100).toFixed(1) + '%)';
+ }
+ return new Handlebars.SafeString(html);
+ });
+
+ Handlebars.registerHelper('btrfsDevID', function(devid){
+ if (devid !== 0) {
+ return devid
+ }
+ var html = ' Page refresh required ';
+ return new Handlebars.SafeString(html)
+ });
+
Handlebars.registerHelper('isRoot', function(role){
if (role == 'root') {
return true;
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool_rebalance_table.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool_rebalance_table.js
index 6b40d8bb5..b08430468 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool_rebalance_table.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool_rebalance_table.js
@@ -115,11 +115,31 @@ PoolRebalanceTableModule = RockstorModuleView.extend({
html += '' + poolrebalance.get('id') + ' | ';
html += '' + poolrebalance.get('status') + ' | ';
html += '';
+ var internal_balance = poolrebalance.get('internal');
+ var percent_done = poolrebalance.get('percent_done');
+ if (internal_balance) {
+ html += 'Disk Removal'
+ } else {
+ html += 'Regular'
+ }
+ html += ' | ';
+ html += '';
if (poolrebalance.get('start_time')) {
html += moment(poolrebalance.get('start_time')).format(RS_DATE_FORMAT);
}
html += ' | ';
- html += '' + poolrebalance.get('percent_done') + ' | ';
+ // html += '';
+ // if (poolrebalance.get('end_time')) {
+ // html += moment(poolrebalance.get('end_time')).format(RS_DATE_FORMAT);
+ // }
+ // html += ' | ';
+ html += '';
+ if (percent_done != 100 && internal_balance) {
+ html += 'unavailable';
+ } else {
+ html += percent_done;
+ }
+ html += ' | ';
html += '';
if (poolrebalance.get('message') != null) {
html += poolrebalance.get('message');
diff --git a/src/rockstor/storageadmin/views/command.py b/src/rockstor/storageadmin/views/command.py
index 52c2a5f52..caab8dfa2 100644
--- a/src/rockstor/storageadmin/views/command.py
+++ b/src/rockstor/storageadmin/views/command.py
@@ -23,9 +23,8 @@
from storageadmin.auth import DigestAuthentication
from rest_framework.permissions import IsAuthenticated
from storageadmin.views import DiskMixin
-from system.osi import (uptime, kernel_info, get_dev_byid_name,
- get_device_path)
-from fs.btrfs import (mount_share, mount_root, get_pool_info,
+from system.osi import (uptime, kernel_info, get_device_mapper_map)
+from fs.btrfs import (mount_share, mount_root, get_dev_pool_info,
pool_raid, mount_snap)
from system.ssh import (sftp_mount_map, sftp_mount)
from system.services import systemctl
@@ -56,6 +55,10 @@ class CommandView(DiskMixin, NFSExportMixin, APIView):
@staticmethod
@transaction.atomic
def _refresh_pool_state():
+ # Get map of dm-0 to /dev/mapper members ie luks-.. devices.
+ mapped_devs = get_device_mapper_map()
+ # Get temp_names (kernel names) to btrfs pool info for attached devs.
+ dev_pool_info = get_dev_pool_info()
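+ # Illustrative shapes (sketch; keys/values hypothetical):
+ # mapped_devs:   {'dm-0': 'luks-dd6589a6-14aa-4a5a-bcea-fe72e2dec333'}
+ # dev_pool_info: {'/dev/sda': named tuple exposing at least label & uuid}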
for p in Pool.objects.all():
# If our pool has no disks, detached included, then delete it.
# We leave pools with all detached members in place intentionally.
@@ -70,22 +73,22 @@ def _refresh_pool_state():
continue
try:
# Get and save what info we can prior to mount.
- first_attached_dev = p.disk_set.attached().first()
- is_root_pool = (p.role == 'root')
- # Observe any redirect role by using target_name.
- byid_disk_name, is_byid = get_dev_byid_name(
- get_device_path(first_attached_dev.target_name))
- if is_byid:
- pool_info = get_pool_info(first_attached_dev.target_name,
- is_root_pool)
- pool_name = pool_info['label']
- else:
+ first_dev = p.disk_set.attached().first()
+ # Use target_name to account for redirect role.
+ if first_dev.target_name == first_dev.temp_name:
logger.error('Skipping pool ({}) mount as attached disk '
'({}) has no by-id name (no serial # ?)'.
- format(p.name,
- first_attached_dev.target_name))
+ format(p.name, first_dev.target_name))
continue
- p.name = pool_name
+ if first_dev.temp_name in mapped_devs:
+ dev_tmp_name = '/dev/mapper/{}'.format(
+ mapped_devs[first_dev.temp_name])
+ else:
+ dev_tmp_name = '/dev/{}'.format(first_dev.temp_name)
+ # For now we call get_dev_pool_info() once for each pool.
+ pool_info = dev_pool_info[dev_tmp_name]
+ p.name = pool_info.label
+ p.uuid = pool_info.uuid
p.save()
mount_root(p)
p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
diff --git a/src/rockstor/storageadmin/views/disk.py b/src/rockstor/storageadmin/views/disk.py
index 4843c76a0..421a70c16 100644
--- a/src/rockstor/storageadmin/views/disk.py
+++ b/src/rockstor/storageadmin/views/disk.py
@@ -21,7 +21,8 @@
from django.db import transaction
from storageadmin.models import (Disk, Pool, Share)
from fs.btrfs import (enable_quota, mount_root,
- get_pool_info, pool_raid)
+ get_pool_info, pool_raid, get_dev_pool_info,
+ set_pool_label, get_devid_usage)
from storageadmin.serializers import DiskInfoSerializer
from storageadmin.util import handle_exception
from share_helpers import (import_shares, import_snapshots)
@@ -82,7 +83,7 @@ def _update_disk_state():
serial_numbers_seen = []
# Acquire a dictionary of crypttab entries, dev uuid as indexed.
dev_uuids_in_crypttab = get_crypttab_entries()
- # Acquire a dictionary of lsblk /dev names to /dev/disk/by-id names
+ # Acquire a dictionary of temp_names (no path) to /dev/disk/by-id names
byid_name_map = get_byid_name_map()
# Make sane our db entries in view of what we know we have attached.
# Device serial number is only known external unique entry, scan_disks
@@ -121,7 +122,8 @@ def _update_disk_state():
do.save() # make sure all updates are flushed to db
# Our db now has no device name info: all dev names are place holders.
# Iterate over attached drives to update the db's knowledge of them.
- # Kernel dev names are unique so safe to overwrite our db unique name.
+ # Get temp_name (kernel dev names) to btrfs pool info for all attached.
+ dev_pool_info = get_dev_pool_info()
for d in disks:
# start with an empty disk object
dob = None
@@ -131,7 +133,9 @@ def _update_disk_state():
disk_roles_identified = {}
# Convert our transient but just scanned so current sda type name
# to a more useful by-id type name as found in /dev/disk/by-id
- byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
+ # Note path is removed as we store, ideally, byid in Disk.name.
+ byid_disk_name, is_byid = get_dev_byid_name(d.name,
+ remove_path=True)
# If the db has an entry with this disk's serial number then
# use this db entry and update the device name from our new scan.
if (Disk.objects.filter(serial=d.serial).exists()):
@@ -159,10 +163,14 @@ def _update_disk_state():
# non btrfs uuids to track filesystems or LUKS containers.
# Leaving as is for now to avoid db changes.
dob.btrfs_uuid = d.uuid
- # If attached disk has an fs and it isn't btrfs
- if (d.fstype is not None and d.fstype != 'btrfs'):
- # blank any btrfs_uuid it may have had previously.
+ # If attached disk isn't btrfs:
+ if d.fstype != 'btrfs':
+ # blank any btrfs related info it may have had previously.
+ # Pool affiliation addressed later in this loop.
+ # TODO: Consider moving later 'else dob.pool = None' to here.
dob.btrfs_uuid = None
+ dob.devid = 0
+ dob.allocated = 0
# ### BEGINNING OF ROLE FIELD UPDATE ###
# Update the role field with scan_disks findings.
# SCAN_DISKS_KNOWN_ROLES a list of scan_disks identifiable roles.
@@ -170,7 +178,8 @@ def _update_disk_state():
# N.B. We have a minor legacy issue in that prior to using json
# format for the db role field we stored one of 2 strings.
# If either of these 2 strings are found reset to db default of
- # None
+ # None.
+ # TODO: Can be removed post openSUSE as then no legacy installs.
if dob.role == 'isw_raid_member'\
or dob.role == 'linux_raid_member':
# These are the only legacy non json formatted roles used.
@@ -288,8 +297,8 @@ def _update_disk_state():
# In the above we fail over to "" on failed index for now.
disk_roles_identified['partitions'] = byid_partitions
# Now we join the previous non scan_disks identified roles dict
- # with those we have identified from our fresh scan_disks() data
- # and return the result to our db entry in json format.
+ # with those we have identified/updated from our fresh scan_disks()
+ # data and return the result to our db entry in json format.
# Note that dict of {} isn't None
if (non_scan_disks_roles != {}) or (disk_roles_identified != {}):
combined_roles = dict(non_scan_disks_roles,
@@ -297,22 +306,35 @@ def _update_disk_state():
dob.role = json.dumps(combined_roles)
else:
dob.role = None
- # END OF ROLE FIELD UPDATE
- # If our existing Pool db knows of this disk's pool:
- # First find pool association if any:
- if is_byid and d.fstype == 'btrfs':
- # use the canonical reference from get_pool_info()
- # TODO: The following breaks with btrfs in partition, needs:
- # TODO: if d.parted then extract the dev from d.partitions that
- # TODO: has value 'btrfs' and get it's byid_disk_name.
- # TODO: Added in pr #1949 commit a608d18 released (3.9.2-32)
- p_info = get_pool_info(byid_disk_name, d.root)
- # The above call also enacts a pool auto labeling mechanism.
- pool_name = p_info['label']
- else:
- # We fail over to the less robust disk label as no byid name.
- pool_name = d.label
- if Pool.objects.filter(name=pool_name).exists():
+ # ### END OF ROLE FIELD UPDATE ###
+ # Does our existing Pool db know of this disk's pool?
+ # Find pool association, if any, of the current disk:
+ pool_name = None # Until we find otherwise.
+ if d.fstype == 'btrfs':
+ # Use canonical 'btrfs fi show' source via get_dev_pool_info()
+ dev_name = d.name
+ if d.partitions != {}: # could have btrfs fs from a partition?
+ # d.partitions={'/dev/vdc1': 'vfat', '/dev/vdc2': 'btrfs'}
+ for partition, fs in d.partitions.iteritems():
+ if fs == 'btrfs': # We only allow one btrfs part / dev
+ dev_name = partition
+ break
+ p_info = dev_pool_info[dev_name]
+ pool_name = p_info.label
+ # TODO: On first call we reset a 'none' pool label member-count times!
+ # Corner case but room for efficiency improvement.
+ # Consider building a list of pools relabeled to address issue.
+ if pool_name == 'none':
+ pool_name = set_pool_label(p_info.uuid, dev_name, d.root)
+ # Update our disk database entry with btrfs specific data.
+ dob.devid = p_info.devid
+ # For btrfs we override disk size with more relevant btrfs size
+ # which should also fix btrfs in partition size as whole disk.
+ dob.size = p_info.size
+ dob.allocated = p_info.allocated
+ # Quick 'not None' test first to avoid redundant lengthy db filter.
+ if pool_name is not None \
+ and Pool.objects.filter(name=pool_name).exists():
# update the disk db object's pool field accordingly.
dob.pool = Pool.objects.get(name=pool_name)
# this is for backwards compatibility. root pools created
@@ -322,7 +344,7 @@ def _update_disk_state():
if (d.root is True):
dob.pool.role = 'root'
dob.pool.save()
- else: # disk not member of db pool via get_pool_info() / d.label
+ else: # disk not member of db pool via get_dev_pool_info()
dob.pool = None
# If no db pool has yet been found for this disk and
# the attached disk is our root disk (flagged by scan_disks):
@@ -340,8 +362,10 @@ def _update_disk_state():
logger.debug('++++ Creating special system pool db entry.')
root_compression = 'no'
root_raid = pool_raid('/')['data']
+ # scan_disks() has already acquired our fs uuid so inherit.
+ # We have already established btrfs as the fs type.
p = Pool(name=pool_name, raid=root_raid, role='root',
- compression=root_compression)
+ compression=root_compression, uuid=d.uuid)
p.save()
p.disk_set.add(dob)
# update disk db object to reflect special root pool status
@@ -349,15 +373,9 @@ def _update_disk_state():
dob.save()
p.size = p.usage_bound()
enable_quota(p)
- # scan_disks() has already acquired our fs uuid so inherit.
- # We have already established btrfs as the fs type.
- p.uuid = d.uuid
p.save()
else:
- # Likely unlabeled pool and no by-id name for system disk
- # and given we rely on get_pool_info(), which takes by-id
- # names, to label unlabelled pools we bail out for now with
- # an error log message.
+ # Likely unlabeled pool & auto label failed - system disk.
logger.error('Skipping system pool creation. Ensure the '
'system disk has a unique serial.')
# save our updated db disk object
@@ -398,6 +416,30 @@ def _update_disk_state():
except Exception as e:
logger.exception(e)
do.smart_available = do.smart_enabled = False
+ else: # We have offline / detached Disk db entries.
+ # Update detached disks previously known to a pool, i.e. missing.
+ # After a reboot device name is lost and replaced by 'missing'
+ # so we compare via btrfs devid stored prior to detached state.
+ # N.B. a potential flag mechanism could denote a required reboot
+ # when a missing device has a non existent dev entry rather than
+ # 'missing'; otherwise remove missing / detached fails with:
+ # "no missing devices found to remove".
+ # Suspect this will be fixed in future btrfs variants.
+ if do.pool is not None and do.pool.is_mounted:
+ mnt_pt = '{}{}'.format(settings.MNT_PT, do.pool.name)
+ devid_usage = get_devid_usage(mnt_pt)
+ if do.devid in devid_usage:
+ dev_info = devid_usage[do.devid]
+ do.size = dev_info.size
+ do.allocated = dev_info.allocated
+ else:
+ # Our device has likely been removed from this pool as
+ # its devid no longer shows up in its associated pool.
+ # Reset all btrfs related elements for disk db object:
+ do.pool = None
+ do.btrfs_uuid = None
+ do.devid = 0 # db default and int flag for None.
+ do.allocated = 0 # No devid_usage = no allocation.
do.save()
ds = DiskInfoSerializer(Disk.objects.all().order_by('name'), many=True)
return Response(ds.data)
@@ -640,6 +682,8 @@ def _wipe(self, did, request):
# either way (partitioned or not) we have just wiped any btrfs so we
# universally remove the btrfs_uuid.
disk.btrfs_uuid = None
+ disk.devid = 0
+ disk.allocated = 0
disk.save()
return Response(DiskInfoSerializer(disk).data)
@@ -672,9 +716,11 @@ def _luks_format(self, did, request, passphrase):
raise Exception(e_msg)
luks_format_disk(disk_name, passphrase)
disk.parted = isPartition # should be False by now.
- # The following value may well be updated with a more informed truth
+ # The following values may well be updated with a more informed truth
# from the next scan_disks() run via _update_disk_state()
disk.btrfs_uuid = None
+ disk.devid = 0
+ disk.allocated = 0
# Rather than await the next _update_disk_state() we populate our
# LUKS container role.
roles = {}
@@ -715,17 +761,26 @@ def _btrfs_disk_import(self, did, request):
p_info = get_pool_info(disk_name)
# Create our initial pool object, default to no compression.
po = Pool(name=p_info['label'], raid="unknown",
- compression="no")
+ compression="no", uuid=p_info['uuid'])
# need to save it so disk objects get updated properly in the for
# loop below.
po.save()
+ # p_info['disks'] = by_id name indexed dict with named tuple values
for device in p_info['disks']:
+ # Database uses base dev names in by-id format: acquire via:
disk_name, isPartition = \
self._reverse_role_filter_name(device, request)
+ # All bar system disk are stored in db as base byid name,
+ # a partition, if used, is then held in a redirect role.
+ # System's partition name is considered its base name; but
+ # we don't have to import our system pool.
do = Disk.objects.get(name=disk_name)
do.pool = po
- # update this disk's parted property
+ # Update this disk's parted, devid, and used properties.
do.parted = isPartition
+ do.devid = p_info['disks'][device].devid
+ do.size = p_info['disks'][device].size
+ do.allocated = p_info['disks'][device].allocated
if isPartition:
# ensure a redirect role to reach this partition; ie:
# "redirect": "virtio-serial-3-part2"
@@ -750,7 +805,7 @@ def _btrfs_disk_import(self, did, request):
import_snapshots(share)
return Response(DiskInfoSerializer(disk).data)
except Exception as e:
- e_msg = ('Failed to import any pool on device id ({}). '
+ e_msg = ('Failed to import any pool on device db id ({}). '
'Error: ({}).').format(did, e.__str__())
handle_exception(Exception(e_msg), request)
diff --git a/src/rockstor/storageadmin/views/pool.py b/src/rockstor/storageadmin/views/pool.py
index 33844c549..3948a1e55 100644
--- a/src/rockstor/storageadmin/views/pool.py
+++ b/src/rockstor/storageadmin/views/pool.py
@@ -26,9 +26,10 @@
from django.db import transaction
from storageadmin.serializers import PoolInfoSerializer
from storageadmin.models import (Disk, Pool, Share, PoolBalance)
-from fs.btrfs import (add_pool, pool_usage, resize_pool, umount_root,
+from fs.btrfs import (add_pool, pool_usage, resize_pool_cmd, umount_root,
btrfs_uuid, mount_root, start_balance, usage_bound,
- remove_share, enable_quota, disable_quota, rescan_quotas)
+ remove_share, enable_quota, disable_quota, rescan_quotas,
+ start_resize_pool)
from system.osi import remount, trigger_udev_update
from storageadmin.util import handle_exception
from django.conf import settings
@@ -283,11 +284,44 @@ def _balance_start(self, pool, force=False, convert=None):
for t in Task.objects.all():
if (pickle.loads(t.args)[0] == mnt_pt):
tid = t.uuid
- time.sleep(0.2)
+ time.sleep(0.2) # 200 milliseconds
count += 1
logger.debug('balance tid = ({}).'.format(tid))
return tid
+ def _resize_pool_start(self, pool, dnames, add=True):
+ """
+ Async initiator for a pool resize (device add/delete): a device delete
+ initiates a btrfs internal balance which is not accessible to
+ 'btrfs balance status' but is a balance nevertheless.
+ Based on _balance_start().
+ :param pool: Pool object.
+ :param dnames: list of by-id device names without paths.
+ :param add: True if adding dnames, False if deleting (removing) dnames.
+ :return: 0 if no command was generated or an add was actioned
+ synchronously, otherwise the django-ztask uuid of the async delete task.
+ """
+ tid = 0
+ cmd = resize_pool_cmd(pool, dnames, add)
+ if cmd is None:
+ return tid
+ logger.info('Beginning device resize on pool ({}). '
+ 'Changed member devices:({}).'.format(pool.name, dnames))
+ if add:
+ # Mostly instantaneous so avoid complexity/overhead of django ztask
+ start_resize_pool(cmd)
+ return tid
+ # Device delete initiates long running internal balance: start async.
+ start_resize_pool.async(cmd)
+ # Try to find django-ztask id for (25*0.2) 5 seconds via cmd args match
+ count = 0
+ while tid == 0 and count < 25:
+ for t in Task.objects.all():
+ if pickle.loads(t.args)[0] == cmd:
+ tid = t.uuid
+ time.sleep(0.2) # 200 milliseconds
+ count += 1
+ logger.debug('Pool resize tid = ({}).'.format(tid))
+ return tid
class PoolListView(PoolMixin, rfc.GenericView):
def get_queryset(self, *args, **kwargs):
@@ -490,13 +524,14 @@ def put(self, request, pid, command):
'during a balance process.').format(pool.name)
handle_exception(Exception(e_msg), request)
- # TODO: run resize_pool() as async task like start_balance()
- resize_pool(pool, dnames) # None if no action
+ # _resize_pool_start() add dev mode is quick so no async or tid
+ self._resize_pool_start(pool, dnames)
force = False
# During dev add we also offer raid level change, if selected
# blanket apply '-f' to allow for reducing metadata integrity.
if new_raid != pool.raid:
force = True
+ # Django-ztask initialization as balance is long running.
tid = self._balance_start(pool, force=force, convert=new_raid)
ps = PoolBalance(pool=pool, tid=tid)
ps.save()
@@ -563,7 +598,7 @@ def put(self, request, pid, command):
usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
size_cut = 0
for d in disks:
- size_cut += d.size
+ size_cut += d.allocated
if size_cut >= (pool.size - usage):
e_msg = ('Removing disks ({}) may shrink the pool by '
'{} KB, which is greater than available free '
@@ -571,21 +606,21 @@ def put(self, request, pid, command):
'not supported.').format(dnames, size_cut, usage)
handle_exception(Exception(e_msg), request)
- # TODO: run resize_pool() as async task like start_balance(),
- # particularly important on device delete as it initiates an
- # internal volume balance which cannot be monitored by:
- # btrfs balance status.
- # See https://github.com/rockstor/rockstor-core/issues/1722
- # Hence we need also to add a 'DIY' status / percentage
- # reporting method.
- resize_pool(pool, dnames, add=False) # None if no action
- # Unlike resize_pool() with add=True a delete has an implicit
- # balance where the deleted disks contents are re-distributed
- # across the remaining disks.
+ # Unlike _resize_pool_start() with add=True a remove has an
+ # implicit balance where the removed disks contents are
+ # re-distributed across the remaining pool members.
+ # This internal balance cannot currently be monitored by the
+ # usual 'btrfs balance status /mnt_pt' command. So we have to
+ # use our own mechanism to assess its status.
+ # Django-ztask initialization:
+ tid = self._resize_pool_start(pool, dnames, add=False)
+ ps = PoolBalance(pool=pool, tid=tid, internal=True)
+ ps.save()
- for d in disks:
- d.pool = None
- d.save()
+ # Setting disk.pool = None for all removed members is redundant
+ # as our next disk scan will re-find them until such time as
+ # our async task, and its associated dev remove, has completed
+ # its internal balance. This can take hours.
else:
e_msg = 'Command ({}) is not supported.'.format(command)
diff --git a/src/rockstor/storageadmin/views/pool_balance.py b/src/rockstor/storageadmin/views/pool_balance.py
index 49639d2ac..0599aeba1 100644
--- a/src/rockstor/storageadmin/views/pool_balance.py
+++ b/src/rockstor/storageadmin/views/pool_balance.py
@@ -23,7 +23,7 @@
from storageadmin.serializers import PoolBalanceSerializer
from storageadmin.models import (Pool, PoolBalance)
import rest_framework_custom as rfc
-from fs.btrfs import balance_status
+from fs.btrfs import balance_status, balance_status_internal
from pool import PoolMixin
import logging
@@ -69,7 +69,10 @@ def _balance_status(pool):
return ps
# Get the current status of balance on this pool, irrespective of
# a running balance task, ie command line intervention.
- cur_status = balance_status(pool)
+ if ps.internal:
+ cur_status = balance_status_internal(pool)
+ else:
+ cur_status = balance_status(pool)
previous_status = ps.status
# TODO: future "Balance Cancel" button should call us to have these
# TODO: values updated in the db table ready for display later.
@@ -81,6 +84,13 @@ def _balance_status(pool):
'cancelled at %s%% complete' % ps.percent_done
# and retain prior percent finished value
cur_status['percent_done'] = ps.percent_done
+ if previous_status == 'failed' \
+ and cur_status['status'] == 'finished':
+ # override current status as 'failed'
+ cur_status['status'] = 'failed'
+ # and retain prior percent finished value
+ cur_status['percent_done'] = ps.percent_done
+
if previous_status != 'finished' and previous_status != 'cancelled':
# update the last pool balance status with current status info.
PoolBalance.objects.filter(id=ps.id).update(**cur_status)
diff --git a/src/rockstor/system/luks.py b/src/rockstor/system/luks.py
index f25f18e4e..b9dd77be5 100644
--- a/src/rockstor/system/luks.py
+++ b/src/rockstor/system/luks.py
@@ -53,7 +53,7 @@ def get_open_luks_volume_status(mapped_device_name, byid_name_map):
/dev/disk/by-id/dm-name-
luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e is active and is in use.
:param mapped_device_name: any mapped device name accepted by cryptsetup,
- ie starting with "/dev/mapper/"
+ ie a /dev/mapper name; the path may be included or omitted, the
+ output is unaffected either way.
:return: dictionary of the stated commands output or {} upon a non zero
return code from command execution.
"""
diff --git a/src/rockstor/system/osi.py b/src/rockstor/system/osi.py
index 89c753ca8..bcf9e01c2 100644
--- a/src/rockstor/system/osi.py
+++ b/src/rockstor/system/osi.py
@@ -135,8 +135,8 @@ def scan_disks(min_size, test_mode=False):
:param test_mode: Used by unit tests for deterministic 'fake-serial-' mode.
:return: List containing drives of interest
"""
- base_root_disk = root_disk()
- cmd = [LSBLK, '-P', '-o',
+ base_root_disk = root_disk()  # /dev/sda if /dev/sda3, /dev/md126 if md126p2
+ cmd = [LSBLK, '-P', '-p', '-o',
'NAME,MODEL,SERIAL,SIZE,TRAN,VENDOR,HCTL,TYPE,FSTYPE,LABEL,UUID']
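+ # With '-p' lsblk reports full device paths, e.g. NAME="/dev/sda".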
o, e, rc = run_command(cmd)
dnames = {} # Working dictionary of devices.
@@ -224,7 +224,7 @@ def scan_disks(min_size, test_mode=False):
continue
# ----- Now we are done with easy exclusions we begin classification.
# If md device populate unused MODEL with basic member/raid summary.
- if (re.match('md', dmap['NAME']) is not None):
+ if (re.match('/dev/md', dmap['NAME']) is not None):
# cheap way to display our member drives
dmap['MODEL'] = get_md_members(dmap['NAME'])
# ------------ Start more complex classification -------------
@@ -375,7 +375,7 @@ def scan_disks(min_size, test_mode=False):
# a btrfs file system
# Regex to identify a partition on the base_root_disk.
# Root on 'sda3' gives base_root_disk 'sda'.
- if re.match('sd|vd', dmap['NAME']) is not None:
+ if re.match('/dev/sd|/dev/vd', dmap['NAME']) is not None:
# eg 'sda' or 'vda' with >= one additional digit,
part_regex = base_root_disk + '\d+'
else:
@@ -823,6 +823,31 @@ def mount_status(mnt_pt, return_boolean=False):
return 'unmounted'
+def dev_mount_point(dev_temp_name):
+ """
+ Parses /proc/mounts to return the first associated mount point for a given
+ device temp name (ie /dev/sda).
+ Note this is trivially different from mount_status() but intended initially
+ for use by set_pool_label.
+ :param dev_temp_name: /dev/sda3 or /dev/bcache0, or /dev/mapper/luks-...
+ :return: None if no device match is found, otherwise the first associated
+ mount point.
+ """
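+ # Illustrative /proc/mounts line (dev, mnt_pt, fstype, opts, dump, pass):
+ # /dev/sda3 / btrfs rw,relatime,space_cache,subvol=/root 0 0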
+ """
+ with open('/proc/mounts') as pfo:
+ for each_line in pfo.readlines():
+ line_fields = each_line.split()
+ if len(line_fields) < 4:
+ # Avoid index issues as we expect >= 4 columns.
+ continue
+ if line_fields[0] in EXCLUDED_MOUNT_DEVS:
+ # Skip excluded/special mount devices ie sysfs, proc, etc.
+ continue
+ if line_fields[0] == dev_temp_name:
+ logger.debug('dev_mount_point returning {}'.format(line_fields[1]))
+ return line_fields[1]
+ logger.debug('dev_mount_point() returning None')
+ return None
+
+
def remount(mnt_pt, mnt_options):
if (is_mounted(mnt_pt)):
run_command([MOUNT, '-o', 'remount,%s' % mnt_options, mnt_pt])
@@ -902,9 +927,10 @@ def root_disk():
the returned value is sdc
The assumption with non md devices is that the partition number will be a
single character.
- :return: sdX type device name (without path) where root is mounted.
+ :return: /dev/sdX type device name (with path) where root is mounted.
"""
# TODO: Consider 'lsblk -no pkname devname' rather than parse and strip.
+ # -no pkname returns blank line with /dev/mapper/luks but no partitions.
# -n = no headings, -o specify output (pkname = Parent Kernel Name)
with open('/proc/mounts') as fo:
for line in fo.readlines():
@@ -915,11 +941,11 @@ def root_disk():
# Our root is on a mapped open LUKS container so we need
# not resolve the symlink, ie /dev/dm-0, as we loose info
# and lsblk's name output also uses the luks- name.
- # So we return the name minus it's /dev/mapper/ component
+ # So we return the full name (path included)
# as there are no partitions within these devices so it is
# it's own base device. N.B. we do not resolve to the
# parent device hosting the LUKS container itself.
- return fields[0][12:]
+ return fields[0]
# resolve symbolic links to their targets.
disk = os.path.realpath(fields[0])
if (re.match('/dev/md', disk) is not None):
@@ -934,7 +960,7 @@ def root_disk():
# numbers after "md" end. N.B. the following will also
# work if root is not in a partition ie on md126 directly.
end = re.search('\d+', disk).end()
- return disk[5:end]
+ return disk[:end]
if (re.match('/dev/nvme', disk) is not None):
# We have an nvme device. These have the following naming
# conventions.
@@ -946,13 +972,13 @@ def root_disk():
# device itself as with the /dev/md parsing just in case,
# so look for the end of the base device name via 'n1'.
end = re.search('n1', disk).end()
- return disk[5:end]
- # catch all that assumes we have eg /dev/sda3 and want "sda"
- # so start from 6th char and remove the last char
- # /dev/sda3 = sda
+ return disk[:end]
+ # catch all that assumes we have eg /dev/sda3 and want /dev/sda
+ # remove the last char
+ # /dev/sda3 = /dev/sda
# TODO: consider changing to same method as in md devs above
# TODO: to cope with more than one numeric in name.
- return disk[5:-1]
+ return disk[:-1]
msg = ('root filesystem is not BTRFS. During Rockstor installation, '
'you must select BTRFS instead of LVM and other options for '
'root filesystem. Please re-install Rockstor properly.')
@@ -962,7 +988,7 @@ def root_disk():
def get_md_members(device_name, test=None):
"""
Returns the md members from a given device, if the given device is not an
- md device or the udevadm info command returns a non 0 (error) then the an
+ md device or the udevadm info command returns a non 0 (error) then an
empty string is returned.
Example lines to parse from udevadmin:-
E: MD_DEVICE_sda_DEV=/dev/sda
@@ -972,7 +998,7 @@ def get_md_members(device_name, test=None):
Based on the get_disk_serial function.
N.B. may be deprecated on scan_disks move to udevadmin, or integrated.
Could consider parsing "mdadm --detail /dev/md1" instead
- :param device_name: eg md126 or md0p2
+ :param device_name: eg /dev/md126 or /dev/md0p2
:param test: if test is not None then it's contents is used in lieu of
udevadm output.
:return: String of all members listed in udevadm info --name=device_name
@@ -980,7 +1006,7 @@ def get_md_members(device_name, test=None):
"""
line_fields = []
# if non md device then return empty string
- if re.match('md', device_name) is None:
+ if re.match('/dev/md', device_name) is None:
return ''
members_string = ''
if test is None:
@@ -1039,7 +1065,7 @@ def get_disk_serial(device_name, device_type=None, test=None):
--------- Additional personality added for md devices ie md0p1 or md126,
these devices have no serial so we search for their MD_UUID and use that
instead.
- :param device_name: eg sda as per lsblk output used in scan_disks()
+ :param device_name: eg /dev/sda as per lsblk output used in scan_disks()
:param device_type: the lsblk TYPE for the given device eg: disk, crypt.
The equivalent to the output of lsblk -n -o TYPE device_name. Defaults to
None as an indication that the caller cannot provide this info.
@@ -1054,7 +1080,6 @@ def get_disk_serial(device_name, device_type=None, test=None):
# type indicates this then add the '/dev/mapper' path to device_name
# Set search string / flag for dm personality if need be.
if device_type == 'crypt':
- device_name = '/dev/mapper/%s' % device_name
# Assuming device mapped (DM) so without it's own serial.
uuid_search_string = 'DM_UUID'
# Note that we can't use "cryptsetup luksUUID " as this is for
@@ -1064,7 +1089,7 @@ def get_disk_serial(device_name, device_type=None, test=None):
# change that devices serial which in turn makes it appear as a
# different device to Rockstor.
# Set search string / flag for md personality if need be.
- if re.match('md', device_name) is not None:
+ if re.match('/dev/md', device_name) is not None:
uuid_search_string = 'MD_UUID'
if test is None:
out, err, rc = run_command([UDEVADM, 'info', '--name=' + device_name],
@@ -1277,10 +1302,11 @@ def get_bcache_device_type(device):
Cache devices have a "cache_replacement_policy"
The passed device will have already been identified as having:
lsblk FSTYPE=bcache
- :param device: as presented by lsblk output ie sdX type with no path
+ :param device: as presented by lsblk output ie /dev/sdX type with path
:return: "bdev" for "backing device" or "cdev" for "cache device" or
None ie neither indicator is found.
"""
+ device = device.split('/')[-1] # strip off the path
sys_path = ('/sys/block/%s/bcache/' % device)
if os.path.isfile(sys_path + 'label'):
return "bdev"
@@ -1557,7 +1583,7 @@ def get_dev_byid_name(device_name, remove_path=False):
N.B. As the subsystem of the device is embedded in the by-id name a drive's
by-id path will change if for example it is plugged in via usb rather than
ata subsystem.
- :param device_name: eg sda but can also be /dev/sda or even the by-id name
+ :param device_name: eg /dev/sda or even the by-id name (with path)
but only if the full path is specified with the by-id type name.
:param remove_path: flag request to strip the path from the returned device
name, if an error occurred or no by-id type name was found then the path
@@ -1576,12 +1602,7 @@ def get_dev_byid_name(device_name, remove_path=False):
byid_name = '' # Should never be returned prior to reassignment.
longest_byid_name_length = 0
devlinks = [] # Doubles as a flag for DEVLINKS line found.
- # Caveats for mapped devices that require paths for udevadm to work
- # ie openLUKS containers are named eg luks- but are not found by
- # udevadmin via --name unless a /dev/mapper path is provided.
- if re.match('luks-', str(device_name)) is not None:
- device_name = '/dev/mapper/%s' % device_name
- # Other special device name considerations can go here.
+ # Special device name considerations / pre-processing can go here.
cmd = [UDEVADM, 'info', '--query=property', '--name', str(device_name)]
out, err, rc = run_command(cmd, throw=False)
if len(out) > 0 and rc == 0:
@@ -1684,6 +1705,33 @@ def get_byid_name_map():
return byid_name_map
+def get_device_mapper_map():
+ """
+ Simple wrapper around 'ls -lr /dev/mapper' akin to get_byid_name_map() but
+ without the assumption of multiple entries and with differing field count
+ expectations.
+ :return: dictionary indexed (keyed) by 'dm-0' type names with associated
+ /dev/mapper names (without path) as the values, or an empty dictionary if
+ a non zero return code was encountered by run_command or no /dev/mapper
+ names found.
+ """
+ device_mapper_map = {}
+ out, err, rc = run_command([LS, '-lr', '/dev/mapper'], throw=False)
+ if rc == 0 and len(out) > 3: # len 3 is only control char dev listed.
+ for each_line in out:
+ if each_line == '':
+ continue
+ # Split the line by spaces and '/' chars
+ line_fields = each_line.replace('/', ' ').split()
+ # Grab every dm-0 type name from the last field in the line and add
+ # it as a dictionary key with its value as the mapped dir entry (no
+ # path); our caller adds the /dev/mapper path when needed.
+ # {'dm-0': 'luks-dd6589a6-14aa-4a5a-bcea-fe72e2dec333'}
+ if len(line_fields) == 12:
+ device_mapper_map[line_fields[-1]] = line_fields[-4]
+ return device_mapper_map
+
+
def get_device_path(by_id):
"""
Return full path for given device id.
@@ -1820,7 +1868,7 @@ def get_devname_old(device_name):
def get_devname(device_name, addPath=False):
"""Intended as a light and quicker way to retrieve a device name with or
- without path (default) from any legal udevadm --name parameter
+ without (default) path from any legal udevadm --name parameter
Simple wrapper around a call to:
udevadm info --query=name device_name
Works with device_name of eg sda /dev/sda /dev/disk/by-id/ and /dev/disk/
@@ -1842,7 +1890,7 @@ def get_devname(device_name, addPath=False):
if len(fields) == 1:
# we have a single word output so return it with or without path
if addPath:
- return '/dev/%s' % fields[0]
+ return '/dev/{}'.format(fields[0])
# return the word (device name ie sda) without added /dev/
return fields[0]
# a non one word reply was received on the first line from udevadm or
diff --git a/src/rockstor/system/tests/test_osi.py b/src/rockstor/system/tests/test_osi.py
index 8e135fedb..6cff4a351 100644
--- a/src/rockstor/system/tests/test_osi.py
+++ b/src/rockstor/system/tests/test_osi.py
@@ -43,10 +43,10 @@ def setUp(self):
self.patch_os_path_isfile = patch('os.path.isfile')
self.mock_os_path_isfile = self.patch_os_path_isfile.start()
- # root_disk() default mock - return sda for sda3 '/' from /proc/mounts
+ # root_disk() default mock - return /dev/sda for /dev/sda3 '/'
self.patch_root_disk = patch('system.osi.root_disk')
self.mock_root_disk = self.patch_root_disk.start()
- self.mock_root_disk.return_value = 'sda'
+ self.mock_root_disk.return_value = '/dev/sda'
def tearDown(self):
patch.stopall()
@@ -201,7 +201,10 @@ def test_get_dev_byid_name(self):
expected_result.append(
('/dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00009', True))
# Query on an openLUKS container (backed by bcache):
- dev_name.append('luks-a47f4950-3296-4504-b9a4-2dc75681a6ad')
+ # N.B. legacy versions of get_dev_byid_name() would auto add
+ # /dev/mapper if dev name matched 'luks-' this was later removed in
+ # favour of generating the full path in scan_disks().
+ dev_name.append('/dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad')
remove_path.append(True)
out.append([
'DEVLINKS=/dev/disk/by-id/dm-name-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad /dev/disk/by-id/dm-uuid-CRYPT-LUKS1-a47f495032964504b9a42dc75681a6ad-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad /dev/disk/by-label/luks-pool-on-bcache /dev/disk/by-uuid/8ad02be6-fc5f-4342-bdd2-f992e7792a5b /dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', # noqa E501
@@ -379,35 +382,35 @@ def test_scan_disks_luks_on_bcache(self):
# Moc output for run_command with:
# lsblk -P -o NAME,MODEL,SERIAL,SIZE,TRAN,VENDOR,HCTL,TYPE,FSTYPE,LABEL,UUID # noqa E501
out = [[
- 'NAME="sdd" MODEL="QEMU HARDDISK " SERIAL="bcache-cdev" SIZE="2G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="bcache" LABEL="" UUID="6efd5476-77a9-4f57-97a5-fa1a37d4338b"', # noqa E501
- 'NAME="bcache0" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="3efb3830-fee1-4a9e-a5c6-ea456bfc269e"', # noqa E501
- 'NAME="luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
- 'NAME="bcache16" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="a47f4950-3296-4504-b9a4-2dc75681a6ad"', # noqa E501
- 'NAME="luks-a47f4950-3296-4504-b9a4-2dc75681a6ad" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
- 'NAME="sdb" MODEL="QEMU HARDDISK " SERIAL="bcache-bdev-1" SIZE="2G" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="bcache" LABEL="" UUID="c9ed805f-b141-4ce9-80c7-9f9e1f71195d"', # noqa E501
- 'NAME="bcache0" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="3efb3830-fee1-4a9e-a5c6-ea456bfc269e"', # noqa E501
- 'NAME="luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
- 'NAME="vdb" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="0x1af4" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="41cd2e3c-3bd6-49fc-9f42-20e368a66efc"', # noqa E501
- 'NAME="luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
- 'NAME="sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="1024M" TRAN="ata" VENDOR="QEMU " HCTL="6:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sdc" MODEL="QEMU HARDDISK " SERIAL="bcache-bdev-2" SIZE="2G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="bcache" LABEL="" UUID="06754c95-4f78-4ffb-a243-5c85144d1833"', # noqa E501
- 'NAME="bcache16" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="a47f4950-3296-4504-b9a4-2dc75681a6ad"', # noqa E501
- 'NAME="luks-a47f4950-3296-4504-b9a4-2dc75681a6ad" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
- 'NAME="sda" MODEL="QEMU HARDDISK " SERIAL="sys-drive-serial-num" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="0:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="c25eec5f-d4bd-4670-b756-e8b687562f6e"', # noqa E501
- 'NAME="sda3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="a98f88c2-2031-4bd3-9124-2f9d8a77987c"', # noqa E501
- 'NAME="sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="6b8e342c-6cd6-40e8-a134-db302fad3f20"', # noqa E501
- 'NAME="vda" MODEL="" SERIAL="" SIZE="3G" TRAN="" VENDOR="0x1af4" HCTL="" TYPE="disk" FSTYPE="btrfs" LABEL="rock-pool" UUID="d7e5987d-9428-4b4a-9abb-f3d564e4c467"', # noqa E501
+ 'NAME="/dev/sdd" MODEL="QEMU HARDDISK " SERIAL="bcache-cdev" SIZE="2G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="bcache" LABEL="" UUID="6efd5476-77a9-4f57-97a5-fa1a37d4338b"', # noqa E501
+ 'NAME="/dev/bcache0" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="3efb3830-fee1-4a9e-a5c6-ea456bfc269e"', # noqa E501
+ 'NAME="/dev/mapper/luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
+ 'NAME="/dev/bcache16" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="a47f4950-3296-4504-b9a4-2dc75681a6ad"', # noqa E501
+ 'NAME="/dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
+ 'NAME="/dev/sdb" MODEL="QEMU HARDDISK " SERIAL="bcache-bdev-1" SIZE="2G" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="bcache" LABEL="" UUID="c9ed805f-b141-4ce9-80c7-9f9e1f71195d"', # noqa E501
+ 'NAME="/dev/bcache0" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="3efb3830-fee1-4a9e-a5c6-ea456bfc269e"', # noqa E501
+ 'NAME="/dev/mapper/luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
+ 'NAME="/dev/vdb" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="0x1af4" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="41cd2e3c-3bd6-49fc-9f42-20e368a66efc"', # noqa E501
+ 'NAME="/dev/mapper/luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
+ 'NAME="/dev/sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="1024M" TRAN="ata" VENDOR="QEMU " HCTL="6:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdc" MODEL="QEMU HARDDISK " SERIAL="bcache-bdev-2" SIZE="2G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="bcache" LABEL="" UUID="06754c95-4f78-4ffb-a243-5c85144d1833"', # noqa E501
+ 'NAME="/dev/bcache16" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="crypto_LUKS" LABEL="" UUID="a47f4950-3296-4504-b9a4-2dc75681a6ad"', # noqa E501
+ 'NAME="/dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="pool-on-mixed-luks" UUID="1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded"', # noqa E501
+ 'NAME="/dev/sda" MODEL="QEMU HARDDISK " SERIAL="sys-drive-serial-num" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="0:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="c25eec5f-d4bd-4670-b756-e8b687562f6e"', # noqa E501
+ 'NAME="/dev/sda3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="a98f88c2-2031-4bd3-9124-2f9d8a77987c"', # noqa E501
+ 'NAME="/dev/sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="6b8e342c-6cd6-40e8-a134-db302fad3f20"', # noqa E501
+ 'NAME="/dev/vda" MODEL="" SERIAL="" SIZE="3G" TRAN="" VENDOR="0x1af4" HCTL="" TYPE="disk" FSTYPE="btrfs" LABEL="rock-pool" UUID="d7e5987d-9428-4b4a-9abb-f3d564e4c467"', # noqa E501
'']]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='vda', model=None, serial='serial-6', size=3145728,
+ Disk(name='/dev/vda', model=None, serial='serial-6', size=3145728,
transport=None, vendor='0x1af4', hctl=None, type='disk',
fstype='btrfs', label='rock-pool',
uuid='d7e5987d-9428-4b4a-9abb-f3d564e4c467', parted=False,
root=False, partitions={}),
- Disk(name='bcache0', model=None,
+ Disk(name='/dev/bcache0', model=None,
serial='bcache-c9ed805f-b141-4ce9-80c7-9f9e1f71195d',
size=2097152, transport=None,
vendor=None, hctl=None,
@@ -416,18 +419,18 @@ def test_scan_disks_luks_on_bcache(self):
uuid='3efb3830-fee1-4a9e-a5c6-ea456bfc269e',
parted=False, root=False,
partitions={}),
- Disk(name='luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', model=None,
+ Disk(name='/dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', model=None, # noqa E501
serial='CRYPT-LUKS1-a47f495032964504b9a42dc75681a6ad-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', # noqa E501
size=2097152, transport=None, vendor=None, hctl=None,
type='crypt', fstype='btrfs', label='pool-on-mixed-luks',
uuid='1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded', parted=False,
root=False, partitions={}),
- Disk(name='sdd', model='QEMU HARDDISK', serial='bcache-cdev',
+ Disk(name='/dev/sdd', model='QEMU HARDDISK', serial='bcache-cdev',
size=2097152, transport='sata', vendor='ATA', hctl='3:0:0:0',
type='disk', fstype='bcachecdev', label=None,
uuid='6efd5476-77a9-4f57-97a5-fa1a37d4338b', parted=False,
root=False, partitions={}),
- Disk(name='bcache16', model=None,
+ Disk(name='/dev/bcache16', model=None,
serial='bcache-06754c95-4f78-4ffb-a243-5c85144d1833',
size=2097152, transport=None,
vendor=None, hctl=None,
@@ -436,34 +439,34 @@ def test_scan_disks_luks_on_bcache(self):
uuid='a47f4950-3296-4504-b9a4-2dc75681a6ad',
parted=False, root=False,
partitions={}),
- Disk(name='luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e', model=None,
+ Disk(name='/dev/mapper/luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e', model=None, # noqa E501
serial='CRYPT-LUKS1-3efb3830fee14a9ea5c6ea456bfc269e-luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e', # noqa E501
size=2097152, transport=None, vendor=None, hctl=None,
type='crypt', fstype='btrfs', label='pool-on-mixed-luks',
uuid='1fdd4b41-fdd0-40c4-8ae6-7d6309b09ded', parted=False,
root=False, partitions={}),
- Disk(name='vdb', model=None, serial='serial-5', size=2097152,
+ Disk(name='/dev/vdb', model=None, serial='serial-5', size=2097152,
transport=None, vendor='0x1af4', hctl=None, type='disk',
fstype='crypto_LUKS', label=None,
uuid='41cd2e3c-3bd6-49fc-9f42-20e368a66efc', parted=False,
root=False, partitions={}),
- Disk(name='sda3', model='QEMU HARDDISK',
+ Disk(name='/dev/sda3', model='QEMU HARDDISK',
serial='sys-drive-serial-num',
size=7025459, transport='sata', vendor='ATA', hctl='0:0:0:0',
type='part', fstype='btrfs', label='rockstor_rockstor',
uuid='a98f88c2-2031-4bd3-9124-2f9d8a77987c', parted=True,
root=True, partitions={}),
- Disk(name='sdb', model='QEMU HARDDISK', serial='bcache-bdev-1',
+ Disk(name='/dev/sdb', model='QEMU HARDDISK', serial='bcache-bdev-1', # noqa E501
size=2097152, transport='sata', vendor='ATA', hctl='1:0:0:0',
type='disk', fstype='bcache', label=None,
uuid='c9ed805f-b141-4ce9-80c7-9f9e1f71195d', parted=False,
root=False, partitions={}),
- Disk(name='sdc', model='QEMU HARDDISK', serial='bcache-bdev-2',
+ Disk(name='/dev/sdc', model='QEMU HARDDISK', serial='bcache-bdev-2', # noqa E501
size=2097152, transport='sata', vendor='ATA', hctl='2:0:0:0',
type='disk', fstype='bcache', label=None,
uuid='06754c95-4f78-4ffb-a243-5c85144d1833', parted=False,
root=False, partitions={}),
- Disk(name='luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc', model=None,
+ Disk(name='/dev/mapper/luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc', model=None, # noqa E501
serial='CRYPT-LUKS1-41cd2e3c3bd649fc9f4220e368a66efc-luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc', # noqa E501
size=2097152, transport=None, vendor=None, hctl=None,
type='crypt', fstype='btrfs', label='pool-on-mixed-luks',
@@ -479,13 +482,13 @@ def dyn_disk_serial_return(*args, **kwargs):
        # Entries only required here if lsblk test data has no serial info:
# eg for bcache, LUKS, mdraid, and virtio type devices.
s_map = {
- 'bcache0': 'bcache-c9ed805f-b141-4ce9-80c7-9f9e1f71195d',
- 'bcache16': 'bcache-06754c95-4f78-4ffb-a243-5c85144d1833',
- 'luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e': 'CRYPT-LUKS1-3efb3830fee14a9ea5c6ea456bfc269e-luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e', # noqa E501
- 'luks-a47f4950-3296-4504-b9a4-2dc75681a6ad': 'CRYPT-LUKS1-a47f495032964504b9a42dc75681a6ad-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', # noqa E501
- 'luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc': 'CRYPT-LUKS1-41cd2e3c3bd649fc9f4220e368a66efc-luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc', # noqa E501
- 'vdb': 'serial-5',
- 'vda': 'serial-6'
+ '/dev/bcache0': 'bcache-c9ed805f-b141-4ce9-80c7-9f9e1f71195d',
+ '/dev/bcache16': 'bcache-06754c95-4f78-4ffb-a243-5c85144d1833',
+ '/dev/mapper/luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e': 'CRYPT-LUKS1-3efb3830fee14a9ea5c6ea456bfc269e-luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e', # noqa E501
+ '/dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad': 'CRYPT-LUKS1-a47f495032964504b9a42dc75681a6ad-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', # noqa E501
+ '/dev/mapper/luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc': 'CRYPT-LUKS1-41cd2e3c3bd649fc9f4220e368a66efc-luks-41cd2e3c-3bd6-49fc-9f42-20e368a66efc', # noqa E501
+ '/dev/vdb': 'serial-5',
+ '/dev/vda': 'serial-6'
}
# First argument in get_disk_serial() is device_name, key off this
# for our dynamic mock return from s_map (serial map).
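
As the comment above describes, these tests drive a dynamic mock whose return value is keyed off the first positional argument. A minimal, self-contained sketch of that side_effect pattern follows; the map entries and the 'fake-serial-' fallback here are illustrative values, not taken from this patch:

    import unittest.mock as mock

    s_map = {'/dev/vda': 'serial-6', '/dev/vdb': 'serial-5'}

    def dyn_disk_serial_return(*args, **kwargs):
        # args[0] is the device name passed to get_disk_serial();
        # unknown devices fall through to a recognisable sentinel.
        if args[0] in s_map:
            return s_map[args[0]]
        return 'fake-serial-'

    # side_effect, unlike return_value, is re-evaluated on every call,
    # so each device queried during a scan gets its own serial.
    get_disk_serial = mock.Mock(side_effect=dyn_disk_serial_return)
    assert get_disk_serial('/dev/vda') == 'serial-6'
    assert get_disk_serial('/dev/sdz') == 'fake-serial-'
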
@@ -504,9 +507,9 @@ def dyn_disk_serial_return(*args, **kwargs):
def dyn_bcache_device_type(*args, **kwargs):
bc_dev_map = {
- 'sdd': 'cdev',
- 'sdb': 'bdev',
- 'sdc': 'bdev'
+ '/dev/sdd': 'cdev',
+ '/dev/sdb': 'bdev',
+ '/dev/sdc': 'bdev'
}
if args[0] in bc_dev_map:
return bc_dev_map[args[0]]
@@ -517,7 +520,9 @@ def dyn_bcache_device_type(*args, **kwargs):
# Iterate the test data sets for run_command running lsblk.
for o, e, r, expected in zip(out, err, rc, expected_result):
self.mock_run_command.return_value = (o, e, r)
+ expected.sort(key=operator.itemgetter(0))
returned = scan_disks(1048576)
+ returned.sort(key=operator.itemgetter(0))
self.assertEqual(returned, expected,
msg='Un-expected scan_disks() result:\n '
'returned = ({}).\n '
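
The out/err/rc fixtures above are parallel lists, one entry per scenario, matching run_command()'s (out, err, rc) return shape. A reduced, self-contained sketch of the zip-driven loop, with a single hypothetical lsblk line standing in for the fixture data:

    import unittest.mock as mock

    out = [['NAME="/dev/vda" TYPE="disk" FSTYPE="btrfs"', '']]
    err = [['']]
    rc = [0]

    run_command = mock.Mock()
    for o, e, r in zip(out, err, rc):
        # Each scenario re-primes the mock before the scan would run.
        run_command.return_value = (o, e, r)
        stdout, stderr, code = run_command(['lsblk', '-P'])
        assert code == 0 and stdout[0].startswith('NAME=')
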
@@ -539,236 +544,236 @@ def test_scan_disks_dell_perk_h710_md1220_36_disks(self):
# N.B. listed in the order returned by lsblk.
        # All base devices (ie sda of sda3) have lsblk accessible serials.
out = [[
- 'NAME="sdy" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2766c0" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:11:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdf" MODEL="PERC H710 " SERIAL="6848f690e936450021a4585b05e46fcc" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:5:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
- 'NAME="sdab" MODEL="ST91000640SS " SERIAL="5000c50063041947" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:14:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdo" MODEL="HUC101212CSS600 " SERIAL="5000cca01d21bc10" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:1:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdw" MODEL="ST91000640SS " SERIAL="5000c500630450a3" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:9:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdd" MODEL="PERC H710 " SERIAL="6848f690e9364500219f33b21773ea22" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:3:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
- 'NAME="sdm" MODEL="PERC H710 " SERIAL="6848f690e936450021acd1f30663b877" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:12:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
- 'NAME="sdu" MODEL="HUC101212CSS600 " SERIAL="5000cca01d273a24" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:7:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdai" MODEL="ST91000640SS " SERIAL="5000c5006303ea0f" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:21:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdb" MODEL="PERC H710 " SERIAL="6848f690e9364500219f339b1610b547" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:1:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
- 'NAME="sdk" MODEL="PERC H710 " SERIAL="6848f690e936450021acd1e705b389c6" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:10:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
- 'NAME="sds" MODEL="HUC101212CSS600 " SERIAL="5000cca01d217968" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:5:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdag" MODEL="ST91000640SS " SERIAL="5000c50062cbc1f3" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:19:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdi" MODEL="PERC H710 " SERIAL="6848f690e936450021a4586906bd9742" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:8:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
- 'NAME="sdq" MODEL="HUC101212CSS600 " SERIAL="5000cca01d29f384" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:3:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdae" MODEL="INTEL SSDSC2KW24" SERIAL="CVLT6153072G240CGN" SIZE="223.6G" TRAN="sas" VENDOR="ATA " HCTL="1:0:17:0" TYPE="disk" FSTYPE="btrfs" LABEL="INTEL_SSD" UUID="a504bf03-0299-4648-8a95-c91aba291de8"', # noqa E501
- 'NAME="sdz" MODEL="ST91000640SS " SERIAL="5000c5006304544b" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:12:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdg" MODEL="PERC H710 " SERIAL="6848f690e936450021ed61830ae57fbf" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:6:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
- 'NAME="sdac" MODEL="ST91000640SS " SERIAL="5000c500630249cb" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:15:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdx" MODEL="ST91000640SS " SERIAL="5000c50063044387" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:10:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sde" MODEL="PERC H710 " SERIAL="6848f690e9364500219f33bb17fe7d7b" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:4:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
- 'NAME="sdaa" MODEL="ST91000640SS " SERIAL="5000c50063044363" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:13:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdn" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2144ac" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdv" MODEL="HUC101212CSS600 " SERIAL="5000cca01d21893c" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:8:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdaj" MODEL="INTEL SSDSC2KW24" SERIAL="CVLT6181019S240CGN" SIZE="223.6G" TRAN="sas" VENDOR="ATA " HCTL="1:0:22:0" TYPE="disk" FSTYPE="btrfs" LABEL="INTEL_SSD" UUID="a504bf03-0299-4648-8a95-c91aba291de8"', # noqa E501
- 'NAME="sdc" MODEL="PERC H710 " SERIAL="6848f690e936450021ed614a077c1b44" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:2:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
- 'NAME="sdl" MODEL="PERC H710 " SERIAL="6848f690e936450021a4525005828671" SIZE="4.6T" TRAN="" VENDOR="DELL " HCTL="0:2:11:0" TYPE="disk" FSTYPE="btrfs" LABEL="5TBWDGREEN" UUID="a37956a8-a175-4906-82c1-bf843132da1a"', # noqa E501
- 'NAME="sdt" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2af91c" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:6:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdah" MODEL="ST91000640SS " SERIAL="5000c50062cb366f" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:20:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sda" MODEL="PERC H710 " SERIAL="6848f690e936450018b7c3a11330997b" SIZE="278.9G" TRAN="" VENDOR="DELL " HCTL="0:2:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda2" MODEL="" SERIAL="" SIZE="13.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="a34b82d0-c342-41e0-a58d-4f0a0027829d"', # noqa E501
- 'NAME="sda3" MODEL="" SERIAL="" SIZE="264.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="7f7acdd7-493e-4bb5-b801-b7b7dc289535"', # noqa E501
- 'NAME="sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="5d2848ff-ae8f-4c2f-b825-90621076acc1"', # noqa E501
- 'NAME="sdj" MODEL="PERC H710 " SERIAL="6848f690e936450021a45f9904046a2f" SIZE="2.7T" TRAN="" VENDOR="DELL " HCTL="0:2:9:0" TYPE="disk" FSTYPE="btrfs" LABEL="VMWARE_MECH_ARRAY" UUID="e6d13c0b-825f-4b43-81b6-7eb2b791b1c3"', # noqa E501
- 'NAME="sdr" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2188e0" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:4:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdaf" MODEL="ST91000640SS " SERIAL="5000c500630425df" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:18:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdh" MODEL="PERC H710 " SERIAL="6848f690e9364500219f33d919c7488a" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:7:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
- 'NAME="sdp" MODEL="HUC101212CSS600 " SERIAL="5000cca01d21885c" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:2:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
- 'NAME="sdad" MODEL="INTEL SSDSC2KW24" SERIAL="CVLT618101SE240CGN" SIZE="223.6G" TRAN="sas" VENDOR="ATA " HCTL="1:0:16:0" TYPE="disk" FSTYPE="btrfs" LABEL="INTEL_SSD" UUID="a504bf03-0299-4648-8a95-c91aba291de8"', # noqa E501
+ 'NAME="/dev/sdy" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2766c0" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:11:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdf" MODEL="PERC H710 " SERIAL="6848f690e936450021a4585b05e46fcc" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:5:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
+ 'NAME="/dev/sdab" MODEL="ST91000640SS " SERIAL="5000c50063041947" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:14:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdo" MODEL="HUC101212CSS600 " SERIAL="5000cca01d21bc10" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:1:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdw" MODEL="ST91000640SS " SERIAL="5000c500630450a3" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:9:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdd" MODEL="PERC H710 " SERIAL="6848f690e9364500219f33b21773ea22" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:3:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
+ 'NAME="/dev/sdm" MODEL="PERC H710 " SERIAL="6848f690e936450021acd1f30663b877" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:12:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
+ 'NAME="/dev/sdu" MODEL="HUC101212CSS600 " SERIAL="5000cca01d273a24" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:7:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdai" MODEL="ST91000640SS " SERIAL="5000c5006303ea0f" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:21:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdb" MODEL="PERC H710 " SERIAL="6848f690e9364500219f339b1610b547" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:1:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
+ 'NAME="/dev/sdk" MODEL="PERC H710 " SERIAL="6848f690e936450021acd1e705b389c6" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:10:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
+ 'NAME="/dev/sds" MODEL="HUC101212CSS600 " SERIAL="5000cca01d217968" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:5:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdag" MODEL="ST91000640SS " SERIAL="5000c50062cbc1f3" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:19:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdi" MODEL="PERC H710 " SERIAL="6848f690e936450021a4586906bd9742" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:8:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
+ 'NAME="/dev/sdq" MODEL="HUC101212CSS600 " SERIAL="5000cca01d29f384" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:3:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdae" MODEL="INTEL SSDSC2KW24" SERIAL="CVLT6153072G240CGN" SIZE="223.6G" TRAN="sas" VENDOR="ATA " HCTL="1:0:17:0" TYPE="disk" FSTYPE="btrfs" LABEL="INTEL_SSD" UUID="a504bf03-0299-4648-8a95-c91aba291de8"', # noqa E501
+ 'NAME="/dev/sdz" MODEL="ST91000640SS " SERIAL="5000c5006304544b" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:12:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdg" MODEL="PERC H710 " SERIAL="6848f690e936450021ed61830ae57fbf" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:6:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
+ 'NAME="/dev/sdac" MODEL="ST91000640SS " SERIAL="5000c500630249cb" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:15:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdx" MODEL="ST91000640SS " SERIAL="5000c50063044387" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:10:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sde" MODEL="PERC H710 " SERIAL="6848f690e9364500219f33bb17fe7d7b" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:4:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
+ 'NAME="/dev/sdaa" MODEL="ST91000640SS " SERIAL="5000c50063044363" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:13:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdn" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2144ac" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdv" MODEL="HUC101212CSS600 " SERIAL="5000cca01d21893c" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:8:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdaj" MODEL="INTEL SSDSC2KW24" SERIAL="CVLT6181019S240CGN" SIZE="223.6G" TRAN="sas" VENDOR="ATA " HCTL="1:0:22:0" TYPE="disk" FSTYPE="btrfs" LABEL="INTEL_SSD" UUID="a504bf03-0299-4648-8a95-c91aba291de8"', # noqa E501
+ 'NAME="/dev/sdc" MODEL="PERC H710 " SERIAL="6848f690e936450021ed614a077c1b44" SIZE="7.3T" TRAN="" VENDOR="DELL " HCTL="0:2:2:0" TYPE="disk" FSTYPE="btrfs" LABEL="BIGDATA" UUID="cb15142f-9d1e-4cb2-9b1f-adda3af6555f"', # noqa E501
+ 'NAME="/dev/sdl" MODEL="PERC H710 " SERIAL="6848f690e936450021a4525005828671" SIZE="4.6T" TRAN="" VENDOR="DELL " HCTL="0:2:11:0" TYPE="disk" FSTYPE="btrfs" LABEL="5TBWDGREEN" UUID="a37956a8-a175-4906-82c1-bf843132da1a"', # noqa E501
+ 'NAME="/dev/sdt" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2af91c" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:6:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdah" MODEL="ST91000640SS " SERIAL="5000c50062cb366f" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:20:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sda" MODEL="PERC H710 " SERIAL="6848f690e936450018b7c3a11330997b" SIZE="278.9G" TRAN="" VENDOR="DELL " HCTL="0:2:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda2" MODEL="" SERIAL="" SIZE="13.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="a34b82d0-c342-41e0-a58d-4f0a0027829d"', # noqa E501
+ 'NAME="/dev/sda3" MODEL="" SERIAL="" SIZE="264.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="7f7acdd7-493e-4bb5-b801-b7b7dc289535"', # noqa E501
+ 'NAME="/dev/sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="5d2848ff-ae8f-4c2f-b825-90621076acc1"', # noqa E501
+ 'NAME="/dev/sdj" MODEL="PERC H710 " SERIAL="6848f690e936450021a45f9904046a2f" SIZE="2.7T" TRAN="" VENDOR="DELL " HCTL="0:2:9:0" TYPE="disk" FSTYPE="btrfs" LABEL="VMWARE_MECH_ARRAY" UUID="e6d13c0b-825f-4b43-81b6-7eb2b791b1c3"', # noqa E501
+ 'NAME="/dev/sdr" MODEL="HUC101212CSS600 " SERIAL="5000cca01d2188e0" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:4:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdaf" MODEL="ST91000640SS " SERIAL="5000c500630425df" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:18:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdh" MODEL="PERC H710 " SERIAL="6848f690e9364500219f33d919c7488a" SIZE="558.4G" TRAN="" VENDOR="DELL " HCTL="0:2:7:0" TYPE="disk" FSTYPE="btrfs" LABEL="Test" UUID="612f1fc2-dfa8-4940-a1ad-e11c893b32ca"', # noqa E501
+ 'NAME="/dev/sdp" MODEL="HUC101212CSS600 " SERIAL="5000cca01d21885c" SIZE="1.1T" TRAN="sas" VENDOR="HGST " HCTL="1:0:2:0" TYPE="disk" FSTYPE="btrfs" LABEL="MD1220-DAS" UUID="12d76eb6-7aad-46ba-863e-d9c51e8e6f2d"', # noqa E501
+ 'NAME="/dev/sdad" MODEL="INTEL SSDSC2KW24" SERIAL="CVLT618101SE240CGN" SIZE="223.6G" TRAN="sas" VENDOR="ATA " HCTL="1:0:16:0" TYPE="disk" FSTYPE="btrfs" LABEL="INTEL_SSD" UUID="a504bf03-0299-4648-8a95-c91aba291de8"', # noqa E501
''
]]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='sda3', model='PERC H710',
+ Disk(name='/dev/sda3', model='PERC H710',
serial='6848f690e936450018b7c3a11330997b', size=277558067,
transport=None, vendor='DELL', hctl='0:2:0:0', type='part',
fstype='btrfs', label='rockstor_rockstor',
uuid='7f7acdd7-493e-4bb5-b801-b7b7dc289535', parted=True,
root=True, partitions={}),
- Disk(name='sdt', model='HUC101212CSS600',
+ Disk(name='/dev/sdt', model='HUC101212CSS600',
serial='5000cca01d2af91c',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:6:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdu', model='HUC101212CSS600',
+ Disk(name='/dev/sdu', model='HUC101212CSS600',
serial='5000cca01d273a24',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:7:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdv', model='HUC101212CSS600',
+ Disk(name='/dev/sdv', model='HUC101212CSS600',
serial='5000cca01d21893c',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:8:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdw', model='ST91000640SS', serial='5000c500630450a3',
+ Disk(name='/dev/sdw', model='ST91000640SS', serial='5000c500630450a3', # noqa E501
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:9:0',
type='disk', fstype='btrfs', label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdp', model='HUC101212CSS600',
+ Disk(name='/dev/sdp', model='HUC101212CSS600',
serial='5000cca01d21885c',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:2:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdq', model='HUC101212CSS600',
+ Disk(name='/dev/sdq', model='HUC101212CSS600',
serial='5000cca01d29f384',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:3:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdr', model='HUC101212CSS600',
+ Disk(name='/dev/sdr', model='HUC101212CSS600',
serial='5000cca01d2188e0',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:4:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sds', model='HUC101212CSS600',
+ Disk(name='/dev/sds', model='HUC101212CSS600',
serial='5000cca01d217968',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:5:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdx', model='ST91000640SS', serial='5000c50063044387',
+ Disk(name='/dev/sdx', model='ST91000640SS', serial='5000c50063044387', # noqa E501
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:10:0', type='disk', fstype='btrfs', label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdy', model='HUC101212CSS600',
+ Disk(name='/dev/sdy', model='HUC101212CSS600',
serial='5000cca01d2766c0',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:11:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdz', model='ST91000640SS', serial='5000c5006304544b',
+ Disk(name='/dev/sdz', model='ST91000640SS', serial='5000c5006304544b', # noqa E501
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:12:0', type='disk', fstype='btrfs', label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdd', model='PERC H710',
+ Disk(name='/dev/sdd', model='PERC H710',
serial='6848f690e9364500219f33b21773ea22', size=585524838,
transport=None, vendor='DELL', hctl='0:2:3:0', type='disk',
fstype='btrfs', label='Test',
uuid='612f1fc2-dfa8-4940-a1ad-e11c893b32ca', parted=False,
root=False, partitions={}),
- Disk(name='sde', model='PERC H710',
+ Disk(name='/dev/sde', model='PERC H710',
serial='6848f690e9364500219f33bb17fe7d7b', size=585524838,
transport=None, vendor='DELL', hctl='0:2:4:0', type='disk',
fstype='btrfs', label='Test',
uuid='612f1fc2-dfa8-4940-a1ad-e11c893b32ca', parted=False,
root=False, partitions={}),
- Disk(name='sdf', model='PERC H710',
+ Disk(name='/dev/sdf', model='PERC H710',
serial='6848f690e936450021a4585b05e46fcc', size=7838315315,
transport=None, vendor='DELL', hctl='0:2:5:0', type='disk',
fstype='btrfs', label='BIGDATA',
uuid='cb15142f-9d1e-4cb2-9b1f-adda3af6555f', parted=False,
root=False, partitions={}),
- Disk(name='sdg', model='PERC H710',
+ Disk(name='/dev/sdg', model='PERC H710',
serial='6848f690e936450021ed61830ae57fbf', size=7838315315,
transport=None, vendor='DELL', hctl='0:2:6:0', type='disk',
fstype='btrfs', label='BIGDATA',
uuid='cb15142f-9d1e-4cb2-9b1f-adda3af6555f', parted=False,
root=False, partitions={}),
- Disk(name='sdb', model='PERC H710',
+ Disk(name='/dev/sdb', model='PERC H710',
serial='6848f690e9364500219f339b1610b547', size=585524838,
transport=None, vendor='DELL', hctl='0:2:1:0', type='disk',
fstype='btrfs', label='Test',
uuid='612f1fc2-dfa8-4940-a1ad-e11c893b32ca', parted=False,
root=False, partitions={}),
- Disk(name='sdc', model='PERC H710',
+ Disk(name='/dev/sdc', model='PERC H710',
serial='6848f690e936450021ed614a077c1b44', size=7838315315,
transport=None, vendor='DELL', hctl='0:2:2:0', type='disk',
fstype='btrfs', label='BIGDATA',
uuid='cb15142f-9d1e-4cb2-9b1f-adda3af6555f', parted=False,
root=False, partitions={}),
- Disk(name='sdl', model='PERC H710',
+ Disk(name='/dev/sdl', model='PERC H710',
serial='6848f690e936450021a4525005828671', size=4939212390,
transport=None, vendor='DELL', hctl='0:2:11:0',
type='disk', fstype='btrfs', label='5TBWDGREEN',
uuid='a37956a8-a175-4906-82c1-bf843132da1a', parted=False,
root=False, partitions={}),
- Disk(name='sdm', model='PERC H710',
+ Disk(name='/dev/sdm', model='PERC H710',
serial='6848f690e936450021acd1f30663b877', size=7838315315,
transport=None, vendor='DELL', hctl='0:2:12:0',
type='disk', fstype='btrfs', label='BIGDATA',
uuid='cb15142f-9d1e-4cb2-9b1f-adda3af6555f', parted=False,
root=False, partitions={}),
- Disk(name='sdn', model='HUC101212CSS600',
+ Disk(name='/dev/sdn', model='HUC101212CSS600',
serial='5000cca01d2144ac',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:0:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdo', model='HUC101212CSS600',
+ Disk(name='/dev/sdo', model='HUC101212CSS600',
serial='5000cca01d21bc10',
size=1181116006, transport='sas', vendor='HGST',
hctl='1:0:1:0',
type='disk', fstype='btrfs', label='MD1220-DAS',
uuid='12d76eb6-7aad-46ba-863e-d9c51e8e6f2d', parted=False,
root=False, partitions={}),
- Disk(name='sdh', model='PERC H710',
+ Disk(name='/dev/sdh', model='PERC H710',
serial='6848f690e9364500219f33d919c7488a', size=585524838,
transport=None, vendor='DELL', hctl='0:2:7:0', type='disk',
fstype='btrfs', label='Test',
uuid='612f1fc2-dfa8-4940-a1ad-e11c893b32ca', parted=False,
root=False, partitions={}),
- Disk(name='sdi', model='PERC H710',
+ Disk(name='/dev/sdi', model='PERC H710',
serial='6848f690e936450021a4586906bd9742', size=7838315315,
transport=None, vendor='DELL', hctl='0:2:8:0', type='disk',
fstype='btrfs', label='BIGDATA',
uuid='cb15142f-9d1e-4cb2-9b1f-adda3af6555f', parted=False,
root=False, partitions={}),
- Disk(name='sdj', model='PERC H710',
+ Disk(name='/dev/sdj', model='PERC H710',
serial='6848f690e936450021a45f9904046a2f', size=2899102924,
transport=None, vendor='DELL', hctl='0:2:9:0', type='disk',
fstype='btrfs', label='VMWARE_MECH_ARRAY',
uuid='e6d13c0b-825f-4b43-81b6-7eb2b791b1c3', parted=False,
root=False, partitions={}),
- Disk(name='sdk', model='PERC H710',
+ Disk(name='/dev/sdk', model='PERC H710',
serial='6848f690e936450021acd1e705b389c6', size=7838315315,
transport=None, vendor='DELL', hctl='0:2:10:0',
type='disk', fstype='btrfs', label='BIGDATA',
uuid='cb15142f-9d1e-4cb2-9b1f-adda3af6555f', parted=False,
root=False, partitions={}),
- Disk(name='sdaf', model='ST91000640SS',
+ Disk(name='/dev/sdaf', model='ST91000640SS',
serial='5000c500630425df',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:18:0', type='disk', fstype='btrfs',
label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdag', model='ST91000640SS',
+ Disk(name='/dev/sdag', model='ST91000640SS',
serial='5000c50062cbc1f3',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:19:0', type='disk', fstype='btrfs',
label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdad', model='INTEL SSDSC2KW24',
+ Disk(name='/dev/sdad', model='INTEL SSDSC2KW24',
serial='CVLT618101SE240CGN',
size=234461593, transport='sas', vendor='ATA',
hctl='1:0:16:0', type='disk', fstype='btrfs',
label='INTEL_SSD',
uuid='a504bf03-0299-4648-8a95-c91aba291de8', parted=False,
root=False, partitions={}),
- Disk(name='sdae', model='INTEL SSDSC2KW24',
+ Disk(name='/dev/sdae', model='INTEL SSDSC2KW24',
serial='CVLT6153072G240CGN',
size=234461593, transport='sas', vendor='ATA',
hctl='1:0:17:0',
@@ -777,42 +782,42 @@ def test_scan_disks_dell_perk_h710_md1220_36_disks(self):
root=False, partitions={}),
# N.B. we have sdab with serial=None, suspected due to first listed
# matching base root device name of sda (sda3).
- Disk(name='sdab', model='ST91000640SS',
+ Disk(name='/dev/sdab', model='ST91000640SS',
serial='5000c50063041947',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:14:0', type='disk', fstype='btrfs',
label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdac', model='ST91000640SS',
+ Disk(name='/dev/sdac', model='ST91000640SS',
serial='5000c500630249cb',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:15:0', type='disk', fstype='btrfs',
label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdaa', model='ST91000640SS',
+ Disk(name='/dev/sdaa', model='ST91000640SS',
serial='5000c50063044363',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:13:0', type='disk', fstype='btrfs',
label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdaj', model='INTEL SSDSC2KW24',
+ Disk(name='/dev/sdaj', model='INTEL SSDSC2KW24',
serial='CVLT6181019S240CGN',
size=234461593, transport='sas', vendor='ATA',
hctl='1:0:22:0', type='disk', fstype='btrfs',
label='INTEL_SSD',
uuid='a504bf03-0299-4648-8a95-c91aba291de8', parted=False,
root=False, partitions={}),
- Disk(name='sdah', model='ST91000640SS',
+ Disk(name='/dev/sdah', model='ST91000640SS',
serial='5000c50062cb366f',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:20:0', type='disk', fstype='btrfs',
label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=False, partitions={}),
- Disk(name='sdai', model='ST91000640SS',
+ Disk(name='/dev/sdai', model='ST91000640SS',
serial='5000c5006303ea0f',
size=976748544, transport='sas', vendor='SEAGATE',
hctl='1:0:21:0', type='disk', fstype='btrfs',
@@ -830,10 +835,8 @@ def test_scan_disks_dell_perk_h710_md1220_36_disks(self):
returned = scan_disks(1048576, test_mode=True)
# TODO: Would be nice to have differences found shown.
#
- # TODO: Test could also be more flexible / robust if we are
- # insensitive to order, ie sort both returned and expected
- # expected.sort(key=operator.itemgetter(0))
- # returned.sort(key=operator.itemgetter(0))
+ expected.sort(key=operator.itemgetter(0))
+ returned.sort(key=operator.itemgetter(0))
self.assertEqual(returned, expected,
msg='Un-expected scan_disks() result:\n '
'returned = ({}).\n '
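
The removed TODO is resolved by sorting both lists on the first namedtuple field before comparison. A reduced sketch of why operator.itemgetter(0) makes the comparison order-insensitive; the two-field Disk and its values are illustrative, as the real tuple carries many more fields:

    import operator
    from collections import namedtuple

    Disk = namedtuple('Disk', 'name serial')

    returned = [Disk('/dev/sdb', '2'), Disk('/dev/sda', '1')]
    expected = [Disk('/dev/sda', '1'), Disk('/dev/sdb', '2')]

    # itemgetter(0) keys on the first field (name), so both lists end
    # up in the same order regardless of how lsblk emitted devices.
    returned.sort(key=operator.itemgetter(0))
    expected.sort(key=operator.itemgetter(0))
    assert returned == expected
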
@@ -856,18 +859,18 @@ def test_scan_disks_27_plus_disks_regression_issue(self):
devices: without this the issue does not present.
"""
out = [[
- 'NAME="sdab" MODEL="ST91000640SS " SERIAL="5000c50063041947" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:14:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sdai" MODEL="ST91000640SS " SERIAL="5000c5006303ea0f" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:21:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
- 'NAME="sda" MODEL="PERC H710 " SERIAL="6848f690e936450018b7c3a11330997b" SIZE="278.9G" TRAN="" VENDOR="DELL " HCTL="0:2:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda2" MODEL="" SERIAL="" SIZE="13.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="a34b82d0-c342-41e0-a58d-4f0a0027829d"', # noqa E501
- 'NAME="sda3" MODEL="" SERIAL="" SIZE="264.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="7f7acdd7-493e-4bb5-b801-b7b7dc289535"', # noqa E501
- 'NAME="sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="5d2848ff-ae8f-4c2f-b825-90621076acc1"', # noqa E501
+ 'NAME="/dev/sdab" MODEL="ST91000640SS " SERIAL="5000c50063041947" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:14:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sdai" MODEL="ST91000640SS " SERIAL="5000c5006303ea0f" SIZE="931.5G" TRAN="sas" VENDOR="SEAGATE " HCTL="1:0:21:0" TYPE="disk" FSTYPE="btrfs" LABEL="SCRATCH" UUID="a90e6787-1c45-46d6-a2ba-41017a17c1d5"', # noqa E501
+ 'NAME="/dev/sda" MODEL="PERC H710 " SERIAL="6848f690e936450018b7c3a11330997b" SIZE="278.9G" TRAN="" VENDOR="DELL " HCTL="0:2:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda2" MODEL="" SERIAL="" SIZE="13.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="a34b82d0-c342-41e0-a58d-4f0a0027829d"', # noqa E501
+ 'NAME="/dev/sda3" MODEL="" SERIAL="" SIZE="264.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="7f7acdd7-493e-4bb5-b801-b7b7dc289535"', # noqa E501
+ 'NAME="/dev/sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="5d2848ff-ae8f-4c2f-b825-90621076acc1"', # noqa E501
''
]]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='sda3', model='PERC H710',
+ Disk(name='/dev/sda3', model='PERC H710',
serial='6848f690e936450018b7c3a11330997b', size=277558067,
transport=None, vendor='DELL', hctl='0:2:0:0', type='part',
fstype='btrfs', label='rockstor_rockstor',
@@ -875,13 +878,13 @@ def test_scan_disks_27_plus_disks_regression_issue(self):
root=True, partitions={}),
# N.B. we have sdab with serial=None, suspected due to first listed
# matching base root device name of sda (sda3).
- Disk(name='sdab', model=None, serial=None, size=976748544,
+ Disk(name='/dev/sdab', model=None, serial=None, size=976748544,
transport=None, vendor=None, hctl=None, type='disk',
fstype='btrfs', label='SCRATCH',
uuid='a90e6787-1c45-46d6-a2ba-41017a17c1d5', parted=False,
root=True, partitions={}),
# Subsequent sda[a-z] device receives 'fake-serial-'
- Disk(name='sdai', model=None,
+ Disk(name='/dev/sdai', model=None,
serial='fake-serial-',
size=976748544, transport=None, vendor=None, hctl=None,
type='disk', fstype='btrfs', label='SCRATCH',
@@ -919,40 +922,40 @@ def test_scan_disks_luks_sys_disk(self):
        # Rockstor sees this install as system on whole disk dev (open luks dev)
# ie the system btrfs volume is on whole disk not within a partition.
out = [[
- 'NAME="sdb" MODEL="QEMU HARDDISK " SERIAL="2" SIZE="5G" TRAN="sata" VENDOR="ATA " HCTL="5:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="rock-pool" UUID="50b66542-9a19-4403-b5a0-cd22412d9ae9"', # noqa E501
- 'NAME="sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00005" SIZE="1024M" TRAN="sata" VENDOR="QEMU " HCTL="2:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sdc" MODEL="QEMU HARDDISK " SERIAL="QM00013" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="6:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sdc2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="crypto_LUKS" LABEL="" UUID="3efae1ba-dbdf-4102-8bdc-e607e3448a7d"', # noqa E501
- 'NAME="luks-3efae1ba-dbdf-4102-8bdc-e607e3448a7d" MODEL="" SERIAL="" SIZE="818M" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="swap" LABEL="" UUID="1ef3c0a9-73b6-4271-a618-8fe4e580edac"', # noqa E501
- 'NAME="sdc3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="crypto_LUKS" LABEL="" UUID="315111a6-8d37-447a-8dbf-0c9026abc456"', # noqa E501
- 'NAME="luks-315111a6-8d37-447a-8dbf-0c9026abc456" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="d763b614-5eb3-45ac-8ac6-8f5aa5d0b74d"', # noqa E501
- 'NAME="sdc1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="bcd91aba-6f2d-441b-9f31-804ac094befe"', # noqa E501
- 'NAME="sda" MODEL="QEMU HARDDISK " SERIAL="1" SIZE="5G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="rock-pool" UUID="50b66542-9a19-4403-b5a0-cd22412d9ae9"', # noqa E501
+ 'NAME="/dev/sdb" MODEL="QEMU HARDDISK " SERIAL="2" SIZE="5G" TRAN="sata" VENDOR="ATA " HCTL="5:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="rock-pool" UUID="50b66542-9a19-4403-b5a0-cd22412d9ae9"', # noqa E501
+ 'NAME="/dev/sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00005" SIZE="1024M" TRAN="sata" VENDOR="QEMU " HCTL="2:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdc" MODEL="QEMU HARDDISK " SERIAL="QM00013" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="6:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdc2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="crypto_LUKS" LABEL="" UUID="3efae1ba-dbdf-4102-8bdc-e607e3448a7d"', # noqa E501
+ 'NAME="/dev/mapper/luks-3efae1ba-dbdf-4102-8bdc-e607e3448a7d" MODEL="" SERIAL="" SIZE="818M" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="swap" LABEL="" UUID="1ef3c0a9-73b6-4271-a618-8fe4e580edac"', # noqa E501
+ 'NAME="/dev/sdc3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="crypto_LUKS" LABEL="" UUID="315111a6-8d37-447a-8dbf-0c9026abc456"', # noqa E501
+ 'NAME="/dev/mapper/luks-315111a6-8d37-447a-8dbf-0c9026abc456" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="crypt" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="d763b614-5eb3-45ac-8ac6-8f5aa5d0b74d"', # noqa E501
+ 'NAME="/dev/sdc1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="bcd91aba-6f2d-441b-9f31-804ac094befe"', # noqa E501
+ 'NAME="/dev/sda" MODEL="QEMU HARDDISK " SERIAL="1" SIZE="5G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="rock-pool" UUID="50b66542-9a19-4403-b5a0-cd22412d9ae9"', # noqa E501
'']]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='luks-315111a6-8d37-447a-8dbf-0c9026abc456', model=None,
+ Disk(name='/dev/mapper/luks-315111a6-8d37-447a-8dbf-0c9026abc456', model=None, # noqa E501
serial='CRYPT-LUKS1-315111a68d37447a8dbf0c9026abc456-luks-315111a6-8d37-447a-8dbf-0c9026abc456', # noqa E501
size=7025459, transport=None, vendor=None, hctl=None,
type='crypt', fstype='btrfs', label='rockstor_rockstor',
uuid='d763b614-5eb3-45ac-8ac6-8f5aa5d0b74d', parted=False,
root=True, partitions={}),
- Disk(name='sda', model='QEMU HARDDISK', serial='1', size=5242880,
+ Disk(name='/dev/sda', model='QEMU HARDDISK', serial='1', size=5242880, # noqa E501
transport='sata', vendor='ATA', hctl='3:0:0:0', type='disk',
fstype='btrfs', label='rock-pool',
uuid='50b66542-9a19-4403-b5a0-cd22412d9ae9', parted=False,
root=False, partitions={}),
- Disk(name='sdb', model='QEMU HARDDISK', serial='2', size=5242880,
+ Disk(name='/dev/sdb', model='QEMU HARDDISK', serial='2', size=5242880, # noqa E501
transport='sata', vendor='ATA', hctl='5:0:0:0', type='disk',
fstype='btrfs', label='rock-pool',
uuid='50b66542-9a19-4403-b5a0-cd22412d9ae9', parted=False,
root=False, partitions={}),
- Disk(name='sdc', model='QEMU HARDDISK', serial='QM00013',
+ Disk(name='/dev/sdc', model='QEMU HARDDISK', serial='QM00013',
size=8388608, transport='sata', vendor='ATA', hctl='6:0:0:0',
type='disk', fstype='crypto_LUKS', label=None,
uuid='315111a6-8d37-447a-8dbf-0c9026abc456', parted=True,
- root=False, partitions={'sdc3': 'crypto_LUKS'})
+ root=False, partitions={'/dev/sdc3': 'crypto_LUKS'})
]]
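
Note that the partitions dict, like every name and map key in this patch, is now keyed by full '/dev/...' path. A reduced sketch of that shape; the three-field Disk is a stand-in for the full namedtuple, with values copied from the fixture above:

    from collections import namedtuple

    Disk = namedtuple('Disk', 'name parted partitions')

    # partitions maps full partition path -> fstype, mirroring the
    # {'/dev/sdc3': 'crypto_LUKS'} entry in the expected result above.
    sdc = Disk(name='/dev/sdc', parted=True,
               partitions={'/dev/sdc3': 'crypto_LUKS'})
    assert all(p.startswith('/dev/') for p in sdc.partitions)
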
# Establish dynamic mock behaviour for get_disk_serial()
@@ -964,7 +967,7 @@ def dyn_disk_serial_return(*args, **kwargs):
        # Entries only required here if lsblk test data has no serial info:
# eg for bcache, LUKS, mdraid, and virtio type devices.
s_map = {
- 'luks-315111a6-8d37-447a-8dbf-0c9026abc456': 'CRYPT-LUKS1-315111a68d37447a8dbf0c9026abc456-luks-315111a6-8d37-447a-8dbf-0c9026abc456' # noqa E501
+ '/dev/mapper/luks-315111a6-8d37-447a-8dbf-0c9026abc456': 'CRYPT-LUKS1-315111a68d37447a8dbf0c9026abc456-luks-315111a6-8d37-447a-8dbf-0c9026abc456' # noqa E501
}
# First argument in get_disk_serial() is device_name, key off this
# for our dynamic mock return from s_map (serial map).
@@ -980,7 +983,7 @@ def dyn_disk_serial_return(*args, **kwargs):
#
# Ensure we correctly mock our root_disk value away from file default
# of sda as we now have a root_disk on luks:
- self.mock_root_disk.return_value = 'luks-315111a6-8d37-447a-8dbf-0c9026abc456' # noqa E501
+ self.mock_root_disk.return_value = '/dev/mapper/luks-315111a6-8d37-447a-8dbf-0c9026abc456' # noqa E501
for o, e, r, expected in zip(out, err, rc, expected_result):
self.mock_run_command.return_value = (o, e, r)
@@ -1031,23 +1034,23 @@ def test_scan_disks_btrfs_in_partition(self):
root on sda, ie 'Regex to identify a partition on the base_root_disk.'
"""
out = [[
- 'NAME="sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="1024M" TRAN="ata" VENDOR="QEMU " HCTL="0:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda" MODEL="QEMU HARDDISK " SERIAL="QM00005" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="aaf61037-23b1-4c3b-81ca-6d07f3ed922d"', # noqa E501
- 'NAME="sda3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="355f53a4-24e1-465e-95f3-7c422898f542"', # noqa E501
- 'NAME="sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="04ce9f16-a0a0-4db8-8719-1083a0d4f381"', # noqa E501
- 'NAME="vda" MODEL="" SERIAL="" SIZE="8G" TRAN="" VENDOR="0x1af4" HCTL="" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="vda2" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="btrfs-in-partition" UUID="55284332-af66-4ca0-9647-99d9afbe0ec5"', # noqa E501
- 'NAME="vda1" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="vfat" LABEL="" UUID="8F05-D915"', # noqa E501
+ 'NAME="/dev/sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="1024M" TRAN="ata" VENDOR="QEMU " HCTL="0:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda" MODEL="QEMU HARDDISK " SERIAL="QM00005" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="aaf61037-23b1-4c3b-81ca-6d07f3ed922d"', # noqa E501
+ 'NAME="/dev/sda3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="355f53a4-24e1-465e-95f3-7c422898f542"', # noqa E501
+ 'NAME="/dev/sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="04ce9f16-a0a0-4db8-8719-1083a0d4f381"', # noqa E501
+ 'NAME="/dev/vda" MODEL="" SERIAL="" SIZE="8G" TRAN="" VENDOR="0x1af4" HCTL="" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/vda2" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="btrfs-in-partition" UUID="55284332-af66-4ca0-9647-99d9afbe0ec5"', # noqa E501
+ 'NAME="/dev/vda1" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="vfat" LABEL="" UUID="8F05-D915"', # noqa E501
''], [
- 'NAME="sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="1024M" TRAN="ata" VENDOR="QEMU " HCTL="0:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda" MODEL="QEMU HARDDISK " SERIAL="QM00005" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="aaf61037-23b1-4c3b-81ca-6d07f3ed922d"', # noqa E501
- 'NAME="sda3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="355f53a4-24e1-465e-95f3-7c422898f542"', # noqa E501
- 'NAME="sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="04ce9f16-a0a0-4db8-8719-1083a0d4f381"', # noqa E501
- 'NAME="sdap" MODEL="QEMU HARDDISK " SERIAL="42nd-scsi" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sdap2" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="btrfs-in-partition" UUID="55284332-af66-4ca0-9647-99d9afbe0ec5"', # noqa E501
- 'NAME="sdap1" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="vfat" LABEL="" UUID="8F05-D915"', # noqa E501
+ 'NAME="/dev/sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="1024M" TRAN="ata" VENDOR="QEMU " HCTL="0:0:0:0" TYPE="rom" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda" MODEL="QEMU HARDDISK " SERIAL="QM00005" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda2" MODEL="" SERIAL="" SIZE="820M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="aaf61037-23b1-4c3b-81ca-6d07f3ed922d"', # noqa E501
+ 'NAME="/dev/sda3" MODEL="" SERIAL="" SIZE="6.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="355f53a4-24e1-465e-95f3-7c422898f542"', # noqa E501
+ 'NAME="/dev/sda1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="04ce9f16-a0a0-4db8-8719-1083a0d4f381"', # noqa E501
+ 'NAME="/dev/sdap" MODEL="QEMU HARDDISK " SERIAL="42nd-scsi" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdap2" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="btrfs-in-partition" UUID="55284332-af66-4ca0-9647-99d9afbe0ec5"', # noqa E501
+ 'NAME="/dev/sdap1" MODEL="" SERIAL="" SIZE="4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="vfat" LABEL="" UUID="8F05-D915"', # noqa E501
''
]]
err = [['']]
@@ -1057,24 +1060,26 @@ def test_scan_disks_btrfs_in_partition(self):
rc.append(0)
expected_result = [[
# Note partitions entry within vda, consistent with cli prep.
- Disk(name='vda', model=None, serial='serial-1', size=4194304,
+ Disk(name='/dev/vda', model=None, serial='serial-1', size=4194304,
transport=None, vendor='0x1af4', hctl=None, type='disk',
fstype='btrfs', label='btrfs-in-partition',
uuid='55284332-af66-4ca0-9647-99d9afbe0ec5', parted=True,
- root=False, partitions={'vda1': 'vfat', 'vda2': 'btrfs'}),
- Disk(name='sda3', model='QEMU HARDDISK', serial='QM00005',
+ root=False,
+ partitions={'/dev/vda1': 'vfat', '/dev/vda2': 'btrfs'}),
+ Disk(name='/dev/sda3', model='QEMU HARDDISK', serial='QM00005',
size=7025459, transport='sata', vendor='ATA', hctl='2:0:0:0',
type='part', fstype='btrfs', label='rockstor_rockstor',
uuid='355f53a4-24e1-465e-95f3-7c422898f542', parted=True,
root=True, partitions={})
], [
# Note sdap (42nd disk) hand crafted from above vda entry
- Disk(name='sdap', model='QEMU HARDDISK', serial='42nd-scsi',
+ Disk(name='/dev/sdap', model='QEMU HARDDISK', serial='42nd-scsi',
size=4194304, transport='sata', vendor='ATA', hctl='3:0:0:0',
type='disk', fstype='btrfs', label='btrfs-in-partition',
uuid='55284332-af66-4ca0-9647-99d9afbe0ec5', parted=True,
- root=False, partitions={'sdap1': 'vfat', 'sdap2': 'btrfs'}),
- Disk(name='sda3', model='QEMU HARDDISK', serial='QM00005',
+ root=False,
+ partitions={'/dev/sdap1': 'vfat', '/dev/sdap2': 'btrfs'}),
+ Disk(name='/dev/sda3', model='QEMU HARDDISK', serial='QM00005',
size=7025459, transport='sata', vendor='ATA', hctl='2:0:0:0',
type='part', fstype='btrfs', label='rockstor_rockstor',
uuid='355f53a4-24e1-465e-95f3-7c422898f542', parted=True,
@@ -1090,7 +1095,7 @@ def dyn_disk_serial_return(*args, **kwargs):
        # Entries only required here if lsblk test data has no serial info:
# eg for bcache, LUKS, mdraid, and virtio type devices.
s_map = {
- 'vda': 'serial-1'
+ '/dev/vda': 'serial-1'
}
# First argument in get_disk_serial() is device_name, key off this
# for our dynamic mock return from s_map (serial map).
@@ -1145,43 +1150,43 @@ def test_scan_disks_mdraid_sys_disk(self):
"""
out = [[
- 'NAME="sdb" MODEL="QEMU HARDDISK " SERIAL="md-serial-2" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sdb2" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:boot" UUID="fc9fc706-e831-6b14-591e-0bc5bb008681"', # noqa E501
- 'NAME="md126" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="ext4" LABEL="" UUID="9df7d0f5-d109-4e84-a0f0-03a0cf0c03ad"', # noqa E501
- 'NAME="sdb3" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:swap" UUID="9ed64a0b-10d2-72f9-4120-0f662c5b5d66"', # noqa E501
- 'NAME="md125" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="swap" LABEL="" UUID="1234d230-0aca-4b1d-9a10-c66744464d12"', # noqa E501
- 'NAME="sdb1" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:root" UUID="183a555f-3a90-3f7d-0726-b4109a1d78ba"', # noqa E501
- 'NAME="md127" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="59800daa-fdfd-493f-837d-18e9b46bbb46"', # noqa E501
- 'NAME="sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="791M" TRAN="ata" VENDOR="QEMU " HCTL="0:0:0:0" TYPE="rom" FSTYPE="iso9660" LABEL="Rockstor 3 x86_64" UUID="2017-07-02-03-11-01-00"', # noqa E501
- 'NAME="sda" MODEL="QEMU HARDDISK " SERIAL="md-serial-1" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sda2" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:boot" UUID="fc9fc706-e831-6b14-591e-0bc5bb008681"', # noqa E501
- 'NAME="md126" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="ext4" LABEL="" UUID="9df7d0f5-d109-4e84-a0f0-03a0cf0c03ad"', # noqa E501
- 'NAME="sda3" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:swap" UUID="9ed64a0b-10d2-72f9-4120-0f662c5b5d66"', # noqa E501
- 'NAME="md125" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="swap" LABEL="" UUID="1234d230-0aca-4b1d-9a10-c66744464d12"', # noqa E501
- 'NAME="sda1" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:root" UUID="183a555f-3a90-3f7d-0726-b4109a1d78ba"', # noqa E501
- 'NAME="md127" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="59800daa-fdfd-493f-837d-18e9b46bbb46"', # noqa E501
+ 'NAME="/dev/sdb" MODEL="QEMU HARDDISK " SERIAL="md-serial-2" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdb2" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:boot" UUID="fc9fc706-e831-6b14-591e-0bc5bb008681"', # noqa E501
+ 'NAME="/dev/md126" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="ext4" LABEL="" UUID="9df7d0f5-d109-4e84-a0f0-03a0cf0c03ad"', # noqa E501
+ 'NAME="/dev/sdb3" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:swap" UUID="9ed64a0b-10d2-72f9-4120-0f662c5b5d66"', # noqa E501
+ 'NAME="/dev/md125" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="swap" LABEL="" UUID="1234d230-0aca-4b1d-9a10-c66744464d12"', # noqa E501
+ 'NAME="/dev/sdb1" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:root" UUID="183a555f-3a90-3f7d-0726-b4109a1d78ba"', # noqa E501
+ 'NAME="/dev/md127" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="59800daa-fdfd-493f-837d-18e9b46bbb46"', # noqa E501
+ 'NAME="/dev/sr0" MODEL="QEMU DVD-ROM " SERIAL="QM00001" SIZE="791M" TRAN="ata" VENDOR="QEMU " HCTL="0:0:0:0" TYPE="rom" FSTYPE="iso9660" LABEL="Rockstor 3 x86_64" UUID="2017-07-02-03-11-01-00"', # noqa E501
+ 'NAME="/dev/sda" MODEL="QEMU HARDDISK " SERIAL="md-serial-1" SIZE="8G" TRAN="sata" VENDOR="ATA " HCTL="2:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sda2" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:boot" UUID="fc9fc706-e831-6b14-591e-0bc5bb008681"', # noqa E501
+ 'NAME="/dev/md126" MODEL="" SERIAL="" SIZE="954M" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="ext4" LABEL="" UUID="9df7d0f5-d109-4e84-a0f0-03a0cf0c03ad"', # noqa E501
+ 'NAME="/dev/sda3" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:swap" UUID="9ed64a0b-10d2-72f9-4120-0f662c5b5d66"', # noqa E501
+ 'NAME="/dev/md125" MODEL="" SERIAL="" SIZE="1.4G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="swap" LABEL="" UUID="1234d230-0aca-4b1d-9a10-c66744464d12"', # noqa E501
+ 'NAME="/dev/sda1" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="linux_raid_member" LABEL="rockstor:root" UUID="183a555f-3a90-3f7d-0726-b4109a1d78ba"', # noqa E501
+ 'NAME="/dev/md127" MODEL="" SERIAL="" SIZE="5.7G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="59800daa-fdfd-493f-837d-18e9b46bbb46"', # noqa E501
'']]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='md127', model='[2] md-serial-1[0] md-serial-2[1] raid1',
+ Disk(name='/dev/md127', model='[2] md-serial-1[0] md-serial-2[1] raid1', # noqa E501
serial='183a555f:3a903f7d:0726b410:9a1d78ba', size=5976883,
transport=None, vendor=None, hctl=None, type='raid1',
fstype='btrfs', label='rockstor_rockstor',
uuid='59800daa-fdfd-493f-837d-18e9b46bbb46', parted=False,
root=True, partitions={}),
- Disk(name='sda', model='QEMU HARDDISK', serial='md-serial-1',
+ Disk(name='/dev/sda', model='QEMU HARDDISK', serial='md-serial-1',
size=8388608, transport='sata', vendor='ATA', hctl='2:0:0:0',
type='disk', fstype='linux_raid_member', label=None,
uuid=None, parted=True, root=False,
- partitions={'sda3': 'linux_raid_member',
- 'sda1': 'linux_raid_member'}),
- Disk(name='sdb', model='QEMU HARDDISK', serial='md-serial-2',
+ partitions={'/dev/sda3': 'linux_raid_member',
+ '/dev/sda1': 'linux_raid_member'}),
+ Disk(name='/dev/sdb', model='QEMU HARDDISK', serial='md-serial-2',
size=8388608, transport='sata', vendor='ATA', hctl='3:0:0:0',
type='disk', fstype='linux_raid_member', label=None,
uuid=None, parted=True, root=False,
- partitions={'sdb3': 'linux_raid_member',
- 'sdb1': 'linux_raid_member'})
+ partitions={'/dev/sdb3': 'linux_raid_member',
+ '/dev/sdb1': 'linux_raid_member'})
]]
# No LUKS or bcache mocking necessary as none in test data.
# Establish dynamic mock behaviour for get_disk_serial()
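
The model field in the md127 entry above encodes mdraid membership as "[member-count] serial[slot] ... raid-level" (presumably derived from mdadm output elsewhere in the code). A hypothetical reconstruction, purely to illustrate the layout:

    # Illustrative only: rebuild '[2] md-serial-1[0] md-serial-2[1] raid1'
    # from slot-indexed member serials plus the raid level.
    members = {0: 'md-serial-1', 1: 'md-serial-2'}
    level = 'raid1'
    model = '[{}] {} {}'.format(
        len(members),
        ' '.join('{}[{}]'.format(serial, slot)
                 for slot, serial in sorted(members.items())),
        level)
    assert model == '[2] md-serial-1[0] md-serial-2[1] raid1'
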
@@ -1193,9 +1198,9 @@ def dyn_disk_serial_return(*args, **kwargs):
# Entries only required here if lsblk test data has no serial info:
# eg for bcache, LUKS, mdraid, and virtio type devices.
s_map = {
- 'md125': 'fc9fc706:e8316b14:591e0bc5:bb008681',
- 'md126': '9ed64a0b:10d272f9:41200f66:2c5b5d66',
- 'md127': '183a555f:3a903f7d:0726b410:9a1d78ba'
+ '/dev/md125': 'fc9fc706:e8316b14:591e0bc5:bb008681',
+ '/dev/md126': '9ed64a0b:10d272f9:41200f66:2c5b5d66',
+ '/dev/md127': '183a555f:3a903f7d:0726b410:9a1d78ba'
}
# First argument in get_disk_serial() is device_name, key off this
# for our dynamic mock return from s_map (serial map).
@@ -1209,7 +1214,7 @@ def dyn_disk_serial_return(*args, **kwargs):
self.mock_dyn_get_disk_serial.side_effect = dyn_disk_serial_return
# Ensure we correctly mock our root_disk value away from file default
# of sda as we now have a root_disk on an md device.
- self.mock_root_disk.return_value = 'md127'
+ self.mock_root_disk.return_value = '/dev/md127'
# As we have an mdraid device of interest (the system disk), its model
# info field is used to present basic info on its members' serials:
# We mock this as otherwise our wide scope run_command() mock breaks
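
The root_disk mock above follows the stock unittest.mock patching pattern; sketched below against a stand-in class rather than the real module (the real tests patch their own import site):

    from unittest.mock import patch

    class _FakeOsi(object):
        # Stand-in for the module that exposes root_disk(); illustrative.
        @staticmethod
        def root_disk():
            return '/dev/sda'

    with patch.object(_FakeOsi, 'root_disk', return_value='/dev/md127'):
        assert _FakeOsi.root_disk() == '/dev/md127'
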
@@ -1260,39 +1265,39 @@ def test_scan_disks_intel_bios_raid_sys_disk(self):
"""
out = [[
- 'NAME="sdb" MODEL="TOSHIBA MK1652GS" SERIAL="Z8A9CAZUT" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
- 'NAME="md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
- 'NAME="md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
- 'NAME="md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
- 'NAME="sdc" MODEL="SAMSUNG HM160HI " SERIAL="S1WWJ9BZ408430" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
- 'NAME="md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
- 'NAME="md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
- 'NAME="md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
- 'NAME="sda" MODEL="WDC WD3200AAKS-7" SERIAL="WD-WMAV20342011" SIZE="298.1G" TRAN="sata" VENDOR="ATA " HCTL="0:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdb" MODEL="TOSHIBA MK1652GS" SERIAL="Z8A9CAZUT" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
+ 'NAME="/dev/md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
+ 'NAME="/dev/md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
+ 'NAME="/dev/sdc" MODEL="SAMSUNG HM160HI " SERIAL="S1WWJ9BZ408430" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
+ 'NAME="/dev/md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
+ 'NAME="/dev/md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
+ 'NAME="/dev/sda" MODEL="WDC WD3200AAKS-7" SERIAL="WD-WMAV20342011" SIZE="298.1G" TRAN="sata" VENDOR="ATA " HCTL="0:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
'']]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='md126p3',
+ Disk(name='/dev/md126p3',
model='[2] Z8A9CAZUT[0] S1WWJ9BZ408430[1] raid1',
serial='a300e6b0:5d69eee6:98a2354a:0ba1e1eb', size=153721241,
transport=None, vendor=None, hctl=None, type='md',
fstype='btrfs', label='rockstor_rockstor00',
uuid='1c59b842-5d08-4472-a731-c593ab0bff93', parted=True,
root=True, partitions={}),
- Disk(name='sda', model='WDC WD3200AAKS-7',
+ Disk(name='/dev/sda', model='WDC WD3200AAKS-7',
serial='WD-WMAV20342011', size=312580505, transport='sata',
vendor='ATA', hctl='0:0:0:0', type='disk', fstype=None,
label=None, uuid=None, parted=False, root=False,
partitions={}),
- Disk(name='sdb', model='TOSHIBA MK1652GS', serial='Z8A9CAZUT',
+ Disk(name='/dev/sdb', model='TOSHIBA MK1652GS', serial='Z8A9CAZUT',
size=156342681, transport='sata', vendor='ATA',
hctl='1:0:0:0', type='disk', fstype='isw_raid_member',
label=None, uuid=None, parted=False, root=False,
partitions={}),
- Disk(name='sdc', model='SAMSUNG HM160HI', serial='S1WWJ9BZ408430',
+ Disk(name='/dev/sdc', model='SAMSUNG HM160HI', serial='S1WWJ9BZ408430', # noqa E501
size=156342681, transport='sata', vendor='ATA',
hctl='3:0:0:0', type='disk', fstype='isw_raid_member',
label=None, uuid=None, parted=False, root=False,
@@ -1311,9 +1316,9 @@ def dyn_disk_serial_return(*args, **kwargs):
# Note in the following our md126p3 partition has the same serial
# as its base device.
s_map = {
- 'md126': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
- 'md126p3': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
- 'md127': 'a88a8eda:1e459751:3341ad9b:fe3031a0'
+ '/dev/md126': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
+ '/dev/md126p3': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
+ '/dev/md127': 'a88a8eda:1e459751:3341ad9b:fe3031a0'
}
# First argument in get_disk_serial() is device_name, key off this
# for our dynamic mock return from s_map (serial map).
@@ -1327,7 +1332,7 @@ def dyn_disk_serial_return(*args, **kwargs):
self.mock_dyn_get_disk_serial.side_effect = dyn_disk_serial_return
# Ensure we correctly mock our root_disk value away from file default
# of sda as we now have a root_disk on an md device.
- self.mock_root_disk.return_value = 'md126'
+ self.mock_root_disk.return_value = '/dev/md126'
# As we have an mdraid device of interest (the system disk), its model
# info field is used to present basic info on its members' serials:
# We mock this as otherwise our wide scope run_command() mock breaks
@@ -1372,47 +1377,47 @@ def test_scan_disks_intel_bios_raid_data_disk(self):
"""
# Out and expected_results have sda stripped for simplicity.
out = [[
- 'NAME="sdd" MODEL="Extreme " SERIAL="AA010312161642210668" SIZE="29.2G" TRAN="usb" VENDOR="SanDisk " HCTL="6:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="sdd2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="422cc263-788e-4a74-a127-99695c380a2c"', # noqa E501
- 'NAME="sdd3" MODEL="" SERIAL="" SIZE="26.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="d030d7ee-4c85-4317-96bf-6ff766fec9ef"', # noqa E501
- 'NAME="sdd1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="35c11bd3-bba1-4869-8a51-1e6bfaec15a2"', # noqa E501
- 'NAME="sdb" MODEL="TOSHIBA MK1652GS" SERIAL="Z8A9CAZUT" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
- 'NAME="md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
- 'NAME="md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
- 'NAME="md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
- 'NAME="sdc" MODEL="SAMSUNG HM160HI " SERIAL="S1WWJ9BZ408430" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
- 'NAME="md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
- 'NAME="md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
- 'NAME="md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
+ 'NAME="/dev/sdd" MODEL="Extreme " SERIAL="AA010312161642210668" SIZE="29.2G" TRAN="usb" VENDOR="SanDisk " HCTL="6:0:0:0" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/sdd2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="422cc263-788e-4a74-a127-99695c380a2c"', # noqa E501
+ 'NAME="/dev/sdd3" MODEL="" SERIAL="" SIZE="26.7G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor" UUID="d030d7ee-4c85-4317-96bf-6ff766fec9ef"', # noqa E501
+ 'NAME="/dev/sdd1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="35c11bd3-bba1-4869-8a51-1e6bfaec15a2"', # noqa E501
+ 'NAME="/dev/sdb" MODEL="TOSHIBA MK1652GS" SERIAL="Z8A9CAZUT" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
+ 'NAME="/dev/md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
+ 'NAME="/dev/md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
+ 'NAME="/dev/sdc" MODEL="SAMSUNG HM160HI " SERIAL="S1WWJ9BZ408430" SIZE="149.1G" TRAN="sata" VENDOR="ATA " HCTL="3:0:0:0" TYPE="disk" FSTYPE="isw_raid_member" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126" MODEL="" SERIAL="" SIZE="149G" TRAN="" VENDOR="" HCTL="" TYPE="raid1" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/md126p3" MODEL="" SERIAL="" SIZE="146.6G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="1c59b842-5d08-4472-a731-c593ab0bff93"', # noqa E501
+ 'NAME="/dev/md126p1" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="ext4" LABEL="" UUID="40e4a91f-6b08-4ea0-b0d1-e43d145558b3"', # noqa E501
+ 'NAME="/dev/md126p2" MODEL="" SERIAL="" SIZE="2G" TRAN="" VENDOR="" HCTL="" TYPE="md" FSTYPE="swap" LABEL="" UUID="43d2f3dc-38cd-49ef-9e18-be35297c1412"', # noqa E501
'']]
err = [['']]
rc = [0]
expected_result = [[
- Disk(name='sdc', model='SAMSUNG HM160HI', serial='S1WWJ9BZ408430',
+ Disk(name='/dev/sdc', model='SAMSUNG HM160HI', serial='S1WWJ9BZ408430', # noqa E501
size=156342681, transport='sata', vendor='ATA',
hctl='3:0:0:0', type='disk', fstype='isw_raid_member',
label=None, uuid=None, parted=False, root=False,
partitions={}),
- Disk(name='sdb', model='TOSHIBA MK1652GS', serial='Z8A9CAZUT',
+ Disk(name='/dev/sdb', model='TOSHIBA MK1652GS', serial='Z8A9CAZUT',
size=156342681, transport='sata', vendor='ATA',
hctl='1:0:0:0', type='disk', fstype='isw_raid_member',
label=None, uuid=None, parted=False, root=False,
partitions={}),
- Disk(name='sdd3', model='Extreme', serial='AA010312161642210668',
+ Disk(name='/dev/sdd3', model='Extreme', serial='AA010312161642210668', # noqa E501
size=27996979, transport='usb', vendor='SanDisk',
hctl='6:0:0:0', type='part', fstype='btrfs',
label='rockstor_rockstor',
uuid='d030d7ee-4c85-4317-96bf-6ff766fec9ef', parted=True,
root=True, partitions={}),
- Disk(name='md126',
+ Disk(name='/dev/md126',
model='[2] Z8A9CAZUT[0] S1WWJ9BZ408430[1] raid1',
serial='a300e6b0:5d69eee6:98a2354a:0ba1e1eb', size=153721241,
transport=None, vendor=None, hctl=None, type='raid1',
fstype='btrfs', label='rockstor_rockstor00',
uuid='1c59b842-5d08-4472-a731-c593ab0bff93', parted=True,
- root=False, partitions={'md126p3': 'btrfs'})
+ root=False, partitions={'/dev/md126p3': 'btrfs'})
]]
# No LUKS or bcache mocking necessary as none in test data.
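
The partitions field in the md126 entry above maps a base device's partition names to their fstype; an illustrative reduction over parsed lsblk pairs (building on the hypothetical parse_lsblk_line sketched earlier):

    # Illustrative: collect {partition-name: fstype} for one base device.
    parsed = [
        {'NAME': '/dev/md126', 'TYPE': 'raid1', 'FSTYPE': ''},
        {'NAME': '/dev/md126p3', 'TYPE': 'md', 'FSTYPE': 'btrfs'},
    ]
    partitions = {d['NAME']: d['FSTYPE'] for d in parsed
                  if d['NAME'].startswith('/dev/md126p') and d['FSTYPE']}
    assert partitions == {'/dev/md126p3': 'btrfs'}
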
@@ -1427,9 +1432,9 @@ def dyn_disk_serial_return(*args, **kwargs):
# Note in the following our md126p3 partition has the same serial
# as its base device.
s_map = {
- 'md126': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
- 'md126p3': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
- 'md127': 'a88a8eda:1e459751:3341ad9b:fe3031a0'
+ '/dev/md126': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
+ '/dev/md126p3': 'a300e6b0:5d69eee6:98a2354a:0ba1e1eb',
+ '/dev/md127': 'a88a8eda:1e459751:3341ad9b:fe3031a0'
}
# First argument in get_disk_serial() is device_name, key off this
# for our dynamic mock return from s_map (serial map).
@@ -1442,7 +1447,7 @@ def dyn_disk_serial_return(*args, **kwargs):
return 'missing-mock-serial-data-for-dev-{}'.format(args[0])
self.mock_dyn_get_disk_serial.side_effect = dyn_disk_serial_return
# Ensure we correctly mock our root_disk value away from file default.
- self.mock_root_disk.return_value = 'sdd'
+ self.mock_root_disk.return_value = '/dev/sdd'
# As we have an mdraid device of interest (the data disk), its model
# info field is used to present basic info on its members' serials:
# We mock this as otherwise our wide scope run_command() mock breaks
@@ -1477,13 +1482,13 @@ def test_scan_disks_nvme_sys_disk(self):
# Test data based on 2 data drives (sda, sdb) and an nvme system drive
# /dev/nvme0n1 as the base device.
out = [[
- 'NAME="sdb" MODEL="WDC WD100EFAX-68" SERIAL="7PKNDX1C" SIZE="9.1T" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="Data" UUID="d2f76ce6-85fd-4615-b4f8-77e1b6a69c60"', # noqa E501
- 'NAME="sda" MODEL="WDC WD100EFAX-68" SERIAL="7PKP0MNC" SIZE="9.1T" TRAN="sata" VENDOR="ATA " HCTL="0:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="Data" UUID="d2f76ce6-85fd-4615-b4f8-77e1b6a69c60"', # noqa E501
- 'NAME="nvme0n1" MODEL="INTEL SSDPEKKW128G7 " SERIAL="BTPY72910KCW128A" SIZE="119.2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
- 'NAME="nvme0n1p3" MODEL="" SERIAL="" SIZE="7.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="d33115d8-3d8c-4f65-b560-8ebf72d08fbc"', # noqa E501
- 'NAME="nvme0n1p1" MODEL="" SERIAL="" SIZE="200M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="vfat" LABEL="" UUID="53DC-1323"', # noqa E501
- 'NAME="nvme0n1p4" MODEL="" SERIAL="" SIZE="110.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="4a05477f-cd4a-4614-b264-d029d98928ab"', # noqa E501
- 'NAME="nvme0n1p2" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="497a9eda-a655-4fc4-bad8-2d9aa8661980"', # noqa E501
+ 'NAME="/dev/sdb" MODEL="WDC WD100EFAX-68" SERIAL="7PKNDX1C" SIZE="9.1T" TRAN="sata" VENDOR="ATA " HCTL="1:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="Data" UUID="d2f76ce6-85fd-4615-b4f8-77e1b6a69c60"', # noqa E501
+ 'NAME="/dev/sda" MODEL="WDC WD100EFAX-68" SERIAL="7PKP0MNC" SIZE="9.1T" TRAN="sata" VENDOR="ATA " HCTL="0:0:0:0" TYPE="disk" FSTYPE="btrfs" LABEL="Data" UUID="d2f76ce6-85fd-4615-b4f8-77e1b6a69c60"', # noqa E501
+ 'NAME="/dev/nvme0n1" MODEL="INTEL SSDPEKKW128G7 " SERIAL="BTPY72910KCW128A" SIZE="119.2G" TRAN="" VENDOR="" HCTL="" TYPE="disk" FSTYPE="" LABEL="" UUID=""', # noqa E501
+ 'NAME="/dev/nvme0n1p3" MODEL="" SERIAL="" SIZE="7.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="swap" LABEL="" UUID="d33115d8-3d8c-4f65-b560-8ebf72d08fbc"', # noqa E501
+ 'NAME="/dev/nvme0n1p1" MODEL="" SERIAL="" SIZE="200M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="vfat" LABEL="" UUID="53DC-1323"', # noqa E501
+ 'NAME="/dev/nvme0n1p4" MODEL="" SERIAL="" SIZE="110.8G" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="btrfs" LABEL="rockstor_rockstor00" UUID="4a05477f-cd4a-4614-b264-d029d98928ab"', # noqa E501
+ 'NAME="/dev/nvme0n1p2" MODEL="" SERIAL="" SIZE="500M" TRAN="" VENDOR="" HCTL="" TYPE="part" FSTYPE="ext4" LABEL="" UUID="497a9eda-a655-4fc4-bad8-2d9aa8661980"', # noqa E501
'']]
err = [['']]
rc = [0]
@@ -1493,29 +1498,29 @@ def test_scan_disks_nvme_sys_disk(self):
rc.append(0)
# Setup expected results
expected_result = [[
- Disk(name='sda', model='WDC WD100EFAX-68', serial='7PKP0MNC',
+ Disk(name='/dev/sda', model='WDC WD100EFAX-68', serial='7PKP0MNC',
size=9771050598, transport='sata', vendor='ATA',
hctl='0:0:0:0', type='disk', fstype='btrfs', label='Data',
uuid='d2f76ce6-85fd-4615-b4f8-77e1b6a69c60', parted=False,
root=False, partitions={}),
- Disk(name='sdb', model='WDC WD100EFAX-68', serial='7PKNDX1C',
+ Disk(name='/dev/sdb', model='WDC WD100EFAX-68', serial='7PKNDX1C',
size=9771050598, transport='sata', vendor='ATA',
hctl='1:0:0:0', type='disk', fstype='btrfs', label='Data',
uuid='d2f76ce6-85fd-4615-b4f8-77e1b6a69c60', parted=False,
root=False, partitions={})
], [
- Disk(name='nvme0n1p4', model='INTEL SSDPEKKW128G7',
+ Disk(name='/dev/nvme0n1p4', model='INTEL SSDPEKKW128G7',
serial='BTPY72910KCW128A', size=116182220, transport=None,
vendor=None, hctl=None, type='part', fstype='btrfs',
label='rockstor_rockstor00',
uuid='4a05477f-cd4a-4614-b264-d029d98928ab', parted=True,
root=True, partitions={}),
- Disk(name='sda', model='WDC WD100EFAX-68', serial='7PKP0MNC',
+ Disk(name='/dev/sda', model='WDC WD100EFAX-68', serial='7PKP0MNC',
size=9771050598, transport='sata', vendor='ATA',
hctl='0:0:0:0', type='disk', fstype='btrfs', label='Data',
uuid='d2f76ce6-85fd-4615-b4f8-77e1b6a69c60', parted=False,
root=False, partitions={}),
- Disk(name='sdb', model='WDC WD100EFAX-68', serial='7PKNDX1C',
+ Disk(name='/dev/sdb', model='WDC WD100EFAX-68', serial='7PKNDX1C',
size=9771050598, transport='sata', vendor='ATA',
hctl='1:0:0:0', type='disk', fstype='btrfs', label='Data',
uuid='d2f76ce6-85fd-4615-b4f8-77e1b6a69c60', parted=False,
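
The integer size fields in these expected results are lsblk's human-readable SIZE values converted to KiB with the fraction truncated; a quick sanity check of two values above (human_to_kib is a hypothetical helper, not project code):

    def human_to_kib(size):
        # Binary units assumed: M/G/T suffix -> KiB, fraction truncated.
        units = {'M': 1024, 'G': 1024 ** 2, 'T': 1024 ** 3}
        return int(float(size[:-1]) * units[size[-1]])

    assert human_to_kib('9.1T') == 9771050598    # sda / sdb above
    assert human_to_kib('110.8G') == 116182220   # nvme0n1p4 above
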
@@ -1528,7 +1533,7 @@ def test_scan_disks_nvme_sys_disk(self):
# get_bcache_device_type()
# Ensure we correctly mock our root_disk value away from file default
# of sda as we now have a root_disk on an nvme device.
- self.mock_root_disk.return_value = 'nvme0n1'
+ self.mock_root_disk.return_value = '/dev/nvme0n1'
# Iterate the test data sets for run_command running lsblk.
for o, e, r, expected in zip(out, err, rc, expected_result):
self.mock_run_command.return_value = (o, e, r)
|