diff --git a/src/rockstor/fs/btrfs.py b/src/rockstor/fs/btrfs.py
index 58f97b5fa..214e99478 100644
--- a/src/rockstor/fs/btrfs.py
+++ b/src/rockstor/fs/btrfs.py
@@ -138,7 +138,7 @@ def pool_raid(mnt_pt):
if (len(fields) > 1):
block = fields[0][:-1].lower()
raid = fields[1][:-1].lower()
- if block not in raid_d and raid is not 'DUP':
+ if block not in raid_d:
raid_d[block] = raid
if (raid_d['metadata'] == 'single'):
raid_d['data'] = raid_d['metadata']
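
As an aside on the pool_raid() change above: the dropped clause used `is not` (an identity test, not equality) and in any case kept DUP entries out of raid_d; removing it lets dup metadata levels register. A minimal standalone sketch of the parsing, using illustrative 'btrfs fi df' output (the sample lines are assumptions, not captured from a real pool):

# Hedged sketch of the parsing above; sample output is illustrative only.
sample = [
    'Data, single: total=8.00MiB, used=64.00KiB',
    'System, DUP: total=8.00MiB, used=16.00KiB',
    'Metadata, DUP: total=51.19MiB, used=112.00KiB',
]
raid_d = {}
for line in sample:
    fields = line.split()
    if len(fields) > 1:
        block = fields[0][:-1].lower()  # 'Data,' -> 'data'
        raid = fields[1][:-1].lower()   # 'DUP:' -> 'dup'
        if block not in raid_d:  # first sighting wins; dup no longer skipped
            raid_d[block] = raid
print(raid_d)  # {'data': 'single', 'system': 'dup', 'metadata': 'dup'}
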
@@ -1225,15 +1225,15 @@ def scrub_status(pool):
@task()
def start_balance(mnt_pt, force=False, convert=None):
cmd = ['btrfs', 'balance', 'start', mnt_pt]
- # TODO: Confirm -f is doing what is intended, man states for reducing
- # TODO: metadata from say raid1 to single.
    # With no filters we also get a warning that blocks some balances, due
    # to the expected long execution time; "--full-balance" is then required.
- # N.B. currently force in Web-UI does not mean force here.
- if (force):
+ if force:
cmd.insert(3, '-f')
- if (convert is not None):
+ if convert is not None:
cmd.insert(3, '-dconvert=%s' % convert)
+ # Override metadata on single pools to be dup, as per btrfs default.
+ if convert == 'single':
+ convert = 'dup'
cmd.insert(3, '-mconvert=%s' % convert)
else:
# As we are running with no convert filters a warning and 10 second
@@ -1241,6 +1241,7 @@ def start_balance(mnt_pt, force=False, convert=None):
# This warning is now present in the Web-UI "Start a new balance"
# button tooltip.
cmd.insert(3, '--full-balance')
+ logger.debug('Balance command ({}).'.format(cmd))
run_command(cmd)
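
For concreteness, a self-contained sketch mirroring the revised start_balance() flag assembly (the /mnt2/pool mount point is illustrative): list.insert(3, ...) stacks each filter just after 'start', and a 'single' convert is split into '-dconvert=single' plus '-mconvert=dup'.

def build_balance_cmd(mnt_pt, force=False, convert=None):
    # Mirrors the logic above, minus the @task decoration and run_command.
    cmd = ['btrfs', 'balance', 'start', mnt_pt]
    if force:
        cmd.insert(3, '-f')
    if convert is not None:
        cmd.insert(3, '-dconvert=%s' % convert)
        if convert == 'single':
            convert = 'dup'  # btrfs defaults single pools to dup metadata
        cmd.insert(3, '-mconvert=%s' % convert)
    else:
        cmd.insert(3, '--full-balance')
    return cmd

print(build_balance_cmd('/mnt2/pool'))
# ['btrfs', 'balance', 'start', '--full-balance', '/mnt2/pool']
print(build_balance_cmd('/mnt2/pool', force=True, convert='single'))
# ['btrfs', 'balance', 'start', '-mconvert=dup', '-dconvert=single', '-f',
#  '/mnt2/pool']
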
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js b/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js
index 760d43600..7c086e5df 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js
@@ -191,7 +191,7 @@ AddPoolView = Backbone.View.extend({
this.$('#raid_level').tooltip({
html: true,
placement: 'right',
- title: 'Desired RAID level of the pool Single: No software raid. (Recommended while using hardware raid). Raid0, Raid1, Raid10, Raid5, and Raid6 are similar to conventional raid levels. See documentation for more information. WARNING: Raid5 and Raid6 are not production-ready'
+            title: 'Software RAID level. Single: no RAID; one or more devices (-m dup enforced). Raid0, Raid1, Raid10, and the parity-based Raid5 & Raid6 levels are similar to their conventional raid counterparts but are chunk based, not device based. See docs for more info. WARNING: Raid5 and Raid6 are not production-ready'
});
this.$('#compression').tooltip({
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js
index f95882265..b549329ba 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js
@@ -143,7 +143,7 @@ PoolAddDisks = RockstorWizardPage.extend({
Handlebars.registerHelper('display_raid_levels', function(){
var html = '';
var _this = this;
- var levels = ['raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+ var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
_.each(levels, function(level) {
if (_this.raidLevel != level) {
            html += '<option value="' + level + '">' + level + '</option>';
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js
index bc53d1b72..66a39c54c 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js
@@ -77,7 +77,7 @@ PoolRaidChange = RockstorWizardPage.extend({
Handlebars.registerHelper('display_raid_levels', function() {
var html = '';
var _this = this;
- var levels = ['raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+ var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
_.each(levels, function(level) {
if (_this.raidLevel != level) {
            html += '<option value="' + level + '">' + level + '</option>';
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js
index 195b0bef0..e6d1709bc 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool_details_layout_view.js
@@ -113,7 +113,12 @@ PoolDetailsLayoutView = RockstorLayoutView.extend({
this.$('#ph-pool-usage').html(this.subviews['pool-usage'].render().el);
this.$('#ph-pool-scrubs').html(this.subviews['pool-scrubs'].render().el);
this.$('#ph-pool-rebalances').html(this.subviews['pool-rebalances'].render().el);
- this.renderDataTables();
+        // Sort all SubView tables in descending order on their first column
+        // so that the latest Scrub / Balance entries appear at the top.
+ var customs = {
+ 'order': [[0, 'desc']]
+ };
+ this.renderDataTables(customs);
this.$('#ph-compression-info').html(this.compression_info_template({
diff --git a/src/rockstor/storageadmin/views/pool.py b/src/rockstor/storageadmin/views/pool.py
index 7d2386261..955366b20 100644
--- a/src/rockstor/storageadmin/views/pool.py
+++ b/src/rockstor/storageadmin/views/pool.py
@@ -212,6 +212,13 @@ def _remount(cls, request, pool):
def _balance_start(self, pool, force=False, convert=None):
mnt_pt = mount_root(pool)
+        if convert is None and pool.raid == 'single':
+            # A btrfs balance with no convert filters will promote the dup
+            # metadata of a multi-device single-data pool to raid1.
+            # Avoid this by converting explicitly in this instance.
+            logger.info('Preserving single data / dup metadata via explicit '
+                        'convert.')
+            convert = 'single'
start_balance.async(mnt_pt, force=force, convert=convert)
tid = 0
count = 0
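
A quick sketch of what the new guard in _balance_start() does to the convert argument (Pool here is a stand-in for the storageadmin model, not the real class):

class Pool(object):
    def __init__(self, raid):
        self.raid = raid

def effective_convert(pool, convert=None):
    # Mirrors the hunk above: an unfiltered balance on a multi-device
    # single pool would promote dup metadata to raid1, so pin it.
    if convert is None and pool.raid == 'single':
        convert = 'single'  # start_balance then maps metadata to dup
    return convert

print(effective_convert(Pool('single')))           # 'single'
print(effective_convert(Pool('raid1')))            # None (full balance)
print(effective_convert(Pool('single'), 'raid1'))  # 'raid1' (explicit wins)
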
@@ -375,7 +382,9 @@ def put(self, request, pid, command):
'web-ui' % d.name)
handle_exception(Exception(e_msg), request)
- if (pool.raid != 'single' and new_raid == 'single'):
+ if pool.raid == 'single' and new_raid == 'raid10':
+            # Avoid extreme upward raid level changes (space issues).
+            # TODO: Consider removing once we have a better space calc.
e_msg = ('Pool migration from %s to %s is not supported.'
% (pool.raid, new_raid))
handle_exception(Exception(e_msg), request)
@@ -404,7 +413,12 @@ def put(self, request, pid, command):
handle_exception(Exception(e_msg), request)
resize_pool(pool, dnames)
- tid = self._balance_start(pool, convert=new_raid)
+            # During a device add we also offer a raid level change; if one is
+            # selected, blanket apply '-f' to allow reduced metadata integrity.
+ force = False
+ if new_raid != pool.raid:
+ force = True
+ tid = self._balance_start(pool, force=force, convert=new_raid)
ps = PoolBalance(pool=pool, tid=tid)
ps.save()
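
Finally, a hedged sketch of the disk-add path's force decision (the function and names are stand-ins for illustration, not the view code itself):

def balance_force(current_raid, new_raid):
    # Blanket '-f' whenever the level changes, since the target level may
    # carry lower metadata integrity and btrfs refuses such reductions
    # without force.
    return new_raid != current_raid

print(balance_force('raid1', 'raid1'))   # False -> plain balance
print(balance_force('raid1', 'single'))  # True  -> '-f' applied
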