From d581cdb2a0705aed99b7534ad8c164ce30377a4e Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Sun, 22 Dec 2024 04:02:27 +0100 Subject: [PATCH] Fix typos --- fs/bcachefs/Kconfig | 2 +- fs/bcachefs/acl.c | 5 +-- fs/bcachefs/alloc_background.c | 2 +- fs/bcachefs/alloc_foreground.c | 4 +-- fs/bcachefs/bcachefs.h | 2 +- fs/bcachefs/bcachefs_format.h | 8 ++--- fs/bcachefs/bcachefs_ioctl.h | 4 +-- fs/bcachefs/bset.h | 4 +-- fs/bcachefs/btree_cache.c | 5 +-- fs/bcachefs/btree_iter.c | 4 +-- fs/bcachefs/btree_iter.h | 48 ++++++++++++++++------------ fs/bcachefs/btree_key_cache.c | 10 +++--- fs/bcachefs/btree_types.h | 2 +- fs/bcachefs/btree_update.h | 2 +- fs/bcachefs/btree_update_interior.h | 2 +- fs/bcachefs/btree_write_buffer.c | 2 +- fs/bcachefs/checksum.c | 2 +- fs/bcachefs/data_update.c | 4 +-- fs/bcachefs/disk_accounting.c | 4 +-- fs/bcachefs/disk_accounting_format.h | 2 +- fs/bcachefs/ec.c | 12 +++---- fs/bcachefs/errcode.h | 2 +- fs/bcachefs/error.c | 2 +- fs/bcachefs/eytzinger.h | 2 +- fs/bcachefs/fs-io.c | 2 +- fs/bcachefs/fs.c | 8 ----- fs/bcachefs/fs.h | 2 +- fs/bcachefs/fsck.c | 4 +-- fs/bcachefs/inode.c | 2 +- fs/bcachefs/io_misc.c | 2 +- fs/bcachefs/io_write.c | 2 +- fs/bcachefs/journal.c | 4 +-- fs/bcachefs/journal_io.c | 2 +- fs/bcachefs/journal_types.h | 4 +-- fs/bcachefs/mean_and_variance.h | 4 +-- fs/bcachefs/mean_and_variance_test.c | 2 +- fs/bcachefs/opts.h | 2 +- fs/bcachefs/printbuf.c | 8 ++--- fs/bcachefs/printbuf.h | 4 +-- fs/bcachefs/rcu_pending.c | 4 +-- fs/bcachefs/rebalance.c | 4 +-- fs/bcachefs/sb-members_format.h | 2 +- fs/bcachefs/siphash.h | 2 +- fs/bcachefs/six.c | 12 ++++---- fs/bcachefs/six.h | 4 +-- fs/bcachefs/snapshot_format.h | 2 +- fs/bcachefs/subvolume.c | 2 +- fs/bcachefs/super-io.c | 2 +- fs/bcachefs/time_stats.h | 2 +- 49 files changed, 112 insertions(+), 112 deletions(-) diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig index e8549d04dcb8d..c2566a970946b 100644 --- a/fs/bcachefs/Kconfig +++ b/fs/bcachefs/Kconfig @@ -38,7 +38,7 @@ config BCACHEFS_ERASURE_CODING depends on BCACHEFS_FS select QUOTACTL help - This enables the "erasure_code" filesysystem and inode option, which + This enables the "erasure_code" filesystem and inode option, which organizes data into reed-solomon stripes instead of ordinary replication.
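Note on the BCACHEFS_ERASURE_CODING hunk above: "erasure_code" stores data as stripes with computed redundancy rather than as whole replicas. For intuition only, here is a toy userspace sketch (not bcachefs code) of the single-parity case; a Reed-Solomon stripe with one redundant block degenerates to plain XOR parity:

/*
 * Toy model of a single-parity stripe: the parity block is the XOR of
 * all data blocks, so any one lost data block can be rebuilt from the
 * parity plus the surviving blocks.
 */
#include <stddef.h>
#include <string.h>

#define BLOCK_BYTES	4096

static void stripe_gen_parity(const unsigned char (*data)[BLOCK_BYTES],
			      size_t nr_data, unsigned char *parity)
{
	memset(parity, 0, BLOCK_BYTES);
	for (size_t i = 0; i < nr_data; i++)
		for (size_t j = 0; j < BLOCK_BYTES; j++)
			parity[j] ^= data[i][j];
}

/* Rebuild data block @lost in place from the parity and the survivors: */
static void stripe_rebuild(unsigned char (*data)[BLOCK_BYTES], size_t nr_data,
			   const unsigned char *parity, size_t lost)
{
	memcpy(data[lost], parity, BLOCK_BYTES);
	for (size_t i = 0; i < nr_data; i++)
		if (i != lost)
			for (size_t j = 0; j < BLOCK_BYTES; j++)
				data[lost][j] ^= data[i][j];
}

Full Reed-Solomon replaces the XOR with Galois-field arithmetic so that a stripe survives the loss of several blocks at once; the layout and rebuild flow have the same shape.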
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c index 99487727ae64f..56b160e71a533 100644 --- a/fs/bcachefs/acl.c +++ b/fs/bcachefs/acl.c @@ -137,7 +137,7 @@ static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans, return NULL; acl = allocate_dropping_locks(trans, ret, - posix_acl_alloc(count, _gfp)); + posix_acl_alloc(count, GFP_KERNEL)); if (!acl) return ERR_PTR(-ENOMEM); if (ret) { @@ -422,7 +422,8 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum, if (ret) goto err; - ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode)); + ret = allocate_dropping_locks_errcode(trans, + __posix_acl_chmod(&acl, GFP_KERNEL, mode)); if (ret) goto err; diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 94e7bc889cb10..cc3cbcd11efea 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -1409,7 +1409,7 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite if (!bch2_dev_bucket_exists(c, bucket)) { if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket, - "entry in %s btree for nonexistant dev:bucket %llu:%llu", + "entry in %s btree for nonexistent dev:bucket %llu:%llu", bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset)) goto delete; ret = 1; diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index 6df41c331a52e..066a2d510d7cc 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -409,7 +409,7 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, POS(ca->dev_idx, U64_MAX), 0, k, ret) { /* - * peek normally dosen't trim extents - they can span iter.pos, + * peek normally doesn't trim extents - they can span iter.pos, * which is not what we want here: */ iter.k.size = iter.k.p.offset - iter.pos.offset; @@ -1478,7 +1478,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c) mutex_init(&c->write_points_hash_lock); c->write_points_nr = ARRAY_SIZE(c->write_points); - /* open bucket 0 is a sentinal NULL: */ + /* open bucket 0 is a sentinel NULL: */ spin_lock_init(&c->open_buckets[0].lock); for (ob = c->open_buckets + 1; diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 161cf2f05d2ad..7ee05afdf741d 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -731,7 +731,7 @@ struct bch_fs { struct task_struct *recovery_task; /* - * Analagous to c->writes, for asynchronous ops that don't necessarily + * Analogous to c->writes, for asynchronous ops that don't necessarily * need fs to be read-write */ refcount_t ro_ref; diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index b0fac8b7915bb..638931a36d930 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -239,7 +239,7 @@ struct bkey { * * Specifically, when i was designing bkey, I wanted the header to be no * bigger than necessary so that bkey_packed could use the rest. That means that - * decently offten extent keys will fit into only 8 bytes, instead of spilling over + * decently often extent keys will fit into only 8 bytes, instead of spilling over * to 16. * * But packed_bkey treats the part after the header - the packed section - @@ -251,7 +251,7 @@ struct bkey { * So that constrains the key part of a bkig endian bkey to start right * after the header. 
* - * If we ever do a bkey_v2 and need to expand the hedaer by another byte for + * If we ever do a bkey_v2 and need to expand the header by another byte for * some reason - that will clean up this wart. */ __aligned(8) @@ -643,7 +643,7 @@ struct bch_sb_field_ext { /* * field 1: version name * field 2: BCH_VERSION(major, minor) - * field 3: recovery passess required on upgrade + * field 3: recovery passes required on upgrade */ #define BCH_METADATA_VERSIONS() \ x(bkey_renumber, BCH_VERSION(0, 10)) \ @@ -765,7 +765,7 @@ struct bch_sb { /* * Flags: - * BCH_SB_INITALIZED - set on first mount + * BCH_SB_INITIALIZED - set on first mount * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect * behaviour of mount/recovery path: * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h index 3c23bdf788cea..926a1af506abe 100644 --- a/fs/bcachefs/bcachefs_ioctl.h +++ b/fs/bcachefs/bcachefs_ioctl.h @@ -131,7 +131,7 @@ struct bch_ioctl_start { * may be either offline or offline. * * Will fail removing @dev would leave us with insufficient read write devices - * or degraded/unavailable data, unless the approprate BCH_FORCE_IF_* flags are + * or degraded/unavailable data, unless the appropriate BCH_FORCE_IF_* flags are * set. */ @@ -154,7 +154,7 @@ struct bch_ioctl_start { * * Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would * leave us with insufficient read write devices or degraded/unavailable data, - * unless the approprate BCH_FORCE_IF_* flags are set. + * unless the appropriate BCH_FORCE_IF_* flags are set. */ struct bch_ioctl_disk { diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h index 6953d55b72cca..bdd250e1be16c 100644 --- a/fs/bcachefs/bset.h +++ b/fs/bcachefs/bset.h @@ -45,7 +45,7 @@ * 4 in memory - we lazily resort as needed. * * We implement code here for creating and maintaining auxiliary search trees - * (described below) for searching an individial bset, and on top of that we + * (described below) for searching an individual bset, and on top of that we * implement a btree iterator. * * BTREE ITERATOR: @@ -178,7 +178,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree * it used to be 64, but I realized the lookup code would touch slightly less * memory if it was 128. * - * It definites the number of bytes (in struct bset) per struct bkey_float in + * It defines the number of bytes (in struct bset) per struct bkey_float in * the auxiliar search tree - when we're done searching the bset_float tree we * have this many bytes left that we do a linear search over. 
* diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index b00c6a20be271..6319a9a8ae6a1 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -828,7 +828,8 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea mutex_unlock(&bc->lock); - if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) { + if (memalloc_flags_do(PF_MEMALLOC_NORECLAIM, + btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))) { bch2_trans_unlock(trans); if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN)) goto err; @@ -1172,7 +1173,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * /* * Check b->hash_val _before_ calling btree_node_lock() - this might not * be the node we want anymore, and trying to lock the wrong node could - * cause an unneccessary transaction restart: + * cause an unnecessary transaction restart: */ if (unlikely(!c->opts.btree_node_mem_ptr_optimization || !b || diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 291eb5eb0203c..a6aab81c64332 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -2430,7 +2430,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en } /* - * iter->pos should be mononotically increasing, and always be + * iter->pos should be monotonically increasing, and always be * equal to the key we just returned - except extents can * straddle iter->pos: */ @@ -3216,7 +3216,7 @@ u32 bch2_trans_begin(struct btree_trans *trans) /* * If the transaction wasn't restarted, we're presuming to be - * doing something new: dont keep iterators excpt the ones that + * doing something new: don't keep iterators except the ones that * are in use - except for the subvolumes btree: */ if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes) diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index e23608d2a26dd..07e0210c53aea 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -6,6 +6,8 @@ #include "btree_types.h" #include "trace.h" +#include <linux/sched/mm.h> + void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *); void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t); void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *); @@ -874,29 +876,33 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); (_do) ?: bch2_trans_relock(_trans); \ }) -#define allocate_dropping_locks_errcode(_trans, _do) \ -({ \ - gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN; \ - int _ret = _do; \ - \ - if (bch2_err_matches(_ret, ENOMEM)) { \ - _gfp = GFP_KERNEL; \ - _ret = drop_locks_do(_trans, _do); \ - } \ - _ret; \ +#define memalloc_flags_do(_flags, _do) \ +({ \ + unsigned _saved_flags = memalloc_flags_save(_flags); \ + typeof(_do) _ret = _do; \ + memalloc_flags_restore(_saved_flags); \ + _ret; \ }) -#define allocate_dropping_locks(_trans, _ret, _do) \ -({ \ - gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN; \ - typeof(_do) _p = _do; \ - \ - _ret = 0; \ - if (unlikely(!_p)) { \ - _gfp = GFP_KERNEL; \ - _ret = drop_locks_do(_trans, ((_p = _do), 0)); \ - } \ - _p; \ +#define allocate_dropping_locks_errcode(_trans, _do) \ +({ \ + int _ret = memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN, _do);\ + \ + if (bch2_err_matches(_ret, ENOMEM)) { \ + _ret = drop_locks_do(_trans, _do); \ + } \ + _ret; \ +}) + +#define allocate_dropping_locks(_trans, _ret, _do) \ +({ \ + typeof(_do) _p = memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN, _do);\ +
\ + _ret = 0; \ + if (unlikely(!_p)) { \ + _ret = drop_locks_do(_trans, ((_p = _do), 0)); \ + } \ + _p; \ }) #define bch2_trans_run(_c, _do) \ diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 382f99b774b8f..8905e73174656 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -116,14 +116,14 @@ static void bkey_cached_free(struct btree_key_cache *bc, this_cpu_inc(*bc->nr_pending); } -static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp) +static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s) { - gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE; + gfp_t gfp = GFP_KERNEL|__GFP_ACCOUNT|__GFP_RECLAIMABLE; struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp); if (unlikely(!ck)) return NULL; - ck->k = kmalloc(key_u64s * sizeof(u64), gfp); + ck->k = kmalloc(key_u64s * sizeof(u64), GFP_KERNEL); if (unlikely(!ck->k)) { kmem_cache_free(bch2_key_cache, ck); return NULL; @@ -147,7 +147,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k goto lock; ck = allocate_dropping_locks(trans, ret, - __bkey_cached_alloc(key_u64s, _gfp)); + __bkey_cached_alloc(key_u64s)); if (ret) { if (ck) kfree(ck->k); @@ -243,7 +243,7 @@ static int btree_key_cache_create(struct btree_trans *trans, mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED); struct bkey_i *new_k = allocate_dropping_locks(trans, ret, - kmalloc(key_u64s * sizeof(u64), _gfp)); + kmalloc(key_u64s * sizeof(u64), GFP_KERNEL)); if (unlikely(!new_k)) { bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u", bch2_btree_id_str(ck->key.btree_id), key_u64s); diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index baab5288ecc9b..f5b41ad83b8c2 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -446,7 +446,7 @@ struct btree_insert_entry { /* Number of btree paths we preallocate, usually enough */ #define BTREE_ITER_INITIAL 64 /* - * Lmiit for btree_trans_too_many_iters(); this is enough that almost all code + * Limit for btree_trans_too_many_iters(); this is enough that almost all code * paths should run inside this limit, and if they don't it usually indicates a * bug (leaking/duplicated btree paths). * diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 8f22ef9a7651a..96d208edafdb3 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -80,7 +80,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id, * For use when splitting extents in existing snapshots: * * If @old_pos is an interior snapshot node, iterate over descendent snapshot - * nodes: for every descendent snapshot in whiche @old_pos is overwritten and + * nodes: for every descendent snapshot in which @old_pos is overwritten and * not visible, emit a whiteout at @new_pos. 
*/ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans, diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index 7930ffea3075d..ffc6a8c1f0125 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -116,7 +116,7 @@ struct btree_update { struct keylist parent_keys; /* * Enough room for btree_split's keys without realloc - btree node - * pointers never have crc/compression info, so we only need to acount + * pointers never have crc/compression info, so we only need to account * for the pointers for three keys */ u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3]; diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 746db6d5a0fba..723ffbe6efaad 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -453,7 +453,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) * journal replay has to split/rewrite nodes to make room for * its updates. * - * And for those new acounting updates, updates to the same + * And for those new accounting updates, updates to the same * counters get accumulated as they're flushed from the journal * to the write buffer - see the patch for eytzingcer tree * accumulated. So we could only overflow if the number of diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c index 23a383577d4c7..3d4b5ead0afdb 100644 --- a/fs/bcachefs/checksum.c +++ b/fs/bcachefs/checksum.c @@ -23,7 +23,7 @@ /* * bch2_checksum state is an abstraction of the checksum state calculated over different pages. * it features page merging without having the checksum algorithm lose its state. - * for native checksum aglorithms (like crc), a default seed value will do. + * for native checksum algorithms (like crc), a default seed value will do. * for hash-like algorithms, a state needs to be stored */ diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 585214931e056..9cf1bce45baf9 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -224,7 +224,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, * other updates * @new: extent with new pointers that we'll be adding to @insert * - * Fist, drop rewrite_ptrs from @new: + * First, drop rewrite_ptrs from @new: */ ptr_bit = 1; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) { @@ -703,7 +703,7 @@ int bch2_data_update_init(struct btree_trans *trans, /* * If device(s) were set to durability=0 after data was written to them - * we can end up with a duribilty=0 extent, and the normal algorithm + * we can end up with a durability=0 extent, and the normal algorithm * that tries not to increase durability doesn't work: */ if (!(durability_have + durability_removing)) diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index b32e91ba8be85..78ba98147f54e 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -25,7 +25,7 @@ * expensive, so we also have * * - In memory accounting, where accounting is stored as an array of percpu - * counters, indexed by an eytzinger array of disk acounting keys/bpos (which + * counters, indexed by an eytzinger array of disk accounting keys/bpos (which * are the same thing, excepting byte swabbing on big endian). * * Cheap to read, but non persistent. @@ -402,7 +402,7 @@ void bch2_accounting_mem_gc(struct bch_fs *c) * Read out accounting keys for replicas entries, as an array of * bch_replicas_usage entries. 
* - * Note: this may be deprecated/removed at smoe point in the future and replaced + * Note: this may be deprecated/removed at some point in the future and replaced * with something more general, it exists to support the ioctl used by the * 'bcachefs fs usage' command. */ diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h index 7b6e6c97e6aa6..6588f69e98d7e 100644 --- a/fs/bcachefs/disk_accounting_format.h +++ b/fs/bcachefs/disk_accounting_format.h @@ -10,7 +10,7 @@ * Here, the key has considerably more structure than a typical key (bpos); an * accounting key is 'struct disk_accounting_pos', which is a union of bpos. * - * More specifically: a key is just a muliword integer (where word endianness + * More specifically: a key is just a multiword integer (where word endianness * matches native byte order), so we're treating bpos as an opaque 20 byte * integer and mapping bch_accounting_key to that. * diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index b211e90ac54e7..859db4f2160b9 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -915,12 +915,12 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio, /* stripe bucket accounting: */ -static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp) +static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx) { ec_stripes_heap n, *h = &c->ec_stripes_heap; if (idx >= h->size) { - if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp)) + if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), GFP_KERNEL)) return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; mutex_lock(&c->ec_stripes_heap_lock); @@ -934,11 +934,11 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp) free_heap(&n); } - if (!genradix_ptr_alloc(&c->stripes, idx, gfp)) + if (!genradix_ptr_alloc(&c->stripes, idx, GFP_KERNEL)) return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; if (c->gc_pos.phase != GC_PHASE_not_running && - !genradix_ptr_alloc(&c->gc_stripes, idx, gfp)) + !genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL)) return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; return 0; @@ -948,7 +948,7 @@ static int ec_stripe_mem_alloc(struct btree_trans *trans, struct btree_iter *iter) { return allocate_dropping_locks_errcode(trans, - __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp)); + __ec_stripe_mem_alloc(trans->c, iter->pos.offset)); } /* @@ -2382,7 +2382,7 @@ int bch2_stripes_read(struct bch_fs *c) if (k.k->type != KEY_TYPE_stripe) continue; - ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL); + ret = __ec_stripe_mem_alloc(c, k.k->p.offset); if (ret) break; diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index c0df2587a5804..099999443d4d2 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -319,4 +319,4 @@ static inline long bch2_err_class(long err) #include <linux/blk_types.h> const char *bch2_blk_status_to_str(blk_status_t); -#endif /* _BCACHFES_ERRCODE_H */ +#endif /* _BCACHEFS_ERRCODE_H */ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index 038da6a61f6b5..6c03a04c82a3c 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -311,7 +311,7 @@ int __bch2_fsck_err(struct bch_fs *c, if (s) { /* * We may be called multiple times for the same error on - * transaction restart - this memoizes instead of asking the user + * transaction restart - this memoizes the answer instead of asking the user * multiple times for the same error: */ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) { diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h index
0541192d7bc02..13ce67a1c05af 100644 --- a/fs/bcachefs/eytzinger.h +++ b/fs/bcachefs/eytzinger.h @@ -12,7 +12,7 @@ #endif /* - * Traversal for trees in eytzinger layout - a full binary tree layed out in an + * Traversal for trees in eytzinger layout - a full binary tree laid out in an * array. * * Consider using an eytzinger tree any time you would otherwise be doing binary diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 94bf34b9b65f0..92acf598f0573 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -922,7 +922,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, goto err; /* - * due to alignment, we might have remapped slightly more than requsted + * due to alignment, we might have remapped slightly more than requested */ ret = min((u64) ret << 9, (u64) len); diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 3f83f131d0e81..55b38bc7f67b8 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -417,14 +417,6 @@ static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c, } } -#define memalloc_flags_do(_flags, _do) \ -({ \ - unsigned _saved_flags = memalloc_flags_save(_flags); \ - typeof(_do) _ret = _do; \ - memalloc_noreclaim_restore(_saved_flags); \ - _ret; \ -}) - static struct inode *bch2_alloc_inode(struct super_block *sb) { BUG(); diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h index dd2198541455b..7a3dba8fefa0f 100644 --- a/fs/bcachefs/fs.h +++ b/fs/bcachefs/fs.h @@ -34,7 +34,7 @@ struct bch_inode_info { * * XXX: a device may have had a flush issued by some other codepath. It * would be better to keep for each device a sequence number that's - * incremented when we isusue a cache flush, and track here the sequence + * incremented when we issue a cache flush, and track here the sequence * number that needs flushing. */ struct bch_devs_mask ei_devs_need_flush; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index ea8c8ed069408..f50670cd4ce72 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -728,7 +728,7 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see /* * We know that @id is a descendant of @ancestor, we're checking if * we've seen a key that overwrote @ancestor - i.e. also a descendent of - * @ascestor and with @id as a descendent. + * @ancestor and with @id as a descendent. 
* * But we already know that we're scanning IDs between @id and @ancestor * numerically, since snapshot ID lists are kept sorted, so if we find @@ -2208,7 +2208,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol, trans, subvol_fs_path_parent_wrong, - "subvol with wrong fs_path_parent, should be be %u\n%s", + "subvol with wrong fs_path_parent, should be %u\n%s", parent_subvol, (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { struct bkey_i_subvolume *n = diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 04ec05206f8cf..3a08742a8cda6 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -1417,7 +1417,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c) /* * if we ran check_inodes() unlinked inodes will have already been * cleaned up but the write buffer will be out of sync; therefore we - * alway need a write buffer flush + * always need a write buffer flush */ ret = bch2_btree_write_buffer_flush_sync(trans); if (ret) diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index 5353979117b0a..e01e60219832e 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -135,7 +135,7 @@ int bch2_extent_fallocate(struct btree_trans *trans, } /* - * Returns -BCH_ERR_transacton_restart if we had to drop locks: + * Returns -BCH_ERR_transaction_restart if we had to drop locks: */ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, subvol_inum inum, u64 end, diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index 3e71860f66b9e..9067f2a04d283 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -1510,7 +1510,7 @@ static void __bch2_write(struct bch_write_op *op) /* * Sync or no? * - * If we're running asynchronously, wne may still want to block + * If we're running asynchronously, we may still want to block * synchronously here if we weren't able to submit all of the IO at * once, as that signals backpressure to the caller. */ diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 2cd20114b74b9..326c9b317d26c 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -167,7 +167,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) j->err_seq = journal_cur_seq(j); spin_unlock(&j->lock); - bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)", + bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)", bch2_journal_errors[error]); bch2_journal_debug_to_text(&buf, j); bch_err(c, "%s", buf.buf); @@ -832,7 +832,7 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end) unwritten_seq++) { struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq); - /* journal flush already in flight, or flush requseted */ + /* journal flush already in flight, or flush requested */ if (buf->must_flush) goto out; diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index e1773ac278246..5757d8358e93f 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -159,7 +159,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca, /* * genradixes are indexed by a ulong, not a u64, so we can't index them * by sequence number directly: Assume instead that they will all fall - * within the range of +-2billion of the filrst one we find. + * within the range of +-2billion of the first one we find. 
*/ if (!c->journal_entries_base_seq) c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h index e9bd716fbb710..0c16fde8dfad2 100644 --- a/fs/bcachefs/journal_types.h +++ b/fs/bcachefs/journal_types.h @@ -110,7 +110,7 @@ union journal_res_state { #define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */ /* - * We stash some journal state as sentinal values in cur_entry_offset: + * We stash some journal state as sentinel values in cur_entry_offset: * note - cur_entry_offset is in units of u64s */ #define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1) @@ -207,7 +207,7 @@ struct journal { darray_u64 early_journal_entries; /* - * Protects journal_buf->data, when accessing without a jorunal + * Protects journal_buf->data, when accessing without a journal * reservation: for synchronization between the btree write buffer code * and the journal write path: */ diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h index 47e4a3c3d26e7..281d6f9d1a74f 100644 --- a/fs/bcachefs/mean_and_variance.h +++ b/fs/bcachefs/mean_and_variance.h @@ -152,7 +152,7 @@ struct mean_and_variance { u128_u sum_squares; }; -/* expontentially weighted variant */ +/* exponentially weighted variant */ struct mean_and_variance_weighted { s64 mean; u64 variance; @@ -200,4 +200,4 @@ u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s, u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s, u8 weight); -#endif // MEAN_AND_VAIRANCE_H_ +#endif // MEAN_AND_VARIANCE_H_ diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c index e9d9c0212e44b..86f38db112d8c 100644 --- a/fs/bcachefs/mean_and_variance_test.c +++ b/fs/bcachefs/mean_and_variance_test.c @@ -25,7 +25,7 @@ static void mean_and_variance_basic_test(struct kunit *test) } /* - * Test values computed using a spreadsheet from the psuedocode at the bottom: + * Test values computed using a spreadsheet from the pseudocode at the bottom: * https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf */ diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h index e763d52e0f389..c7d454e378413 100644 --- a/fs/bcachefs/opts.h +++ b/fs/bcachefs/opts.h @@ -518,7 +518,7 @@ enum fsck_err_opts { OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ OPT_BOOL(), \ BCH2_NO_SB_OPT, true, \ - NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\ + NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\ " prefetched sequentially") #define BCH_DEV_OPT_SETTERS() \ diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c index 4cf5a2af1e6ff..855c7c07b6185 100644 --- a/fs/bcachefs/printbuf.c +++ b/fs/bcachefs/printbuf.c @@ -236,7 +236,7 @@ void bch2_printbuf_tabstop_pop(struct printbuf *buf) * bch2_printbuf_tabstop_set() - add a tabstop, n spaces from the previous tabstop * * @buf: printbuf to control - * @spaces: number of spaces from previous tabpstop + * @spaces: number of spaces from previous tabstop * * In the future this function may allocate memory if setting more than * PRINTBUF_INLINE_TABSTOPS or setting tabstops more than 255 spaces from start @@ -388,7 +388,7 @@ void bch2_prt_tab_rjust(struct printbuf *buf) * @str: string to print * @count: number of bytes to print * - * The following contol characters are handled as so: + * The following control characters are handled as follows: * \n: prt_newline newline that obeys current indent level * \t: prt_tab advance to next tabstop * \r: prt_tab_rjust advance to next tabstop, with
right justification @@ -435,7 +435,7 @@ void bch2_prt_human_readable_s64(struct printbuf *out, s64 v) * @out: output printbuf * @v: integer to print * - * Units are either raw (default), or human reabable units (controlled via + * Units are either raw (default), or human readable units (controlled via * @buf->human_readable_units) */ void bch2_prt_units_u64(struct printbuf *out, u64 v) @@ -451,7 +451,7 @@ void bch2_prt_units_u64(struct printbuf *out, u64 v) * @out: output printbuf * @v: integer to print * - * Units are either raw (default), or human reabable units (controlled via + * Units are either raw (default), or human readable units (controlled via * @buf->human_readable_units) */ void bch2_prt_units_s64(struct printbuf *out, s64 v) diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h index 1d570387b77f1..bfb2ca11afe4c 100644 --- a/fs/bcachefs/printbuf.h +++ b/fs/bcachefs/printbuf.h @@ -25,7 +25,7 @@ * everything to the kernel log buffer, and then those pretty-printers can be * used by other code that outputs to kernel log, sysfs, debugfs, etc. * - * Memory allocation: Outputing to a printbuf may allocate memory. This + * Memory allocation: Outputting to a printbuf may allocate memory. This * allocation is done with GFP_KERNEL, by default: use the newer * memalloc_*_(save|restore) functions as needed. * @@ -56,7 +56,7 @@ * next tabstop - right justifying it. * * Make sure you use prt_newline() instead of \n in the format string for indent - * level and tabstops to work corretly. + * level and tabstops to work correctly. * * Output units: printbuf->units exists to tell pretty-printers how to output * numbers: a raw value (e.g. directly from a superblock field), as bytes, or as diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c index bef2aa1b8bcdd..0f60f5632bdb7 100644 --- a/fs/bcachefs/rcu_pending.c +++ b/fs/bcachefs/rcu_pending.c @@ -354,7 +354,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq, /* * kvfree_rcu_mightsleep(): we weren't passed an * rcu_head, but we need one: use the low bit of the - * ponter to free to flag that the head needs to be + * pointer to free to flag that the head needs to be * freed as well: */ ptr = (void *)(((unsigned long) ptr)|1UL); @@ -401,7 +401,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq, /* * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via - * pending->pracess) once grace period elapses. + * pending->process) once grace period elapses. * * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall * back to a linked list. 
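Note on the rcu_pending.c hunk above: the low-bit tag works because allocations are at least word aligned, so bit 0 of any valid object pointer is zero and can carry a flag without widening the struct. A minimal self-contained sketch of the technique (illustrative names, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Set bit 0 to record that this object must also be freed: */
static inline void *ptr_tag_needs_free(void *p)
{
	return (void *)((uintptr_t)p | 1UL);
}

static inline bool ptr_is_tagged(const void *p)
{
	return (uintptr_t)p & 1UL;
}

/* Clear the flag bit to recover the real, dereferenceable pointer: */
static inline void *ptr_untag(const void *p)
{
	return (void *)((uintptr_t)p & ~1UL);
}

static void process(void *obj)
{
	bool needs_free = ptr_is_tagged(obj);
	void *p = ptr_untag(obj);

	/* ... operate on p ... */

	if (needs_free)
		free(p);
}

The consumer must untag before every dereference; the tradeoff is one stolen alignment bit in exchange for not growing the queued object.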
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index 4adc74cd3f70b..f265a64da91b5 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -202,7 +202,7 @@ int bch2_get_update_rebalance_opts(struct btree_trans *trans, bkey_reassemble(n, k); - /* On successfull transaction commit, @k was invalidated: */ + /* On successful transaction commit, @k was invalidated: */ return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?: bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?: @@ -637,7 +637,7 @@ void bch2_rebalance_stop(struct bch_fs *c) c->rebalance.thread = NULL; if (p) { - /* for sychronizing with rebalance_wakeup() */ + /* for synchronizing with rebalance_wakeup() */ synchronize_rcu(); kthread_stop(p); diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h index 2adf1221a440f..0bae879539471 100644 --- a/fs/bcachefs/sb-members_format.h +++ b/fs/bcachefs/sb-members_format.h @@ -9,7 +9,7 @@ #define BCH_SB_MEMBERS_MAX 64 /* - * Sentinal value - indicates a device that does not exist + * Sentinel value - indicates a device that does not exist */ #define BCH_SB_MEMBER_INVALID 255 diff --git a/fs/bcachefs/siphash.h b/fs/bcachefs/siphash.h index 3dfaf34a43b28..b1374d9e1c1a7 100644 --- a/fs/bcachefs/siphash.h +++ b/fs/bcachefs/siphash.h @@ -36,7 +36,7 @@ * optimized for speed on short messages returning a 64bit hash/digest value. * * The number of rounds is defined during the initialization: - * SipHash24_Init() for the fast and resonable strong version + * SipHash24_Init() for the fast and reasonably strong version * SipHash48_Init() for the strong version (half as fast) * * struct SIPHASH_CTX ctx; diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 617d07e53b20c..25fa4f0094c69 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -111,7 +111,7 @@ static inline unsigned pcpu_read_count(struct six_lock *lock) * Returns 1 on success, 0 on failure * * In percpu reader mode, a failed trylock may cause a spurious trylock failure - * for anoter thread taking the competing lock type, and we may havve to do a + * for another thread taking the competing lock type, and we may have to do a * wakeup: when a wakeup is required, we return -1 - wakeup_type. */ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, @@ -597,7 +597,7 @@ static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type) * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_ * - * When a lock is held multiple times (because six_lock_incement()) was used), + * When a lock is held multiple times (because six_lock_increment() was used), * this decrements the 'lock held' counter by one.
* * For example: @@ -631,7 +631,7 @@ EXPORT_SYMBOL_GPL(six_unlock_ip); /** * six_lock_downgrade - convert an intent lock to a read lock - * @lock: lock to dowgrade + * @lock: lock to downgrade * * @lock will have read count incremented and intent count decremented */ @@ -750,7 +750,7 @@ EXPORT_SYMBOL_GPL(six_lock_increment); * six_lock_wakeup_all - wake up all waiters on @lock * @lock: lock to wake up waiters for * - * Wakeing up waiters will cause them to re-run should_sleep_fn, which may then + * Waking up waiters will cause them to re-run should_sleep_fn, which may then * abort the lock operation. * * This function is never needed in a bug-free program; it's only useful in @@ -798,7 +798,7 @@ EXPORT_SYMBOL_GPL(six_lock_counts); * @lock: lock to add/subtract readers for * @nr: reader count to add/subtract * - * When an upper layer is implementing lock reentrency, we may have both read + * When an upper layer is implementing lock reentrancy, we may have both read * and intent locks on the same lock. * * When we need to take a write lock, the read locks will cause self-deadlock, @@ -829,7 +829,7 @@ EXPORT_SYMBOL_GPL(six_lock_readers_add); * six_lock_exit - release resources held by a lock prior to freeing * @lock: lock to exit * - * When a lock was initialized in percpu mode (SIX_OLCK_INIT_PCPU), this is + * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is * required to free the percpu read counts. */ void six_lock_exit(struct six_lock *lock) diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h index 68d46fd7f3912..d110f88e04802 100644 --- a/fs/bcachefs/six.h +++ b/fs/bcachefs/six.h @@ -79,7 +79,7 @@ * six_unlock_read(&foo->lock); * foo->lock is now fully unlocked. * - * Since the intent state supercedes read, it's legal to increment the read + * Since the intent state supersedes read, it's legal to increment the read * counter when holding an intent lock, but not the reverse. * * A lock may only be held once for write: six_lock_increment(.., SIX_LOCK_write) @@ -296,7 +296,7 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long * @lock: lock to unlock * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write * - * When a lock is held multiple times (because six_lock_incement()) was used), + * When a lock is held multiple times (because six_lock_increment() was used), * this decrements the 'lock held' counter by one. * * For example: diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h index aabcd3a74cd95..73c2dce0a320e 100644 --- a/fs/bcachefs/snapshot_format.h +++ b/fs/bcachefs/snapshot_format.h @@ -23,7 +23,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2) /* * Snapshot trees: * - * The snapshot_trees btree gives us persistent indentifier for each tree of + * The snapshot_trees btree gives us a persistent identifier for each tree of * bch_snapshot nodes, and allow us to record and easily find the root/master * subvolume that other snapshots were created from: */ diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index 0e756e35c3d9d..f0e094b9c1045 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -387,7 +387,7 @@ static int bch2_subvolume_reparent(struct btree_trans *trans, * structure of how snapshot subvolumes were created - the parent subvolume of * each snapshot subvolume.
* - * When a subvolume is deleted, we scan for child subvolumes and reparant them, + * When a subvolume is deleted, we scan for child subvolumes and reparent them, * to avoid dangling references: */ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete) diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index dbc09e305c275..042dc2da35623 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -442,7 +442,7 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, /* * Been seeing a bug where these are getting inexplicably * zeroed, so we're now validating them, but we have to be - * careful not to preven people's filesystems from mounting: + * careful not to prevent people's filesystems from mounting: */ if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb)) SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000); diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h index dc6493f7bbabc..40ade9bf15d7a 100644 --- a/fs/bcachefs/time_stats.h +++ b/fs/bcachefs/time_stats.h @@ -12,7 +12,7 @@ * - sum of all event durations * - average event duration, standard and weighted * - standard deviation of event durations, standard and weighted - * and analagous statistics for the frequency of events + * and analogous statistics for the frequency of events * * We provide both mean and weighted mean (exponentially weighted), and standard * deviation and weighted standard deviation, to give an efficient-to-compute