diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig index 5bac803ea367..30a54983ba0f 100644 --- a/fs/bcachefs/Kconfig +++ b/fs/bcachefs/Kconfig @@ -38,7 +38,7 @@ config BCACHEFS_ERASURE_CODING depends on BCACHEFS_FS select QUOTACTL help - This enables the "erasure_code" filesysystem and inode option, which + This enables the "erasure_code" filesystem and inode option, which organizes data into reed-solomon stripes instead of ordinary replication. diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index c010af286a11..dc72f13c12e6 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -1380,7 +1380,7 @@ static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_tran if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), trans, need_discard_freespace_key_to_invalid_dev_bucket, - "entry in %s btree for nonexistant dev:bucket %llu:%llu", + "entry in %s btree for nonexistent dev:bucket %llu:%llu", bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset)) goto delete; diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index bf8fb9cd650b..c9d739c1984b 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -353,7 +353,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc if (!bkey_eq(bp_pos, POS_MAX)) { /* * Bucket may have data in it - we don't call - * bc2h_trans_inconnsistent() because fsck hasn't + * bch2_trans_inconsistent() because fsck hasn't * finished yet */ ob = NULL; @@ -1552,7 +1552,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c) mutex_init(&c->write_points_hash_lock); c->write_points_nr = ARRAY_SIZE(c->write_points); - /* open bucket 0 is a sentinal NULL: */ + /* open bucket 0 is a sentinel NULL: */ spin_lock_init(&c->open_buckets[0].lock); for (ob = c->open_buckets + 1; diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index fd60fd729d62..9f56002e6615 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -733,7 +733,7 @@ struct bch_fs { struct percpu_ref writes; #endif /* - * Analagous to c->writes, for asynchronous ops that don't necessarily + * Analogous to c->writes, for asynchronous ops that don't necessarily * need fs to be read-write */ refcount_t ro_ref; diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index 8c4addddd07e..f0fe77a0ab5a 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -239,7 +239,7 @@ struct bkey { * * Specifically, when i was designing bkey, I wanted the header to be no * bigger than necessary so that bkey_packed could use the rest. That means that - * decently offten extent keys will fit into only 8 bytes, instead of spilling over + * decently often extent keys will fit into only 8 bytes, instead of spilling over * to 16. * * But packed_bkey treats the part after the header - the packed section - @@ -251,7 +251,7 @@ struct bkey { * So that constrains the key part of a bkig endian bkey to start right * after the header. * - * If we ever do a bkey_v2 and need to expand the hedaer by another byte for + * If we ever do a bkey_v2 and need to expand the header by another byte for * some reason - that will clean up this wart. 
*/ __aligned(8) @@ -499,8 +499,6 @@ struct bch_sb_field { #include "disk_groups_format.h" #include "extents_format.h" #include "ec_format.h" -#include "dirent_format.h" -#include "disk_groups_format.h" #include "inode_format.h" #include "journal_seq_blacklist_format.h" #include "logged_ops_format.h" @@ -758,7 +756,7 @@ struct bch_sb { /* * Flags: - * BCH_SB_INITALIZED - set on first mount + * BCH_SB_INITIALIZED - set on first mount * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect * behaviour of mount/recovery path: * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h index 3c23bdf788ce..926a1af506ab 100644 --- a/fs/bcachefs/bcachefs_ioctl.h +++ b/fs/bcachefs/bcachefs_ioctl.h @@ -131,7 +131,7 @@ struct bch_ioctl_start { * may be either offline or offline. * * Will fail removing @dev would leave us with insufficient read write devices - * or degraded/unavailable data, unless the approprate BCH_FORCE_IF_* flags are + * or degraded/unavailable data, unless the appropriate BCH_FORCE_IF_* flags are * set. */ @@ -154,7 +154,7 @@ struct bch_ioctl_start { * * Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would * leave us with insufficient read write devices or degraded/unavailable data, - * unless the approprate BCH_FORCE_IF_* flags are set. + * unless the appropriate BCH_FORCE_IF_* flags are set. */ struct bch_ioctl_disk { diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h index 6953d55b72cc..bdd250e1be16 100644 --- a/fs/bcachefs/bset.h +++ b/fs/bcachefs/bset.h @@ -45,7 +45,7 @@ * 4 in memory - we lazily resort as needed. * * We implement code here for creating and maintaining auxiliary search trees - * (described below) for searching an individial bset, and on top of that we + * (described below) for searching an individual bset, and on top of that we * implement a btree iterator. * * BTREE ITERATOR: @@ -178,7 +178,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree * it used to be 64, but I realized the lookup code would touch slightly less * memory if it was 128. * - * It definites the number of bytes (in struct bset) per struct bkey_float in + * It defines the number of bytes (in struct bset) per struct bkey_float in * the auxiliar search tree - when we're done searching the bset_float tree we * have this many bytes left that we do a linear search over. * diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 1ed5db049e72..be215750a226 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -1070,7 +1070,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * /* * Check b->hash_val _before_ calling btree_node_lock() - this might not * be the node we want anymore, and trying to lock the wrong node could - * cause an unneccessary transaction restart: + * cause an unnecessary transaction restart: */ if (unlikely(!c->opts.btree_node_mem_ptr_optimization || !b || diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 120ffd68ab0a..a6aa7709c2b5 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -625,7 +625,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, /* * We require a commit before key_trigger() because - * key_trigger(BTREE_TRIGGER_GC) is not idempotant; we'll calculate the + * key_trigger(BTREE_TRIGGER_GC) is not idempotent; we'll calculate the * wrong result if we run it multiple times. */ unsigned flags = !iter ? 
BTREE_TRIGGER_is_root : 0; diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index bfe9f0c1e1be..d2542f8492af 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -2372,7 +2372,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e } /* - * iter->pos should be mononotically increasing, and always be + * iter->pos should be monotonically increasing, and always be * equal to the key we just returned - except extents can * straddle iter->pos: */ @@ -3070,7 +3070,7 @@ u32 bch2_trans_begin(struct btree_trans *trans) /* * If the transaction wasn't restarted, we're presuming to be - * doing something new: dont keep iterators excpt the ones that + * doing something new: don't keep iterators except the ones that * are in use - except for the subvolumes btree: */ if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes) diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index c1ab824e1c34..7f7caa499325 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -441,7 +441,7 @@ struct btree_insert_entry { /* Number of btree paths we preallocate, usually enough */ #define BTREE_ITER_INITIAL 64 /* - * Lmiit for btree_trans_too_many_iters(); this is enough that almost all code + * Limit for btree_trans_too_many_iters(); this is enough that almost all code * paths should run inside this limit, and if they don't it usually indicates a * bug (leaking/duplicated btree paths). * diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 60393e98084d..c1db85b78e84 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -82,7 +82,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id, * For use when splitting extents in existing snapshots: * * If @old_pos is an interior snapshot node, iterate over descendent snapshot - * nodes: for every descendent snapshot in whiche @old_pos is overwritten and + * nodes: for every descendent snapshot in which @old_pos is overwritten and * not visible, emit a whiteout at @new_pos. */ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans, diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index 10f400957f21..30788fd3197b 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -116,7 +116,7 @@ struct btree_update { struct keylist parent_keys; /* * Enough room for btree_split's keys without realloc - btree node - * pointers never have crc/compression info, so we only need to acount + * pointers never have crc/compression info, so we only need to account * for the pointers for three keys */ u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3]; diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 3f56b584f8ec..1dadfdc3377f 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -451,7 +451,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) * journal replay has to split/rewrite nodes to make room for * its updates. * - * And for those new acounting updates, updates to the same + * And for those new accounting updates, updates to the same * counters get accumulated as they're flushed from the journal * to the write buffer - see the patch for eytzingcer tree * accumulated. 
So we could only overflow if the number of diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c index ce8fc677bef9..b23e573ce23d 100644 --- a/fs/bcachefs/checksum.c +++ b/fs/bcachefs/checksum.c @@ -22,7 +22,7 @@ /* * bch2_checksum state is an abstraction of the checksum state calculated over different pages. * it features page merging without having the checksum algorithm lose its state. - * for native checksum aglorithms (like crc), a default seed value will do. + * for native checksum algorithms (like crc), a default seed value will do. * for hash-like algorithms, a state needs to be stored */ diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 004894ad4147..84a78ede3ec5 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -228,7 +228,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, * other updates * @new: extent with new pointers that we'll be adding to @insert * - * Fist, drop rewrite_ptrs from @new: + * First, drop rewrite_ptrs from @new: */ i = 0; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) { @@ -701,7 +701,7 @@ int bch2_data_update_init(struct btree_trans *trans, /* * If device(s) were set to durability=0 after data was written to them - * we can end up with a duribilty=0 extent, and the normal algorithm + * we can end up with a durability=0 extent, and the normal algorithm * that tries not to increase durability doesn't work: */ if (!(durability_have + durability_removing)) diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index e972e2bca546..9bc1b40d3560 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -25,7 +25,7 @@ * expensive, so we also have * * - In memory accounting, where accounting is stored as an array of percpu - * counters, indexed by an eytzinger array of disk acounting keys/bpos (which + * counters, indexed by an eytzinger array of disk accounting keys/bpos (which * are the same thing, excepting byte swabbing on big endian). * * Cheap to read, but non persistent. @@ -371,7 +371,7 @@ void bch2_accounting_mem_gc(struct bch_fs *c) * Read out accounting keys for replicas entries, as an array of * bch_replicas_usage entries. * - * Note: this may be deprecated/removed at smoe point in the future and replaced + * Note: this may be deprecated/removed at some point in the future and replaced * with something more general, it exists to support the ioctl used by the * 'bcachefs fs usage' command. */ diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h index 7b6e6c97e6aa..6588f69e98d7 100644 --- a/fs/bcachefs/disk_accounting_format.h +++ b/fs/bcachefs/disk_accounting_format.h @@ -10,7 +10,7 @@ * Here, the key has considerably more structure than a typical key (bpos); an * accounting key is 'struct disk_accounting_pos', which is a union of bpos. * - * More specifically: a key is just a muliword integer (where word endianness + * More specifically: a key is just a multiword integer (where word endianness * matches native byte order), so we're treating bpos as an opaque 20 byte * integer and mapping bch_accounting_key to that. 
* diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index 96be8d600ca0..2cfdd9ca3e94 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -293,4 +293,4 @@ static inline long bch2_err_class(long err) const char *bch2_blk_status_to_str(blk_status_t); -#endif /* _BCACHFES_ERRCODE_H */ +#endif /* _BCACHEFS_ERRCODE_H */ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index 95afa7bf2020..5f21eb250f53 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -269,7 +269,7 @@ int __bch2_fsck_err(struct bch_fs *c, if (s) { /* * We may be called multiple times for the same error on - * transaction restart - this memoizes instead of asking the user + * transaction restart - this memorizes instead of asking the user * multiple times for the same error: */ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) { diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h index 0541192d7bc0..13ce67a1c05a 100644 --- a/fs/bcachefs/eytzinger.h +++ b/fs/bcachefs/eytzinger.h @@ -12,7 +12,7 @@ #endif /* - * Traversal for trees in eytzinger layout - a full binary tree layed out in an + * Traversal for trees in eytzinger layout - a full binary tree laid out in an * array. * * Consider using an eytzinger tree any time you would otherwise be doing binary diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 71d0fa387509..36efbb6ff40d 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -877,7 +877,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, goto err; /* - * due to alignment, we might have remapped slightly more than requsted + * due to alignment, we might have remapped slightly more than requested */ ret = min((u64) ret << 9, (u64) len); diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h index f8f8878121c3..411cd11b5cf3 100644 --- a/fs/bcachefs/fs.h +++ b/fs/bcachefs/fs.h @@ -33,7 +33,7 @@ struct bch_inode_info { * * XXX: a device may have had a flush issued by some other codepath. It * would be better to keep for each device a sequence number that's - * incremented when we isusue a cache flush, and track here the sequence + * incremented when we issue a cache flush, and track here the sequence * number that needs flushing. */ struct bch_devs_mask ei_devs_need_flush; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 83bd31b44aad..9ae79ebb4c7e 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -569,7 +569,7 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see /* * We know that @id is a descendant of @ancestor, we're checking if * we've seen a key that overwrote @ancestor - i.e. also a descendent of - * @ascestor and with @id as a descendent. + * @ancestor and with @id as a descendent. 
* * But we already know that we're scanning IDs between @id and @ancestor * numerically, since snapshot ID lists are kept sorted, so if we find @@ -1984,7 +1984,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol, trans, subvol_fs_path_parent_wrong, - "subvol with wrong fs_path_parent, should be be %u\n%s", + "subvol with wrong fs_path_parent, should be %u\n%s", parent_subvol, (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { struct bkey_i_subvolume *n = diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 6ac0ff7e074b..752620f6d443 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -1161,7 +1161,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c) /* * if we ran check_inodes() unlinked inodes will have already been * cleaned up but the write buffer will be out of sync; therefore we - * alway need a write buffer flush + * always need a write buffer flush */ ret = bch2_btree_write_buffer_flush_sync(trans); if (ret) diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index 177ed331c00b..2806c49936b8 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -133,7 +133,7 @@ int bch2_extent_fallocate(struct btree_trans *trans, } /* - * Returns -BCH_ERR_transacton_restart if we had to drop locks: + * Returns -BCH_ERR_transaction_restart if we had to drop locks: */ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, subvol_inum inum, u64 end, diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index d3b5be7fd9bf..d04c2449dcda 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -1494,7 +1494,7 @@ static void __bch2_write(struct bch_write_op *op) /* * Sync or no? * - * If we're running asynchronously, wne may still want to block + * If we're running asynchronously, we may still want to block * synchronously here if we weren't able to submit all of the IO at * once, as that signals backpressure to the caller. */ diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index f5f7db50ca31..12249cf95087 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -167,7 +167,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) j->err_seq = journal_cur_seq(j); spin_unlock(&j->lock); - bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)", + bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)", bch2_journal_errors[error]); bch2_journal_debug_to_text(&buf, j); bch_err(c, "%s", buf.buf); @@ -803,7 +803,7 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 seq) unwritten_seq++) { struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq); - /* journal flush already in flight, or flush requseted */ + /* journal flush already in flight, or flush requested */ if (buf->must_flush) goto out; diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 30460bce04be..916f7dfe6bfe 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -157,7 +157,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca, /* * genradixes are indexed by a ulong, not a u64, so we can't index them * by sequence number directly: Assume instead that they will all fall - * within the range of +-2billion of the filrst one we find. + * within the range of +-2billion of the first one we find. 
*/ if (!c->journal_entries_base_seq) c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h index 19183fcf7ad7..952211f56ede 100644 --- a/fs/bcachefs/journal_types.h +++ b/fs/bcachefs/journal_types.h @@ -107,7 +107,7 @@ union journal_res_state { #define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */ /* - * We stash some journal state as sentinal values in cur_entry_offset: + * We stash some journal state as sentinel values in cur_entry_offset: * note - cur_entry_offset is in units of u64s */ #define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1) @@ -202,7 +202,7 @@ struct journal { darray_u64 early_journal_entries; /* - * Protects journal_buf->data, when accessing without a jorunal + * Protects journal_buf->data, when accessing without a journal * reservation: for synchronization between the btree write buffer code * and the journal write path: */ diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h index 47e4a3c3d26e..281d6f9d1a74 100644 --- a/fs/bcachefs/mean_and_variance.h +++ b/fs/bcachefs/mean_and_variance.h @@ -152,7 +152,7 @@ struct mean_and_variance { u128_u sum_squares; }; -/* expontentially weighted variant */ +/* exponentially weighted variant */ struct mean_and_variance_weighted { s64 mean; u64 variance; @@ -200,4 +200,4 @@ u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s, u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s, u8 weight); -#endif // MEAN_AND_VAIRANCE_H_ +#endif // MEAN_AND_VARIANCE_H_ diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c index e9d9c0212e44..86f38db112d8 100644 --- a/fs/bcachefs/mean_and_variance_test.c +++ b/fs/bcachefs/mean_and_variance_test.c @@ -25,7 +25,7 @@ static void mean_and_variance_basic_test(struct kunit *test) } /* - * Test values computed using a spreadsheet from the psuedocode at the bottom: + * Test values computed using a spreadsheet from the pseudocode at the bottom: * https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf */ diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h index f2ae13f0fbad..0d65e75c5f61 100644 --- a/fs/bcachefs/opts.h +++ b/fs/bcachefs/opts.h @@ -503,7 +503,7 @@ enum fsck_err_opts { OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ OPT_BOOL(), \ BCH2_NO_SB_OPT, true, \ - NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\ + NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\ " prefetched sequentially") #define BCH_DEV_OPT_SETTERS() \ diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c index 4cf5a2af1e6f..53d45be98a60 100644 --- a/fs/bcachefs/printbuf.c +++ b/fs/bcachefs/printbuf.c @@ -236,7 +236,7 @@ void bch2_printbuf_tabstop_pop(struct printbuf *buf) * bch2_printbuf_tabstop_set() - add a tabstop, n spaces from the previous tabstop * * @buf: printbuf to control - * @spaces: number of spaces from previous tabpstop + * @spaces: number of spaces from previous tabstop * * In the future this function may allocate memory if setting more than * PRINTBUF_INLINE_TABSTOPS or setting tabstops more than 255 spaces from start @@ -388,7 +388,7 @@ void bch2_prt_tab_rjust(struct printbuf *buf) * @str: string to print * @count: number of bytes to print * - * The following contol characters are handled as so: + * The following control characters are handled as so: * \n: prt_newline newline that obeys current indent level * \t: prt_tab advance to next tabstop * \r: prt_tab_rjust advance to next tabstop, with right 
justification diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h index 1d570387b77f..bfb2ca11afe4 100644 --- a/fs/bcachefs/printbuf.h +++ b/fs/bcachefs/printbuf.h @@ -25,7 +25,7 @@ * everything to the kernel log buffer, and then those pretty-printers can be * used by other code that outputs to kernel log, sysfs, debugfs, etc. * - * Memory allocation: Outputing to a printbuf may allocate memory. This + * Memory allocation: Outputting to a printbuf may allocate memory. This * allocation is done with GFP_KERNEL, by default: use the newer * memalloc_*_(save|restore) functions as needed. * @@ -56,7 +56,7 @@ * next tabstop - right justifying it. * * Make sure you use prt_newline() instead of \n in the format string for indent - * level and tabstops to work corretly. + * level and tabstops to work correctly. * * Output units: printbuf->units exists to tell pretty-printers how to output * numbers: a raw value (e.g. directly from a superblock field), as bytes, or as diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c index 40a20192eee8..62812f742caa 100644 --- a/fs/bcachefs/rcu_pending.c +++ b/fs/bcachefs/rcu_pending.c @@ -338,7 +338,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq, /* * kvfree_rcu_mightsleep(): we weren't passed an * rcu_head, but we need one: use the low bit of the - * ponter to free to flag that the head needs to be + * pointer to free to flag that the head needs to be * freed as well: */ ptr = (void *)(((unsigned long) ptr)|1UL); @@ -385,7 +385,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq, /* * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via - * pending->pracess) once grace period elapses. + * pending->process) once grace period elapses. * * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall * back to a linked list. diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index 2d299a37cf07..27ed8267a383 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -451,7 +451,7 @@ void bch2_rebalance_stop(struct bch_fs *c) c->rebalance.thread = NULL; if (p) { - /* for sychronizing with rebalance_wakeup() */ + /* for synchronizing with rebalance_wakeup() */ synchronize_rcu(); kthread_stop(p); diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h index d727d2dfda08..0a9e17ba104a 100644 --- a/fs/bcachefs/sb-members_format.h +++ b/fs/bcachefs/sb-members_format.h @@ -9,7 +9,7 @@ #define BCH_SB_MEMBERS_MAX 64 /* - * Sentinal value - indicates a device that does not exist + * Sentinel value - indicates a device that does not exist */ #define BCH_SB_MEMBER_INVALID 255 diff --git a/fs/bcachefs/siphash.h b/fs/bcachefs/siphash.h index 3dfaf34a43b2..b1374d9e1c1a 100644 --- a/fs/bcachefs/siphash.h +++ b/fs/bcachefs/siphash.h @@ -36,7 +36,7 @@ * optimized for speed on short messages returning a 64bit hash/digest value. 
* * The number of rounds is defined during the initialization: - * SipHash24_Init() for the fast and resonable strong version + * SipHash24_Init() for the fast and reasonable strong version * SipHash48_Init() for the strong version (half as fast) * * struct SIPHASH_CTX ctx; diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 3a494c5d1247..dc157e719b6b 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -111,7 +111,7 @@ static inline unsigned pcpu_read_count(struct six_lock *lock) * Returns 1 on success, 0 on failure * * In percpu reader mode, a failed trylock may cause a spurious trylock failure - * for anoter thread taking the competing lock type, and we may havve to do a + * for another thread taking the competing lock type, and we may have to do a * wakeup: when a wakeup is required, we return -1 - wakeup_type. */ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, @@ -228,7 +228,7 @@ static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_typ /* * Similar to percpu_rwsem_wake_function(), we need to guard - * against the wakee noticing w->lock_acquired, returning, and + * against the wake noticing w->lock_acquired, returning, and * then exiting before we do the wakeup: */ task = get_task_struct(w->task); @@ -591,7 +591,7 @@ static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type) * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_ * - * When a lock is held multiple times (because six_lock_incement()) was used), + * When a lock is held multiple times (because six_lock_increment()) was used), * this decrements the 'lock held' counter by one. * * For example: @@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(six_unlock_ip); /** * six_lock_downgrade - convert an intent lock to a read lock - * @lock: lock to dowgrade + * @lock: lock to downgrade * * @lock will have read count incremented and intent count decremented */ @@ -744,7 +744,7 @@ EXPORT_SYMBOL_GPL(six_lock_increment); * six_lock_wakeup_all - wake up all waiters on @lock * @lock: lock to wake up waiters for * - * Wakeing up waiters will cause them to re-run should_sleep_fn, which may then + * Waking up waiters will cause them to re-run should_sleep_fn, which may then * abort the lock operation. * * This function is never needed in a bug-free program; it's only useful in @@ -792,7 +792,7 @@ EXPORT_SYMBOL_GPL(six_lock_counts); * @lock: lock to add/subtract readers for * @nr: reader count to add/subtract * - * When an upper layer is implementing lock reentrency, we may have both read + * When an upper layer is implementing lock reentrancy, we may have both read * and intent locks on the same lock. * * When we need to take a write lock, the read locks will cause self-deadlock, @@ -823,7 +823,7 @@ EXPORT_SYMBOL_GPL(six_lock_readers_add); * six_lock_exit - release resources held by a lock prior to freeing * @lock: lock to exit * - * When a lock was initialized in percpu mode (SIX_OLCK_INIT_PCPU), this is + * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is * required to free the percpu read counts. */ void six_lock_exit(struct six_lock *lock) diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h index 68d46fd7f391..d110f88e0480 100644 --- a/fs/bcachefs/six.h +++ b/fs/bcachefs/six.h @@ -79,7 +79,7 @@ * six_unlock_read(&foo->lock); * foo->lock is now fully unlocked. 
* - * Since the intent state supercedes read, it's legal to increment the read + * Since the intent state supersedes read, it's legal to increment the read * counter when holding an intent lock, but not the reverse. * * A lock may only be held once for write: six_lock_increment(.., SIX_LOCK_write) @@ -296,7 +296,7 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long * @lock: lock to unlock * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write * - * When a lock is held multiple times (because six_lock_incement()) was used), + * When a lock is held multiple times (because six_lock_increment()) was used), * this decrements the 'lock held' counter by one. * * For example: diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c index 8b18a9b483a4..7731d39a217b 100644 --- a/fs/bcachefs/snapshot.c +++ b/fs/bcachefs/snapshot.c @@ -1417,7 +1417,7 @@ static int delete_dead_snapshots_process_key(struct btree_trans *trans, * position, we're only going to keep the one in the newest * snapshot (we delete the others above) - the rest have been * overwritten and are redundant, and for the key we're going to keep we - * need to move it to the equivalance class ID if it's not there + * need to move it to the equivalence class ID if it's not there * already. */ if (equiv != k.k->p.snapshot) { diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h index aabcd3a74cd9..73c2dce0a320 100644 --- a/fs/bcachefs/snapshot_format.h +++ b/fs/bcachefs/snapshot_format.h @@ -23,7 +23,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2) /* * Snapshot trees: * - * The snapshot_trees btree gives us persistent indentifier for each tree of + * The snapshot_trees btree gives us persistent identifier for each tree of * bch_snapshot nodes, and allow us to record and easily find the root/master * subvolume that other snapshots were created from: */ diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index dbe834cb349f..ef7235b0e9f4 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -383,7 +383,7 @@ static int bch2_subvolume_reparent(struct btree_trans *trans, * structure of how snapshot subvolumes were created - the parent subvolume of * each snapshot subvolume. 
* - * When a subvolume is deleted, we scan for child subvolumes and reparant them, + * When a subvolume is deleted, we scan for child subvolumes and reparent them, * to avoid dangling references: */ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete) diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index d86d5dae54c9..cd09a796b96d 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -405,7 +405,7 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, /* * Been seeing a bug where these are getting inexplicably * zeroed, so we're now validating them, but we have to be - * careful not to preven people's filesystems from mounting: + * careful not to prevent people's filesystems from mounting: */ if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb)) SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000); diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h index dc6493f7bbab..40ade9bf15d7 100644 --- a/fs/bcachefs/time_stats.h +++ b/fs/bcachefs/time_stats.h @@ -12,7 +12,7 @@ * - sum of all event durations * - average event duration, standard and weighted * - standard deviation of event durations, standard and weighted - * and analagous statistics for the frequency of events + * and analogous statistics for the frequency of events * * We provide both mean and weighted mean (exponentially weighted), and standard * deviation and weighted standard deviation, to give an efficient-to-compute