Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NAS-132432 / 24.10.1 / Cherry-pick important zfs commits from upstream to stable/electriceel for 24.10.1 freeze #259

Merged
merged 25 commits into from
Nov 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
c45a833
Fix an uninitialized data access (#16511)
asomers Sep 10, 2024
5102987
Remove extra newline from spa_set_allocator().
amotin Sep 17, 2024
75a8c79
Avoid fault diagnosis if multiple vdevs have errors
don-brady Sep 18, 2024
0d4d1b2
arcstat: add structural, types, states breakdown
tkittich Sep 18, 2024
3699b88
zio_compress: introduce max size threshold
gmelikov Sep 10, 2019
4ecdf62
ZLE compression: don't use BPE_PAYLOAD_SIZE
gmelikov May 29, 2024
3ace0a4
arc_hdr_authenticate: make explicit error
gmelikov Jun 3, 2024
22d183c
Evicting too many bytes from MFU metadata
tkittich Sep 24, 2024
e42b277
Properly release key in spa_keystore_dsl_key_hold_dd()
amotin Sep 25, 2024
4a72da5
Restrict raidz faulted vdev count
don-brady Oct 1, 2024
c168599
ARC: Cache arc_c value during arc_evict()
amotin Oct 4, 2024
2bfbe0e
Fix generation of kernel uevents for snapshot rename on linux
JKDingwall Oct 6, 2024
4b9e7f4
zpool/zfs: allow --json wherever -j is allowed
robn Oct 11, 2024
e303a29
Pack dmu_buf_impl_t by 16 bytes
amotin Oct 25, 2024
e8ae9fc
On the first vdev open ignore impossible ashift hints
amotin Oct 29, 2024
9886013
vdev_disk: try harder to ensure IO alignment rules
robn Oct 25, 2024
465f165
vdev_disk: move abd return and free off the interrupt handler
robn Oct 25, 2024
d5e828f
Added output to `zpool online` and `offline`
rincebrain Nov 1, 2024
c7a8970
Verify parent_dev before calling udev_device_get_sysattr_value
Uglymotha Nov 5, 2024
a51e85e
Use simple folio migration function
tstabrawa Nov 3, 2024
5821cd9
JSON: fix user properties output for zfs list
usaleem-ix Nov 7, 2024
6b05248
JSON: fix user properties output for zpool list
usaleem-ix Nov 8, 2024
9641126
Fix user properties output for zpool list
usaleem-ix Nov 8, 2024
ce92af2
L2ARC: Move different stats updates earlier
amotin Nov 13, 2024
2ef56a7
zvol_os.c: Increase optimal IO size
ixhamza Nov 12, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/arc_summary
Original file line number Diff line number Diff line change
Expand Up @@ -566,7 +566,7 @@ def section_arc(kstats_dict):
l2_hdr_size = arc_stats['l2_hdr_size']
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']

prt_1('ARC structal breakdown (current size):', f_bytes(arc_size))
prt_1('ARC structural breakdown (current size):', f_bytes(arc_size))
prt_i2('Compressed size:',
f_perc(compressed_size, arc_size), f_bytes(compressed_size))
prt_i2('Overhead size:',
Expand Down
297 changes: 209 additions & 88 deletions cmd/arcstat.in

Large diffs are not rendered by default.

101 changes: 72 additions & 29 deletions cmd/zed/agents/zfs_diagnosis.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ typedef struct zfs_case_data {
uint64_t zc_ena;
uint64_t zc_pool_guid;
uint64_t zc_vdev_guid;
uint64_t zc_parent_guid;
int zc_pool_state;
char zc_serd_checksum[MAX_SERDLEN];
char zc_serd_io[MAX_SERDLEN];
Expand Down Expand Up @@ -181,10 +182,10 @@ zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
}

/*
* count other unique slow-io cases in a pool
* Return count of other unique SERD cases under same vdev parent
*/
static uint_t
zfs_other_slow_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
zfs_other_serd_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
{
zfs_case_t *zcp;
uint_t cases = 0;
Expand All @@ -206,10 +207,32 @@ zfs_other_slow_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)

for (zcp = uu_list_first(zfs_cases); zcp != NULL;
zcp = uu_list_next(zfs_cases, zcp)) {
if (zcp->zc_data.zc_pool_guid == zfs_case->zc_pool_guid &&
zcp->zc_data.zc_vdev_guid != zfs_case->zc_vdev_guid &&
zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
fmd_serd_active(hdl, zcp->zc_data.zc_serd_slow_io)) {
zfs_case_data_t *zcd = &zcp->zc_data;

/*
* must be same pool and parent vdev but different leaf vdev
*/
if (zcd->zc_pool_guid != zfs_case->zc_pool_guid ||
zcd->zc_parent_guid != zfs_case->zc_parent_guid ||
zcd->zc_vdev_guid == zfs_case->zc_vdev_guid) {
continue;
}

/*
* Check if there is another active serd case besides zfs_case
*
* Only one serd engine will be assigned to the case
*/
if (zcd->zc_serd_checksum[0] == zfs_case->zc_serd_checksum[0] &&
fmd_serd_active(hdl, zcd->zc_serd_checksum)) {
cases++;
}
if (zcd->zc_serd_io[0] == zfs_case->zc_serd_io[0] &&
fmd_serd_active(hdl, zcd->zc_serd_io)) {
cases++;
}
if (zcd->zc_serd_slow_io[0] == zfs_case->zc_serd_slow_io[0] &&
fmd_serd_active(hdl, zcd->zc_serd_slow_io)) {
cases++;
}
}
Expand Down Expand Up @@ -502,6 +525,34 @@ zfs_ereport_when(fmd_hdl_t *hdl, nvlist_t *nvl, er_timeval_t *when)
}
}

/*
 * Feed the given event into the named SERD engine and return a
 * boolean indicating whether the engine fired because of it.
 *
 * If other vdevs sharing the same parent already have active cases
 * of the same kind, the fired state is suppressed and this case is
 * retired instead, so a single vdev is not faulted for what looks
 * like a pool-wide (or parent-wide) problem.
 */
static int
zfs_fm_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep,
    zfs_case_t *zcp, const char *err_type)
{
	int fired = fmd_serd_record(hdl, name, ep);
	int peers = 0;

	if (!fired)
		return (0);

	/* Engine fired: check for similar active cases on sibling vdevs. */
	peers = zfs_other_serd_cases(hdl, &zcp->zc_data);
	if (peers > 0) {
		fmd_hdl_debug(hdl, "pool %llu is tracking %d other %s cases "
		    "-- skip faulting the vdev %llu",
		    (u_longlong_t)zcp->zc_data.zc_pool_guid,
		    peers, err_type,
		    (u_longlong_t)zcp->zc_data.zc_vdev_guid);
		zfs_case_retire(hdl, zcp);
		return (0);
	}

	return (fired);
}

/*
* Main fmd entry point.
*/
Expand All @@ -510,7 +561,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
{
zfs_case_t *zcp, *dcp;
int32_t pool_state;
uint64_t ena, pool_guid, vdev_guid;
uint64_t ena, pool_guid, vdev_guid, parent_guid;
uint64_t checksum_n, checksum_t;
uint64_t io_n, io_t;
er_timeval_t pool_load;
Expand Down Expand Up @@ -600,6 +651,9 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
if (nvlist_lookup_uint64(nvl,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
vdev_guid = 0;
if (nvlist_lookup_uint64(nvl,
FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, &parent_guid) != 0)
parent_guid = 0;
if (nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena) != 0)
ena = 0;

Expand Down Expand Up @@ -710,6 +764,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
data.zc_ena = ena;
data.zc_pool_guid = pool_guid;
data.zc_vdev_guid = vdev_guid;
data.zc_parent_guid = parent_guid;
data.zc_pool_state = (int)pool_state;

fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));
Expand Down Expand Up @@ -872,8 +927,10 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
SEC2NSEC(io_t));
zfs_case_serialize(zcp);
}
if (fmd_serd_record(hdl, zcp->zc_data.zc_serd_io, ep))
if (zfs_fm_serd_record(hdl, zcp->zc_data.zc_serd_io,
ep, zcp, "io error")) {
checkremove = B_TRUE;
}
} else if (fmd_nvl_class_match(hdl, nvl,
ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY))) {
uint64_t slow_io_n, slow_io_t;
Expand All @@ -899,25 +956,10 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
}
/* Pass event to SERD engine and see if this triggers */
if (zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
fmd_serd_record(hdl, zcp->zc_data.zc_serd_slow_io,
ep)) {
/*
* Ignore a slow io diagnosis when other
* VDEVs in the pool show signs of being slow.
*/
if (zfs_other_slow_cases(hdl, &zcp->zc_data)) {
zfs_case_retire(hdl, zcp);
fmd_hdl_debug(hdl, "pool %llu has "
"multiple slow io cases -- skip "
"degrading vdev %llu",
(u_longlong_t)
zcp->zc_data.zc_pool_guid,
(u_longlong_t)
zcp->zc_data.zc_vdev_guid);
} else {
zfs_case_solve(hdl, zcp,
"fault.fs.zfs.vdev.slow_io");
}
zfs_fm_serd_record(hdl,
zcp->zc_data.zc_serd_slow_io, ep, zcp, "slow io")) {
zfs_case_solve(hdl, zcp,
"fault.fs.zfs.vdev.slow_io");
}
} else if (fmd_nvl_class_match(hdl, nvl,
ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM))) {
Expand Down Expand Up @@ -968,8 +1010,9 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
SEC2NSEC(checksum_t));
zfs_case_serialize(zcp);
}
if (fmd_serd_record(hdl,
zcp->zc_data.zc_serd_checksum, ep)) {
if (zfs_fm_serd_record(hdl,
zcp->zc_data.zc_serd_checksum, ep, zcp,
"checksum")) {
zfs_case_solve(hdl, zcp,
"fault.fs.zfs.vdev.checksum");
}
Expand Down
3 changes: 2 additions & 1 deletion cmd/zed/zed_disk_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,8 @@ dev_event_nvlist(struct udev_device *dev)
* is /dev/sda.
*/
struct udev_device *parent_dev = udev_device_get_parent(dev);
if ((value = udev_device_get_sysattr_value(parent_dev, "size"))
if (parent_dev != NULL &&
(value = udev_device_get_sysattr_value(parent_dev, "size"))
!= NULL) {
uint64_t numval = DEV_BSIZE;

Expand Down
35 changes: 30 additions & 5 deletions cmd/zfs/zfs_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -2162,6 +2162,7 @@ zfs_do_get(int argc, char **argv)
cb.cb_type = ZFS_TYPE_DATASET;

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
{0, 0, 0, 0}
};
Expand Down Expand Up @@ -3760,8 +3761,13 @@ collect_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
if (cb->cb_json) {
if (pl->pl_prop == ZFS_PROP_NAME)
continue;
const char *prop_name;
if (pl->pl_prop != ZPROP_USERPROP)
prop_name = zfs_prop_to_name(pl->pl_prop);
else
prop_name = pl->pl_user_prop;
if (zprop_nvlist_one_property(
zfs_prop_to_name(pl->pl_prop), propstr,
prop_name, propstr,
sourcetype, source, NULL, props,
cb->cb_json_as_int) != 0)
nomem();
Expand Down Expand Up @@ -3852,6 +3858,7 @@ zfs_do_list(int argc, char **argv)
nvlist_t *data = NULL;

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
{0, 0, 0, 0}
};
Expand Down Expand Up @@ -7436,9 +7443,15 @@ share_mount(int op, int argc, char **argv)
uint_t nthr;
jsobj = data = item = NULL;

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{0, 0, 0, 0}
};

/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":ajRlvo:Of" : "al"))
!= -1) {
while ((c = getopt_long(argc, argv,
op == OP_MOUNT ? ":ajRlvo:Of" : "al",
op == OP_MOUNT ? long_options : NULL, NULL)) != -1) {
switch (c) {
case 'a':
do_all = 1;
Expand Down Expand Up @@ -8374,8 +8387,14 @@ zfs_do_channel_program(int argc, char **argv)
boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
zpool_handle_t *zhp;

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{0, 0, 0, 0}
};

/* check options */
while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
while ((c = getopt_long(argc, argv, "nt:m:j", long_options,
NULL)) != -1) {
switch (c) {
case 't':
case 'm': {
Expand Down Expand Up @@ -9083,7 +9102,13 @@ zfs_do_version(int argc, char **argv)
int c;
nvlist_t *jsobj = NULL, *zfs_ver = NULL;
boolean_t json = B_FALSE;
while ((c = getopt(argc, argv, "j")) != -1) {

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{0, 0, 0, 0}
};

while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
switch (c) {
case 'j':
json = B_TRUE;
Expand Down
35 changes: 30 additions & 5 deletions cmd/zpool/zpool_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -6870,8 +6870,13 @@ collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
if (cb->cb_json) {
if (pl->pl_prop == ZPOOL_PROP_NAME)
continue;
const char *prop_name;
if (pl->pl_prop != ZPROP_USERPROP)
prop_name = zpool_prop_to_name(pl->pl_prop);
else
prop_name = pl->pl_user_prop;
(void) zprop_nvlist_one_property(
zpool_prop_to_name(pl->pl_prop), propstr,
prop_name, propstr,
sourcetype, NULL, NULL, props, cb->cb_json_as_int);
} else {
/*
Expand Down Expand Up @@ -7328,6 +7333,7 @@ zpool_do_list(int argc, char **argv)
current_prop_type = ZFS_TYPE_POOL;

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
{"json-pool-key-guid", no_argument, NULL,
ZPOOL_OPTION_POOL_KEY_GUID},
Expand Down Expand Up @@ -7953,8 +7959,11 @@ zpool_do_online(int argc, char **argv)

poolname = argv[0];

if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("failed to open pool "
"\"%s\""), poolname);
return (1);
}

for (i = 1; i < argc; i++) {
vdev_state_t oldstate;
Expand All @@ -7975,12 +7984,15 @@ zpool_do_online(int argc, char **argv)
&l2cache, NULL);
if (tgt == NULL) {
ret = 1;
(void) fprintf(stderr, gettext("couldn't find device "
"\"%s\" in pool \"%s\"\n"), argv[i], poolname);
continue;
}
uint_t vsc;
oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if ((rc = zpool_vdev_online(zhp, argv[i], flags,
&newstate)) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
Expand All @@ -8006,6 +8018,9 @@ zpool_do_online(int argc, char **argv)
}
}
} else {
(void) fprintf(stderr, gettext("Failed to online "
"\"%s\" in pool \"%s\": %d\n"),
argv[i], poolname, rc);
ret = 1;
}
}
Expand Down Expand Up @@ -8090,8 +8105,11 @@ zpool_do_offline(int argc, char **argv)

poolname = argv[0];

if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("failed to open pool "
"\"%s\""), poolname);
return (1);
}

for (i = 1; i < argc; i++) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
Expand Down Expand Up @@ -10958,6 +10976,7 @@ zpool_do_status(int argc, char **argv)

struct option long_options[] = {
{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
{"json-flat-vdevs", no_argument, NULL,
ZPOOL_OPTION_JSON_FLAT_VDEVS},
Expand Down Expand Up @@ -12563,6 +12582,7 @@ zpool_do_get(int argc, char **argv)
current_prop_type = cb.cb_type;

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
{"json-pool-key-guid", no_argument, NULL,
ZPOOL_OPTION_POOL_KEY_GUID},
Expand Down Expand Up @@ -13477,7 +13497,12 @@ zpool_do_version(int argc, char **argv)
int c;
nvlist_t *jsobj = NULL, *zfs_ver = NULL;
boolean_t json = B_FALSE;
while ((c = getopt(argc, argv, "j")) != -1) {

struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
};

while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
switch (c) {
case 'j':
json = B_TRUE;
Expand Down
3 changes: 2 additions & 1 deletion cmd/zstream/zstream_recompress.c
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,8 @@ zstream_do_recompress(int argc, char *argv[])
abd_t *pabd =
abd_get_from_buf_struct(&abd, buf, bufsz);
size_t csize = zio_compress_data(ctype, &dabd,
&pabd, drrw->drr_logical_size, level);
&pabd, drrw->drr_logical_size,
drrw->drr_logical_size, level);
size_t rounded =
P2ROUNDUP(csize, SPA_MINBLOCKSIZE);
if (rounded >= drrw->drr_logical_size) {
Expand Down
Loading
Loading