f2fs: compress: fix race condition of overwrite vs truncate
The pos_fsstress testcase reports a kernel panic as below:

------------[ cut here ]------------
kernel BUG at fs/f2fs/compress.c:1082!
invalid opcode: 0000 [#1] SMP PTI
CPU: 4 PID: 2753477 Comm: kworker/u16:2 Tainted: G           OE     5.12.0-rc1-custom #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
Workqueue: writeback wb_workfn (flush-252:16)
RIP: 0010:prepare_compress_overwrite+0x4c0/0x760 [f2fs]
Call Trace:
 f2fs_prepare_compress_overwrite+0x5f/0x80 [f2fs]
 f2fs_write_cache_pages+0x468/0x8a0 [f2fs]
 f2fs_write_data_pages+0x2a4/0x2f0 [f2fs]
 do_writepages+0x38/0xc0
 __writeback_single_inode+0x44/0x2a0
 writeback_sb_inodes+0x223/0x4d0
 __writeback_inodes_wb+0x56/0xf0
 wb_writeback+0x1dd/0x290
 wb_workfn+0x309/0x500
 process_one_work+0x220/0x3c0
 worker_thread+0x53/0x420
 kthread+0x12f/0x150
 ret_from_fork+0x22/0x30

The root cause is that truncate() may race with the overwrite path as
shown below: the single reference count held on the page does not
guarantee that the page stays attached to the mapping tree, so after
truncation the later find_lock_page() may return a NULL pointer.

- prepare_compress_overwrite
 - f2fs_pagecache_get_page
 - unlock_page
					- f2fs_setattr
					 - truncate_setsize
					  - truncate_inode_page
					   - delete_from_page_cache
 - find_lock_page

Fix this by not depending on the extra page reference to keep the page
in the mapping; instead, handle a NULL return from find_lock_page() by
releasing the cluster pages and retrying the overwrite preparation.
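
As an illustration only (the helper relock_cluster_pages() and its
signature are invented for this sketch and are not part of f2fs), the
safer lookup pattern the fix moves to looks roughly like this: once the
pages have been unlocked, truncation can remove them from the mapping
even while a reference is still held, so the second find_lock_page()
must tolerate a NULL return and ask the caller to retry instead of
asserting:

#include <linux/errno.h>
#include <linux/pagemap.h>	/* find_lock_page(), unlock_page(), put_page() */

/*
 * Illustrative sketch only: re-acquire a run of page-cache pages that
 * may have been truncated while they were unlocked.  Returns 0 on
 * success, or -EAGAIN if a page vanished and the caller should restart
 * cluster preparation from scratch.
 */
static int relock_cluster_pages(struct address_space *mapping,
				pgoff_t start_idx, int nr_pages,
				struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = find_lock_page(mapping, start_idx + i);

		/*
		 * The page can be truncated between unlock_page() and this
		 * lookup; holding a reference alone does not keep it in the
		 * mapping tree.
		 */
		if (!page) {
			/* drop the locks/references taken so far */
			while (--i >= 0) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			return -EAGAIN;	/* caller retries from scratch */
		}

		pages[i] = page;
	}

	return 0;
}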

Fixes: 4c8ff70 ("f2fs: support data compression")
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
chaseyu authored and Jaegeuk Kim committed May 11, 2021
1 parent a12cc5b commit a949dc5
Showing 1 changed file with 12 additions and 23 deletions.
35 changes: 12 additions & 23 deletions fs/f2fs/compress.c
@@ -117,19 +117,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
 	f2fs_drop_rpages(cc, len, true);
 }
 
-static void f2fs_put_rpages_mapping(struct address_space *mapping,
-				pgoff_t start, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++) {
-		struct page *page = find_get_page(mapping, start + i);
-
-		put_page(page);
-		put_page(page);
-	}
-}
-
 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
 		struct writeback_control *wbc, bool redirty, int unlock)
 {
@@ -1036,7 +1023,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
 		}
 
 		if (PageUptodate(page))
-			unlock_page(page);
+			f2fs_put_page(page, 1);
 		else
 			f2fs_compress_ctx_add_page(cc, page);
 	}
@@ -1046,32 +1033,34 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
 
 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
 					&last_block_in_bio, false, true);
+		f2fs_put_rpages(cc);
 		f2fs_destroy_compress_ctx(cc);
 		if (ret)
-			goto release_pages;
+			goto out;
 		if (bio)
 			f2fs_submit_bio(sbi, bio, DATA);
 
 		ret = f2fs_init_compress_ctx(cc);
 		if (ret)
-			goto release_pages;
+			goto out;
 	}
 
 	for (i = 0; i < cc->cluster_size; i++) {
 		f2fs_bug_on(sbi, cc->rpages[i]);
 
 		page = find_lock_page(mapping, start_idx + i);
-		f2fs_bug_on(sbi, !page);
+		if (!page) {
+			/* page can be truncated */
+			goto release_and_retry;
+		}
 
 		f2fs_wait_on_page_writeback(page, DATA, true, true);
-
 		f2fs_compress_ctx_add_page(cc, page);
-		f2fs_put_page(page, 0);
 
 		if (!PageUptodate(page)) {
+release_and_retry:
+			f2fs_put_rpages(cc);
 			f2fs_unlock_rpages(cc, i + 1);
-			f2fs_put_rpages_mapping(mapping, start_idx,
-					cc->cluster_size);
 			f2fs_destroy_compress_ctx(cc);
 			goto retry;
 		}
@@ -1103,10 +1092,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
 	}
 
 unlock_pages:
+	f2fs_put_rpages(cc);
 	f2fs_unlock_rpages(cc, i);
-release_pages:
-	f2fs_put_rpages_mapping(mapping, start_idx, i);
 	f2fs_destroy_compress_ctx(cc);
+out:
 	return ret;
 }
 