btrfs: add unlikely annotations to branches leading to EIO

The unlikely() annotation is a static branch prediction hint that the
compiler may use to move code out of the hot path. We already use it
elsewhere (namely in tree-checker.c) for error branches that almost
never happen, and returning -EIO is one such case.
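
For reference, in a regular (non-instrumented) kernel build the hint
boils down to __builtin_expect() (include/linux/compiler.h), roughly:

	# define likely(x)	__builtin_expect(!!(x), 1)
	# define unlikely(x)	__builtin_expect(!!(x), 0)

so an annotated error branch, like the ones converted below, reads:

	if (unlikely(!extent_buffer_uptodate(eb))) {
		free_extent_buffer(eb);
		return -EIO;
	}

This is only a compile-time code placement hint, not a guarantee about
the generated layout.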

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit cc53bd2085 (parent 9264d004a6)
David Sterba <dsterba@suse.com>, 2025-09-17 19:53:55 +02:00

19 changed files with 81 additions and 83 deletions

@@ -859,7 +859,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
 			free_pref(ref);
 			return PTR_ERR(eb);
 		}
-		if (!extent_buffer_uptodate(eb)) {
+		if (unlikely(!extent_buffer_uptodate(eb))) {
 			free_pref(ref);
 			free_extent_buffer(eb);
 			return -EIO;
@@ -1614,7 +1614,7 @@ static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
 			ret = PTR_ERR(eb);
 			goto out;
 		}
-		if (!extent_buffer_uptodate(eb)) {
+		if (unlikely(!extent_buffer_uptodate(eb))) {
 			free_extent_buffer(eb);
 			ret = -EIO;
 			goto out;

@@ -849,8 +849,8 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
 	if (ret < 0)
 		goto out_counter_dec;
-	if (!smap.dev->bdev ||
-	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) {
+	if (unlikely(!smap.dev->bdev ||
+		     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state))) {
 		ret = -EIO;
 		goto out_counter_dec;
 	}

@@ -924,7 +924,7 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t
 			folio_put(folio);
 			goto again;
 		}
-		if (!folio_test_uptodate(folio)) {
+		if (unlikely(!folio_test_uptodate(folio))) {
 			folio_unlock(folio);
 			folio_put(folio);
 			return ERR_PTR(-EIO);

@@ -177,8 +177,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
 		 * allow 'btrfs dev replace_cancel' if src/tgt device is
 		 * missing
 		 */
-		if (!dev_replace->srcdev &&
-		    !btrfs_test_opt(fs_info, DEGRADED)) {
+		if (unlikely(!dev_replace->srcdev && !btrfs_test_opt(fs_info, DEGRADED))) {
 			ret = -EIO;
 			btrfs_warn(fs_info,
 			"cannot mount because device replace operation is ongoing and");
@@ -186,8 +185,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
 			"srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
 				src_devid);
 		}
-		if (!dev_replace->tgtdev &&
-		    !btrfs_test_opt(fs_info, DEGRADED)) {
+		if (unlikely(!dev_replace->tgtdev && !btrfs_test_opt(fs_info, DEGRADED))) {
 			ret = -EIO;
 			btrfs_warn(fs_info,
 			"cannot mount because device replace operation is ongoing and");

@@ -370,21 +370,21 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
 	ASSERT(check);
 	found_start = btrfs_header_bytenr(eb);
-	if (found_start != eb->start) {
+	if (unlikely(found_start != eb->start)) {
 		btrfs_err_rl(fs_info,
 			"bad tree block start, mirror %u want %llu have %llu",
 			eb->read_mirror, eb->start, found_start);
 		ret = -EIO;
 		goto out;
 	}
-	if (check_tree_block_fsid(eb)) {
+	if (unlikely(check_tree_block_fsid(eb))) {
 		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
 			     eb->start, eb->read_mirror);
 		ret = -EIO;
 		goto out;
 	}
 	found_level = btrfs_header_level(eb);
-	if (found_level >= BTRFS_MAX_LEVEL) {
+	if (unlikely(found_level >= BTRFS_MAX_LEVEL)) {
 		btrfs_err(fs_info,
 			"bad tree block level, mirror %u level %d on logical %llu",
 			eb->read_mirror, btrfs_header_level(eb), eb->start);
@@ -410,7 +410,7 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
 		}
 	}
-	if (found_level != check->level) {
+	if (unlikely(found_level != check->level)) {
 		btrfs_err(fs_info,
 			"level verify failed on logical %llu mirror %u wanted %u found %u",
 			eb->start, eb->read_mirror, check->level, found_level);
@@ -1046,7 +1046,7 @@ static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
 		root->node = NULL;
 		goto fail;
 	}
-	if (!btrfs_buffer_uptodate(root->node, generation, false)) {
+	if (unlikely(!btrfs_buffer_uptodate(root->node, generation, false))) {
 		ret = -EIO;
 		goto fail;
 	}
@@ -2058,7 +2058,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 	u64 bytenr = btrfs_super_log_root(disk_super);
 	int level = btrfs_super_log_root_level(disk_super);
-	if (fs_devices->rw_devices == 0) {
+	if (unlikely(fs_devices->rw_devices == 0)) {
 		btrfs_warn(fs_info, "log replay required on RO media");
 		return -EIO;
 	}
@@ -2079,7 +2079,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 		btrfs_put_root(log_tree_root);
 		return ret;
 	}
-	if (!extent_buffer_uptodate(log_tree_root->node)) {
+	if (unlikely(!extent_buffer_uptodate(log_tree_root->node))) {
 		btrfs_err(fs_info, "failed to read log tree");
 		btrfs_put_root(log_tree_root);
 		return -EIO;
@@ -2641,7 +2641,7 @@ static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int lev
 		root->node = NULL;
 		return ret;
 	}
-	if (!extent_buffer_uptodate(root->node)) {
+	if (unlikely(!extent_buffer_uptodate(root->node))) {
 		free_extent_buffer(root->node);
 		root->node = NULL;
 		return -EIO;
@@ -3469,7 +3469,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	 * below in btrfs_init_dev_replace().
 	 */
 	btrfs_free_extra_devids(fs_devices);
-	if (!fs_devices->latest_dev->bdev) {
+	if (unlikely(!fs_devices->latest_dev->bdev)) {
 		btrfs_err(fs_info, "failed to read devices");
 		ret = -EIO;
 		goto fail_tree_roots;
@@ -3963,7 +3963,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 	 * Checks last_flush_error of disks in order to determine the device
 	 * state.
 	 */
-	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
+	if (unlikely(errors_wait && !btrfs_check_rw_degradable(info, NULL)))
 		return -EIO;
 	return 0;
@@ -4076,7 +4076,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
 		if (ret)
 			total_errors++;
 	}
-	if (total_errors > max_errors) {
+	if (unlikely(total_errors > max_errors)) {
 		btrfs_err(fs_info, "%d errors while writing supers",
 			  total_errors);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4101,7 +4101,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
 			total_errors++;
 	}
 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-	if (total_errors > max_errors) {
+	if (unlikely(total_errors > max_errors)) {
 		btrfs_handle_fs_error(fs_info, -EIO,
 				      "%d errors while writing supers",
 				      total_errors);

@@ -5638,7 +5638,7 @@ static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_r
 		ref.parent = path->nodes[level]->start;
 	} else {
 		ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level]));
-		if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) {
+		if (unlikely(btrfs_root_id(root) != btrfs_header_owner(path->nodes[level]))) {
 			btrfs_err(root->fs_info, "mismatched block owner");
 			return -EIO;
 		}
@@ -5774,7 +5774,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	level--;
 	ASSERT(level == btrfs_header_level(next));
-	if (level != btrfs_header_level(next)) {
+	if (unlikely(level != btrfs_header_level(next))) {
 		btrfs_err(root->fs_info, "mismatched level");
 		ret = -EIO;
 		goto out_unlock;

@@ -3880,7 +3880,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
 		return ret;
 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
-	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
+	if (unlikely(!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)))
 		return -EIO;
 	return 0;
 }

@@ -1057,7 +1057,7 @@ int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pr
 	btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
 	write_lock(&em_tree->lock);
 	em = btrfs_lookup_extent_mapping(em_tree, start, len);
-	if (!em) {
+	if (unlikely(!em)) {
 		ret = -EIO;
 		goto out_unlock;
 	}

@@ -815,7 +815,7 @@ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64
 	if (ret)
 		return ret;
 	folio_lock(folio);
-	if (!folio_test_uptodate(folio)) {
+	if (unlikely(!folio_test_uptodate(folio))) {
 		folio_unlock(folio);
 		return -EIO;
 	}

@@ -137,12 +137,12 @@ static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
 	if (ret < 0)
 		return ret;
-	if (ret == 0) {
+	if (unlikely(ret == 0)) {
 		DEBUG_WARN();
 		return -EIO;
 	}
-	if (p->slots[0] == 0) {
+	if (unlikely(p->slots[0] == 0)) {
 		DEBUG_WARN("no previous slot found");
 		return -EIO;
 	}
@@ -293,7 +293,7 @@ int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
 	btrfs_release_path(path);
-	if (extent_count != expected_extent_count) {
+	if (unlikely(extent_count != expected_extent_count)) {
 		btrfs_err(fs_info,
 			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->start, extent_count,
@@ -465,7 +465,7 @@ int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
 	}
-	if (extent_count != expected_extent_count) {
+	if (unlikely(extent_count != expected_extent_count)) {
 		btrfs_err(fs_info,
 			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->start, extent_count,
@@ -1611,7 +1611,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
 		extent_count++;
 	}
-	if (extent_count != expected_extent_count) {
+	if (unlikely(extent_count != expected_extent_count)) {
 		btrfs_err(fs_info,
 			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->start, extent_count,
@@ -1672,7 +1672,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
 		extent_count++;
 	}
-	if (extent_count != expected_extent_count) {
+	if (unlikely(extent_count != expected_extent_count)) {
 		btrfs_err(fs_info,
 			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->start, extent_count,

@@ -3104,7 +3104,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
 	if (!freespace_inode)
 		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
-	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+	if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
 		ret = -EIO;
 		goto out;
 	}
@@ -3370,7 +3370,7 @@ int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8
 			   const u8 * const csum_expected)
 {
 	btrfs_calculate_block_csum(fs_info, paddr, csum);
-	if (memcmp(csum, csum_expected, fs_info->csum_size))
+	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
 		return -EIO;
 	return 0;
 }
@@ -4842,7 +4842,7 @@ static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
 		folio_put(folio);
 		goto again;
 	}
-	if (!folio_test_uptodate(folio)) {
+	if (unlikely(!folio_test_uptodate(folio))) {
 		ret = -EIO;
 		goto out_unlock;
 	}
@@ -4986,7 +4986,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e
 		folio_put(folio);
 		goto again;
 	}
-	if (!folio_test_uptodate(folio)) {
+	if (unlikely(!folio_test_uptodate(folio))) {
 		ret = -EIO;
 		goto out_unlock;
 	}
@@ -7179,7 +7179,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 insert:
 	ret = 0;
 	btrfs_release_path(path);
-	if (em->start > start || btrfs_extent_map_end(em) <= start) {
+	if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
 		btrfs_err(fs_info,
 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
 			  em->start, em->len, start, len);
@@ -9298,7 +9298,7 @@ static ssize_t btrfs_encoded_read_inline(
 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
 				       extent_start, 0);
 	if (ret) {
-		if (ret > 0) {
+		if (unlikely(ret > 0)) {
 			/* The extent item disappeared? */
 			return -EIO;
 		}

@@ -2538,7 +2538,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
 		return -EUCLEAN;
 	}
-	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
+	if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) {
 		ret = -EIO;
 		goto out;
 	}
@@ -4843,7 +4843,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
 		reloc_eb = NULL;
 		goto free_out;
 	}
-	if (!extent_buffer_uptodate(reloc_eb)) {
+	if (unlikely(!extent_buffer_uptodate(reloc_eb))) {
 		ret = -EIO;
 		goto free_out;
 	}

@@ -1167,7 +1167,7 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
 		/* Check if we have reached tolerance early. */
 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
 							 NULL, NULL);
-		if (found_errors > rbio->bioc->max_errors)
+		if (unlikely(found_errors > rbio->bioc->max_errors))
 			return -EIO;
 		return 0;
 	}
@@ -1847,7 +1847,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
 	if (!found_errors)
 		return 0;
-	if (found_errors > rbio->bioc->max_errors)
+	if (unlikely(found_errors > rbio->bioc->max_errors))
 		return -EIO;
 	/*
@@ -2399,7 +2399,7 @@ static void rmw_rbio(struct btrfs_raid_bio *rbio)
 		int found_errors;
 		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
-		if (found_errors > rbio->bioc->max_errors) {
+		if (unlikely(found_errors > rbio->bioc->max_errors)) {
 			ret = -EIO;
 			break;
 		}
@@ -2688,7 +2688,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
 							 &faila, &failb);
-		if (found_errors > rbio->bioc->max_errors) {
+		if (unlikely(found_errors > rbio->bioc->max_errors)) {
 			ret = -EIO;
 			goto out;
 		}
@@ -2712,7 +2712,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
 		 * data, so the capability of the repair is declined. (In the
 		 * case of RAID5, we can not repair anything.)
 		 */
-		if (dfail > rbio->bioc->max_errors - 1) {
+		if (unlikely(dfail > rbio->bioc->max_errors - 1)) {
 			ret = -EIO;
 			goto out;
 		}
@@ -2729,7 +2729,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
 		 * scrubbing parity, luckily, use the other one to repair the
 		 * data, or we can not repair the data stripe.
 		 */
-		if (failp != rbio->scrubp) {
+		if (unlikely(failp != rbio->scrubp)) {
 			ret = -EIO;
 			goto out;
 		}
@@ -2820,7 +2820,7 @@ static void scrub_rbio(struct btrfs_raid_bio *rbio)
 		int found_errors;
 		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
-		if (found_errors > rbio->bioc->max_errors) {
+		if (unlikely(found_errors > rbio->bioc->max_errors)) {
 			ret = -EIO;
 			break;
 		}

@@ -2270,7 +2270,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 		bytenr = btrfs_node_blockptr(upper->eb, slot);
 		if (lowest) {
-			if (bytenr != node->bytenr) {
+			if (unlikely(bytenr != node->bytenr)) {
 				btrfs_err(root->fs_info,
 		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
 					  bytenr, node->bytenr, slot,
@@ -2447,7 +2447,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
 	eb = read_tree_block(fs_info, block->bytenr, &check);
 	if (IS_ERR(eb))
 		return PTR_ERR(eb);
-	if (!extent_buffer_uptodate(eb)) {
+	if (unlikely(!extent_buffer_uptodate(eb))) {
 		free_extent_buffer(eb);
 		return -EIO;
 	}
@@ -2832,7 +2832,7 @@ static int relocate_one_folio(struct reloc_control *rc,
 	if (!folio_test_uptodate(folio)) {
 		btrfs_read_folio(NULL, folio);
 		folio_lock(folio);
-		if (!folio_test_uptodate(folio)) {
+		if (unlikely(!folio_test_uptodate(folio))) {
 			ret = -EIO;
 			goto release_folio;
 		}

@@ -1987,7 +1987,7 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
 	 * metadata, we should immediately abort.
 	 */
 	for (int i = 0; i < nr_stripes; i++) {
-		if (stripe_has_metadata_error(&sctx->stripes[i])) {
+		if (unlikely(stripe_has_metadata_error(&sctx->stripes[i]))) {
 			ret = -EIO;
 			goto out;
 		}
@@ -2181,7 +2181,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
 		 * As we may hit an empty data stripe while it's missing.
 		 */
 		bitmap_and(&error, &error, &has_extent, stripe->nr_sectors);
-		if (!bitmap_empty(&error, stripe->nr_sectors)) {
+		if (unlikely(!bitmap_empty(&error, stripe->nr_sectors))) {
 			btrfs_err(fs_info,
 "scrub: unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
 				  full_stripe_start, i, stripe->nr_sectors,
@@ -2875,8 +2875,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		btrfs_put_block_group(cache);
 		if (ret)
 			break;
-		if (sctx->is_dev_replace &&
-		    atomic64_read(&dev_replace->num_write_errors) > 0) {
+		if (unlikely(sctx->is_dev_replace &&
+			     atomic64_read(&dev_replace->num_write_errors) > 0)) {
 			ret = -EIO;
 			break;
 		}
@@ -2904,7 +2904,7 @@ static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
 	if (ret < 0)
 		return ret;
 	ret = btrfs_check_super_csum(fs_info, sb);
-	if (ret != 0) {
+	if (unlikely(ret != 0)) {
 		btrfs_err_rl(fs_info,
 			"scrub: super block at physical %llu devid %llu has bad csum",
 			physical, dev->devid);
@@ -3080,8 +3080,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	}
 	mutex_lock(&fs_info->scrub_lock);
-	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
-	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
+	if (unlikely(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+		     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state))) {
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		ret = -EIO;

@@ -646,7 +646,7 @@ static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
 		ret = kernel_write(filp, buf + pos, len - pos, off);
 		if (ret < 0)
 			return ret;
-		if (ret == 0)
+		if (unlikely(ret == 0))
 			return -EIO;
 		pos += ret;
 	}
@@ -1723,7 +1723,7 @@ static int read_symlink(struct btrfs_root *root,
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		return ret;
-	if (ret) {
+	if (unlikely(ret)) {
 		/*
 		 * An empty symlink inode. Can happen in rare error paths when
 		 * creating a symlink (transaction committed before the inode
@@ -5199,7 +5199,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
 		if (!folio_test_uptodate(folio)) {
 			btrfs_read_folio(NULL, folio);
 			folio_lock(folio);
-			if (!folio_test_uptodate(folio)) {
+			if (unlikely(!folio_test_uptodate(folio))) {
 				folio_unlock(folio);
 				btrfs_err(fs_info,
 			"send: IO error at offset %llu for inode %llu root %llu",
@@ -6961,7 +6961,7 @@ static int changed_ref(struct send_ctx *sctx,
 {
 	int ret = 0;
-	if (sctx->cur_ino != sctx->cmp_key->objectid) {
+	if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) {
 		inconsistent_snapshot_error(sctx, result, "reference");
 		return -EIO;
 	}
@@ -6989,7 +6989,7 @@ static int changed_xattr(struct send_ctx *sctx,
 {
 	int ret = 0;
-	if (sctx->cur_ino != sctx->cmp_key->objectid) {
+	if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) {
 		inconsistent_snapshot_error(sctx, result, "xattr");
 		return -EIO;
 	}

@@ -291,7 +291,7 @@ int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
 		ret = zlib_deflate(&workspace->strm, Z_FINISH);
 		if (ret == Z_STREAM_END)
 			break;
-		if (ret != Z_OK && ret != Z_BUF_ERROR) {
+		if (unlikely(ret != Z_OK && ret != Z_BUF_ERROR)) {
 			zlib_deflateEnd(&workspace->strm);
 			ret = -EIO;
 			goto out;

@@ -274,7 +274,7 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
 		return ret;
 	}
 	*nr_zones = ret;
-	if (!ret)
+	if (unlikely(!ret))
 		return -EIO;
 	/* Populate cache */
@@ -503,7 +503,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
 		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
 	}
-	if (nreported != zone_info->nr_zones) {
+	if (unlikely(nreported != zone_info->nr_zones)) {
 		btrfs_err(device->fs_info,
 			  "inconsistent number of zones on %s (%u/%u)",
 			  rcu_dereference(device->name), nreported,
@@ -513,7 +513,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
 	}
 	if (max_active_zones) {
-		if (nactive > max_active_zones) {
+		if (unlikely(nactive > max_active_zones)) {
 			if (bdev_max_active_zones(bdev) == 0) {
 				max_active_zones = 0;
 				zone_info->max_active_zones = 0;
@@ -901,7 +901,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
 				  zones);
 	if (ret < 0)
 		return ret;
-	if (ret != BTRFS_NR_SB_LOG_ZONES)
+	if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES))
 		return -EIO;
 	return sb_log_location(bdev, zones, rw, bytenr_ret);
@@ -1357,7 +1357,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
 		return 0;
 	}
-	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+	if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
 		btrfs_err(fs_info,
 		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
 			  zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
@@ -1399,7 +1399,7 @@ static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
 					 struct zone_info *info,
 					 unsigned long *active)
 {
-	if (info->alloc_offset == WP_MISSING_DEV) {
+	if (unlikely(info->alloc_offset == WP_MISSING_DEV)) {
 		btrfs_err(bg->fs_info,
 			  "zoned: cannot recover write pointer for zone %llu",
 			  info->physical);
@@ -1428,13 +1428,13 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
 	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
-	if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
+	if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
 		btrfs_err(bg->fs_info,
 			  "zoned: cannot recover write pointer for zone %llu",
 			  zone_info[0].physical);
 		return -EIO;
 	}
-	if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
+	if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
 		btrfs_err(bg->fs_info,
 			  "zoned: cannot recover write pointer for zone %llu",
 			  zone_info[1].physical);
@@ -1447,14 +1447,14 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
 	if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
 		zone_info[1].alloc_offset = last_alloc;
-	if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
+	if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
 		btrfs_err(bg->fs_info,
 			  "zoned: write pointer offset mismatch of zones in DUP profile");
 		return -EIO;
 	}
 	if (test_bit(0, active) != test_bit(1, active)) {
-		if (!btrfs_zone_activate(bg))
+		if (unlikely(!btrfs_zone_activate(bg)))
 			return -EIO;
 	} else if (test_bit(0, active)) {
 		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
@@ -1489,16 +1489,16 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
 		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
 			zone_info[i].alloc_offset = last_alloc;
-		if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
-		    !btrfs_test_opt(fs_info, DEGRADED)) {
+		if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
+			     !btrfs_test_opt(fs_info, DEGRADED))) {
 			btrfs_err(fs_info,
 				  "zoned: write pointer offset mismatch of zones in %s profile",
 				  btrfs_bg_type_to_raid_name(map->type));
 			return -EIO;
 		}
 		if (test_bit(0, active) != test_bit(i, active)) {
-			if (!btrfs_test_opt(fs_info, DEGRADED) &&
-			    !btrfs_zone_activate(bg)) {
+			if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) &&
+				     !btrfs_zone_activate(bg))) {
 				return -EIO;
 			}
 		} else {
@@ -1554,7 +1554,7 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
 		}
 		if (test_bit(0, active) != test_bit(i, active)) {
-			if (!btrfs_zone_activate(bg))
+			if (unlikely(!btrfs_zone_activate(bg)))
 				return -EIO;
 		} else {
 			if (test_bit(0, active))
@@ -1586,7 +1586,7 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
 			continue;
 		if (test_bit(0, active) != test_bit(i, active)) {
-			if (!btrfs_zone_activate(bg))
+			if (unlikely(!btrfs_zone_activate(bg)))
 				return -EIO;
 		} else {
 			if (test_bit(0, active))
@@ -1643,7 +1643,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		return 0;
 	/* Sanity check */
-	if (!IS_ALIGNED(length, fs_info->zone_size)) {
+	if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) {
 		btrfs_err(fs_info,
 		"zoned: block group %llu len %llu unaligned to zone size %llu",
 			  logical, length, fs_info->zone_size);
@@ -1756,7 +1756,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		return -EINVAL;
 	}
-	if (cache->alloc_offset > cache->zone_capacity) {
+	if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
 		btrfs_err(fs_info,
 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
 			  cache->alloc_offset, cache->zone_capacity,
@@ -2087,7 +2087,7 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
 	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
 			      &mapped_length, &bioc, NULL, NULL);
-	if (ret || !bioc || mapped_length < PAGE_SIZE) {
+	if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
 		ret = -EIO;
 		goto out_put_bioc;
 	}

@@ -654,7 +654,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		if (workspace->in_buf.pos == workspace->in_buf.size) {
 			kunmap_local(workspace->in_buf.src);
 			folio_in_index++;
-			if (folio_in_index >= total_folios_in) {
+			if (unlikely(folio_in_index >= total_folios_in)) {
 				workspace->in_buf.src = NULL;
 				ret = -EIO;
 				goto done;