From 4511fd86db6f8f94f8aff01044f5c69aa38f81f4 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:09 +0100
Subject: [PATCH 01/11] filemap: Add folio_next_pos()

Replace the open-coded implementation in ocfs2 (which loses the top 32
bits on 32-bit architectures) with a helper in pagemap.h.
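
To illustrate the bug (hypothetical values, not from the patch): with a
32-bit unsigned long and 4KiB pages, the open-coded shift is evaluated
in 32-bit arithmetic before the result is widened to loff_t, so any
position at or beyond 4GiB wraps:

	pgoff_t index = 0x123456;	/* file position 0x123456000, past 4GiB */
	loff_t bad  = index << PAGE_SHIFT;		/* wraps to 0x23456000 */
	loff_t good = (loff_t)index << PAGE_SHIFT;	/* 0x123456000, as intended */

folio_next_pos() performs the cast before the shift, which is what the
open-coded ocfs2 version was missing.
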
Fixes: 35edec1d52c0 ("ocfs2: update truncate handling of partial clusters")
Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-2-willy@infradead.org
Reviewed-by: Joseph Qi
Reviewed-by: Christoph Hellwig
Cc: Mark Fasheh
Cc: Joel Becker
Cc: Joseph Qi
Cc: ocfs2-devel@lists.linux.dev
Signed-off-by: Christian Brauner
---
 fs/ocfs2/alloc.c        |  2 +-
 include/linux/pagemap.h | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 162711cc5b20..b267ec580da9 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6892,7 +6892,7 @@ static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
 		ocfs2_map_and_dirty_folio(inode, handle, from, to,
 					  folio, 1, &phys);
 
-		start = folio_next_index(folio) << PAGE_SHIFT;
+		start = folio_next_pos(folio);
 	}
 out:
 	if (folios)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09b581c1d878..e16576e3763a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -941,6 +941,17 @@ static inline pgoff_t folio_next_index(const struct folio *folio)
 	return folio->index + folio_nr_pages(folio);
 }
 
+/**
+ * folio_next_pos - Get the file position of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The position of the folio which follows this folio in the file.
+ */
+static inline loff_t folio_next_pos(const struct folio *folio)
+{
+	return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
+}
+
 /**
  * folio_file_page - The page for a particular index.
  * @folio: The folio which contains this index.

From 48f3784b17d91457586fbbc292675206f166a138 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:10 +0100
Subject: [PATCH 02/11] btrfs: Use folio_next_pos()

btrfs defined its own variant of folio_next_pos() called folio_end().
This is an ambiguous name, as 'end' might be exclusive or inclusive.
Switch to the new folio_next_pos().

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-3-willy@infradead.org
Acked-by: David Sterba
Cc: Chris Mason
Cc: David Sterba
Cc: linux-btrfs@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/btrfs/compression.h  |  4 ++--
 fs/btrfs/defrag.c       |  7 ++++---
 fs/btrfs/extent_io.c    | 16 ++++++++--------
 fs/btrfs/file.c         |  9 +++++----
 fs/btrfs/inode.c        | 11 ++++++-----
 fs/btrfs/misc.h         |  5 -----
 fs/btrfs/ordered-data.c |  2 +-
 fs/btrfs/subpage.c      |  5 +++--
 8 files changed, 29 insertions(+), 30 deletions(-)

diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index eba188a9e3bb..aee1fd21cdd6 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -85,8 +85,8 @@ static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u6
 {
 	/* @cur must be inside the folio. */
 	ASSERT(folio_pos(folio) <= cur);
-	ASSERT(cur < folio_end(folio));
-	return min(range_end, folio_end(folio)) - cur;
+	ASSERT(cur < folio_next_pos(folio));
+	return umin(range_end, folio_next_pos(folio)) - cur;
 }
 
 int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 7b277934f66f..8fb353feacc9 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -886,7 +886,7 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t
 	}
 
 	lock_start = folio_pos(folio);
-	lock_end = folio_end(folio) - 1;
+	lock_end = folio_next_pos(folio) - 1;
 	/* Wait for any existing ordered extent in the range */
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
@@ -1178,7 +1178,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
 		if (!folio)
 			break;
 
-		if (start >= folio_end(folio) || start + len <= folio_pos(folio))
+		if (start >= folio_next_pos(folio) ||
+		    start + len <= folio_pos(folio))
 			continue;
 		btrfs_folio_clamp_clear_checked(fs_info, folio, start, len);
 		btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
@@ -1219,7 +1220,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 			folios[i] = NULL;
 			goto free_folios;
 		}
-		cur = folio_end(folios[i]);
+		cur = folio_next_pos(folios[i]);
 	}
 	for (int i = 0; i < nr_pages; i++) {
 		if (!folios[i])
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c123a3ef154a..d808465163a5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -333,7 +333,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
 			goto out;
 		}
 		range_start = max_t(u64, folio_pos(folio), start);
-		range_len = min_t(u64, folio_end(folio), end + 1) - range_start;
+		range_len = min_t(u64, folio_next_pos(folio), end + 1) - range_start;
 		btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
 
 		processed_end = range_start + range_len - 1;
@@ -387,7 +387,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 	ASSERT(orig_end > orig_start);
 
 	/* The range should at least cover part of the folio */
-	ASSERT(!(orig_start >= folio_end(locked_folio) ||
+	ASSERT(!(orig_start >= folio_next_pos(locked_folio) ||
 		 orig_end <= folio_pos(locked_folio)));
 again:
 	/* step one, find a bunch of delalloc bytes starting at start */
@@ -493,7 +493,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 
 	ASSERT(folio_pos(folio) <= start &&
-	       start + len <= folio_end(folio));
+	       start + len <= folio_next_pos(folio));
 
 	if (uptodate && btrfs_verify_folio(folio, start, len))
 		btrfs_folio_set_uptodate(fs_info, folio, start, len);
@@ -1201,7 +1201,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
 	 * finished our folio read and unlocked the folio.
 	 */
 	if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
-		u64 range_len = min(folio_end(folio),
+		u64 range_len = umin(folio_next_pos(folio),
 				    ordered->file_offset + ordered->num_bytes) - cur;
 
 		ret = true;
@@ -1223,7 +1223,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
 	 * So we return true and update @next_ret to the OE/folio boundary.
 	 */
 	if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
-		u64 range_len = min(folio_end(folio),
+		u64 range_len = umin(folio_next_pos(folio),
 				    ordered->file_offset + ordered->num_bytes) - cur;
 
 		/*
@@ -2215,7 +2215,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 	for (int i = 0; i < num_extent_folios(eb); i++) {
 		struct folio *folio = eb->folios[i];
 		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
-		u32 range_len = min_t(u64, folio_end(folio),
+		u32 range_len = min_t(u64, folio_next_pos(folio),
 				      eb->start + eb->len) - range_start;
 
 		folio_lock(folio);
@@ -2619,7 +2619,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
 			continue;
 		}
 
-		cur_end = min_t(u64, folio_end(folio) - 1, end);
+		cur_end = min_t(u64, folio_next_pos(folio) - 1, end);
 		cur_len = cur_end + 1 - cur;
 
 		ASSERT(folio_test_locked(folio));
@@ -3860,7 +3860,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
 	for (int i = 0; i < num_extent_folios(eb); i++) {
 		struct folio *folio = eb->folios[i];
 		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
-		u32 range_len = min_t(u64, folio_end(folio),
+		u32 range_len = min_t(u64, folio_next_pos(folio),
 				      eb->start + eb->len) - range_start;
 
 		bio_add_folio_nofail(&bbio->bio, folio, range_len,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7efd1f8a1912..977931cfa71e 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -89,7 +89,8 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos
 	num_bytes = round_up(write_bytes + pos - start_pos,
 			     fs_info->sectorsize);
 	ASSERT(num_bytes <= U32_MAX);
-	ASSERT(folio_pos(folio) <= pos && folio_end(folio) >= pos + write_bytes);
+	ASSERT(folio_pos(folio) <= pos &&
+	       folio_next_pos(folio) >= pos + write_bytes);
 
 	end_of_last_block = start_pos + num_bytes - 1;
 
@@ -799,7 +800,7 @@ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64
 			       u64 len)
 {
 	u64 clamp_start = max_t(u64, pos, folio_pos(folio));
-	u64 clamp_end = min_t(u64, pos + len, folio_end(folio));
+	u64 clamp_end = min_t(u64, pos + len, folio_next_pos(folio));
 	const u32 blocksize = inode_to_fs_info(inode)->sectorsize;
 	int ret = 0;
 
@@ -1254,8 +1255,8 @@ static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter,
 	 * The reserved range goes beyond the current folio, shrink the reserved
 	 * space to the folio boundary.
 	 */
-	if (reserved_start + reserved_len > folio_end(folio)) {
-		const u64 last_block = folio_end(folio);
+	if (reserved_start + reserved_len > folio_next_pos(folio)) {
+		const u64 last_block = folio_next_pos(folio);
 
 		shrink_reserved_space(inode, *data_reserved, reserved_start,
 				      reserved_len, last_block - reserved_start,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3b1b3a0553ee..b7b498fffa4f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -409,7 +409,7 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
 			continue;
 		}
 
-		index = folio_end(folio) >> PAGE_SHIFT;
+		index = folio_next_index(folio);
 		/*
 		 * Here we just clear all Ordered bits for every page in the
 		 * range, then btrfs_mark_ordered_io_finished() will handle
@@ -2336,7 +2336,8 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
 	 * The range must cover part of the @locked_folio, or a return of 1
 	 * can confuse the caller.
 	 */
-	ASSERT(!(end <= folio_pos(locked_folio) || start >= folio_end(locked_folio)));
+	ASSERT(!(end <= folio_pos(locked_folio) ||
+		 start >= folio_next_pos(locked_folio)));
 
 	if (should_nocow(inode, start, end)) {
 		ret = run_delalloc_nocow(inode, locked_folio, start, end);
@@ -2743,7 +2744,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	struct btrfs_inode *inode = fixup->inode;
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	u64 page_start = folio_pos(folio);
-	u64 page_end = folio_end(folio) - 1;
+	u64 page_end = folio_next_pos(folio) - 1;
 	int ret = 0;
 	bool free_delalloc_space = true;
 
@@ -4855,7 +4856,7 @@ static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
 	 */
 	zero_start = max_t(u64, folio_pos(folio), start);
-	zero_end = folio_end(folio);
+	zero_end = folio_next_pos(folio);
 
 	folio_zero_range(folio, zero_start - folio_pos(folio),
 			 zero_end - zero_start);
 
@@ -5038,7 +5039,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e
 		 * not reach disk, it still affects our page caches.
 		 */
 		zero_start = max_t(u64, folio_pos(folio), start);
-		zero_end = min_t(u64, folio_end(folio) - 1, end);
+		zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
 	} else {
 		zero_start = max_t(u64, block_start, start);
 		zero_end = min_t(u64, block_end, end);
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 60f9b000d644..17b71e1285e5 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -209,9 +209,4 @@ static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
 	return (found_set == start + nbits);
 }
 
-static inline u64 folio_end(struct folio *folio)
-{
-	return folio_pos(folio) + folio_size(folio);
-}
-
 #endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2829f20d7bb5..7fedebbee558 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -359,7 +359,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 	if (folio) {
 		ASSERT(folio->mapping);
 		ASSERT(folio_pos(folio) <= file_offset);
-		ASSERT(file_offset + len <= folio_end(folio));
+		ASSERT(file_offset + len <= folio_next_pos(folio));
 
 		/*
 		 * Ordered flag indicates whether we still have
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 5ca8d4db6722..a7ba868e9372 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -186,7 +186,8 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
 	 * unmapped page like dummy extent buffer pages.
 	 */
 	if (folio->mapping)
-		ASSERT(folio_pos(folio) <= start && start + len <= folio_end(folio),
+		ASSERT(folio_pos(folio) <= start &&
+		       start + len <= folio_next_pos(folio),
 		       "start=%llu len=%u folio_pos=%llu folio_size=%zu",
 		       start, len, folio_pos(folio), folio_size(folio));
 }
@@ -217,7 +218,7 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
 	if (folio_pos(folio) >= orig_start + orig_len)
 		*len = 0;
 	else
-		*len = min_t(u64, folio_end(folio), orig_start + orig_len) - *start;
+		*len = min_t(u64, folio_next_pos(folio), orig_start + orig_len) - *start;
 }
 
 static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,

From 6870892b6437bf7bdf37ca875bf842fe1e1385df Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:11 +0100
Subject: [PATCH 03/11] buffer: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).
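
Sketched out (simplified; the real helpers hide the casts behind
folio_pos(), folio_size() and folio_next_index()), the difference is:

	/* folio_pos() + folio_size(): shift each term, then add */
	end = ((loff_t)folio->index << PAGE_SHIFT) +
	      ((loff_t)folio_nr_pages(folio) << PAGE_SHIFT);

	/* folio_next_pos(): add first, then shift once */
	end = (loff_t)(folio->index + folio_nr_pages(folio)) << PAGE_SHIFT;
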
Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-4-willy@infradead.org
Reviewed-by: Jan Kara
Reviewed-by: Christoph Hellwig
Cc: Jan Kara
Signed-off-by: Christian Brauner
---
 fs/buffer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 6a8752f7bbed..185ceb0d6baa 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2732,7 +2732,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
 	loff_t i_size = i_size_read(inode);
 
 	/* Is the folio fully inside i_size? */
-	if (folio_pos(folio) + folio_size(folio) <= i_size)
+	if (folio_next_pos(folio) <= i_size)
 		return __block_write_full_folio(inode, folio, get_block, wbc);
 
 	/* Is the folio fully outside i_size? (truncate in progress) */

From 4db47b252190e850e6fd4835c1ca6deea540487d Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:12 +0100
Subject: [PATCH 04/11] ext4: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-5-willy@infradead.org
Reviewed-by: Christoph Hellwig
Reviewed-by: Zhang Yi
Cc: Theodore Ts'o
Cc: Andreas Dilger
Cc: linux-ext4@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/ext4/inode.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f9e4ac87211e..c18465265ce6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1319,8 +1319,8 @@ static int ext4_write_begin(const struct kiocb *iocb,
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (pos + len > folio_pos(folio) + folio_size(folio))
-		len = folio_pos(folio) + folio_size(folio) - pos;
+	if (len > folio_next_pos(folio) - pos)
+		len = folio_next_pos(folio) - pos;
 
 	from = offset_in_folio(folio, pos);
 	to = from + len;
@@ -2704,7 +2704,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 
 			if (mpd->map.m_len == 0)
 				mpd->start_pos = folio_pos(folio);
-			mpd->next_pos = folio_pos(folio) + folio_size(folio);
+			mpd->next_pos = folio_next_pos(folio);
 			/*
 			 * Writeout when we cannot modify metadata is simple.
 			 * Just submit the page. For data=journal mode we
@@ -3146,8 +3146,8 @@ static int ext4_da_write_begin(const struct kiocb *iocb,
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (pos + len > folio_pos(folio) + folio_size(folio))
-		len = folio_pos(folio) + folio_size(folio) - pos;
+	if (len > folio_next_pos(folio) - pos)
+		len = folio_next_pos(folio) - pos;
 
 	ret = ext4_block_write_begin(NULL, folio, pos, len,
 				     ext4_da_get_block_prep);

From 4fcafa30b70a9cecc1ca04fbeec95a61b52b9d35 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:13 +0100
Subject: [PATCH 05/11] f2fs: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-6-willy@infradead.org
Reviewed-by: Chao Yu
Cc: Jaegeuk Kim
Cc: Chao Yu
Cc: linux-f2fs-devel@lists.sourceforge.net
Signed-off-by: Christian Brauner
---
 fs/f2fs/compress.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 6ad8d3bc6df7..be53e06caf3d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1329,7 +1329,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	}
 
 	folio = page_folio(cc->rpages[last_index]);
-	psize = folio_pos(folio) + folio_size(folio);
+	psize = folio_next_pos(folio);
 
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
 	if (err)

From 5f0fc785322dfcd8b16e921760c0a4e685086a1f Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:14 +0100
Subject: [PATCH 06/11] gfs2: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-7-willy@infradead.org
Cc: Andreas Gruenbacher
Cc: gfs2@lists.linux.dev
Signed-off-by: Christian Brauner
---
 fs/gfs2/aops.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 47d74afd63ac..d8ba97bad8bb 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -81,8 +81,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (folio_pos(folio) < i_size &&
-	    i_size < folio_pos(folio) + folio_size(folio))
+	if (folio_pos(folio) < i_size && i_size < folio_next_pos(folio))
 		folio_zero_segment(folio, offset_in_folio(folio, i_size),
 				   folio_size(folio));
 

From 2408900d408ae55ab89861d2dd75ef6e51405dcd Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:15 +0100
Subject: [PATCH 07/11] iomap: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-8-willy@infradead.org
Reviewed-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
Cc: Christian Brauner
Cc: Darrick J. Wong
Cc: linux-xfs@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/iomap/buffered-io.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 8b847a1e27f1..32a27f36372d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -707,7 +707,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter,
 	 * are not changing pagecache contents.
 	 */
 	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
-	    pos + len >= folio_pos(folio) + folio_size(folio))
+	    pos + len >= folio_next_pos(folio))
 		return 0;
 
 	ifs = ifs_alloc(iter->inode, folio, iter->flags);
@@ -1097,8 +1097,7 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 	if (!ifs)
 		return;
 
-	last_byte = min_t(loff_t, end_byte - 1,
-			  folio_pos(folio) + folio_size(folio) - 1);
+	last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
 	for (i = first_blk; i <= last_blk; i++) {
@@ -1129,8 +1128,7 @@ static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
 	 * Make sure the next punch start is correctly bound to
 	 * the end of this data range, not the end of the folio.
 	 */
-	*punch_start_byte = min_t(loff_t, end_byte,
-				  folio_pos(folio) + folio_size(folio));
+	*punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
 }
 
 /*
@@ -1170,7 +1168,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
 				start_byte, end_byte, iomap, punch);
 
 		/* move offset to start of next folio in range */
-		start_byte = folio_pos(folio) + folio_size(folio);
+		start_byte = folio_next_pos(folio);
 		folio_unlock(folio);
 		folio_put(folio);
 	}

From ac97520804757b685bc3f29811ac25616183ead3 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:16 +0100
Subject: [PATCH 08/11] netfs: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-9-willy@infradead.org
Acked-by: David Howells
Reviewed-by: Paulo Alcantara (Red Hat)
Cc: David Howells
Cc: Paulo Alcantara
Cc: netfs@lists.linux.dev
Signed-off-by: Christian Brauner
---
 fs/netfs/buffered_write.c | 2 +-
 fs/netfs/misc.c           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 09394ac2c180..f9d62abef2ac 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -535,7 +535,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 		folio_unlock(folio);
 		err = filemap_fdatawrite_range(mapping, folio_pos(folio),
-					       folio_pos(folio) + folio_size(folio));
+					       folio_next_pos(folio));
 		switch (err) {
 		case 0:
 			ret = VM_FAULT_RETRY;
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 486166460e17..82342c6d22cb 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -298,7 +298,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_dirty(folio))
 		return false;
 
-	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
+	end = umin(folio_next_pos(folio), i_size_read(&ctx->inode));
 	if (end > ctx->zero_point)
 		ctx->zero_point = end;
 

From ac0a11113de3f0007283d20f5a38615ea4c6e9fd Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:17 +0100
Subject: [PATCH 09/11] xfs: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-10-willy@infradead.org
Reviewed-by: Christoph Hellwig
Cc: Carlos Maiolino
Cc: linux-xfs@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/xfs/scrub/xfarray.c | 2 +-
 fs/xfs/xfs_aops.c      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/xfs/scrub/xfarray.c b/fs/xfs/scrub/xfarray.c
index cdd13ed9c569..ed2e8c64b1a8 100644
--- a/fs/xfs/scrub/xfarray.c
+++ b/fs/xfs/scrub/xfarray.c
@@ -834,7 +834,7 @@ xfarray_sort_scan(
 		si->first_folio_idx = xfarray_idx(si->array,
 				folio_pos(si->folio) + si->array->obj_size - 1);
 
-		next_pos = folio_pos(si->folio) + folio_size(si->folio);
+		next_pos = folio_next_pos(si->folio);
 		si->last_folio_idx = xfarray_idx(si->array, next_pos - 1);
 		if (xfarray_pos(si->array, si->last_folio_idx + 1) > next_pos)
 			si->last_folio_idx--;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a26f79815533..ad76d5d01dd1 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -271,7 +271,7 @@ xfs_discard_folio(
 	 * folio itself and not the start offset that is passed in.
 	 */
 	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
-			folio_pos(folio) + folio_size(folio), NULL);
+			folio_next_pos(folio), NULL);
 }
 
 /*

From 60a70e61430b2d568bc5e96f629c5855ee159ace Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 24 Oct 2025 18:08:18 +0100
Subject: [PATCH 10/11] mm: Use folio_next_pos()

This is one instruction more efficient than open-coding folio_pos() +
folio_size(). It's the equivalent of (x + y) << z rather than
(x << z) + (y << z).

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251024170822.1427218-11-willy@infradead.org
Reviewed-by: David Hildenbrand
Reviewed-by: Christoph Hellwig
Cc: Hugh Dickins
Cc: Baolin Wang
Cc: Andrew Morton
Cc: linux-mm@kvack.org
Signed-off-by: Christian Brauner
---
 mm/shmem.c    | 2 +-
 mm/truncate.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index b9081b817d28..c819cecf1ed9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1133,7 +1133,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
-		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		same_folio = lend < folio_next_pos(folio);
 		folio_mark_dirty(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio_next_index(folio);
diff --git a/mm/truncate.c b/mm/truncate.c
index 91eb92a5ce4f..a3d673533e32 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -387,7 +387,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
 	if (!IS_ERR(folio)) {
-		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		same_folio = lend < folio_next_pos(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio_next_index(folio);
 			if (same_folio)

From 37d369fa97cc0774ea4eab726d16bcb5fbe3a104 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Sun, 23 Nov 2025 22:05:15 +0000
Subject: [PATCH 11/11] fs: Add uoff_t

In a recent commit, I inadvertently changed a comparison from being an
unsigned comparison (on 64-bit systems) to being a signed comparison
(which it had always been on 32-bit systems). This led to a sporadic
fstests failure.
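
A sketch of the failure mode (illustrative only, based on the
same_folio comparison in truncate_inode_pages_range()), where lend is
-1 when truncating to the end of the file:

	loff_t lend = -1;	/* "to the end of the file" */

	/* Signed: -1 < any folio position, so same_folio is wrongly true. */
	same_folio = lend < folio_next_pos(folio);

	/* Unsigned: ~0ULL is greater than any position, so it is false. */
	same_folio = (uoff_t)lend < folio_next_pos(folio);
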
To make sure this comparison is always unsigned, introduce a new type,
uoff_t, which is the unsigned version of loff_t. Generally file sizes
are restricted to being a signed integer, but in these two places it is
convenient to pass -1 to indicate "up to the end of the file".

Signed-off-by: Matthew Wilcox (Oracle)
Link: https://patch.msgid.link/20251123220518.1447261-1-willy@infradead.org
Signed-off-by: Christian Brauner
---
 include/linux/mm.h                     | 8 ++++----
 include/linux/shmem_fs.h               | 2 +-
 include/linux/types.h                  | 1 +
 include/uapi/asm-generic/posix_types.h | 1 +
 mm/shmem.c                             | 6 +++---
 mm/truncate.c                          | 2 +-
 6 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d16b33bacc32..2a36d1bcf491 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3495,10 +3495,10 @@ struct vm_unmapped_area_info {
 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
 
 /* truncate.c */
-extern void truncate_inode_pages(struct address_space *, loff_t);
-extern void truncate_inode_pages_range(struct address_space *,
-		loff_t lstart, loff_t lend);
-extern void truncate_inode_pages_final(struct address_space *);
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
+void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
+		uoff_t lend);
+void truncate_inode_pages_final(struct address_space *mapping);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 0e47465ef0fd..774efe592a9a 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -111,7 +111,7 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					 pgoff_t index, gfp_t gfp_mask);
 int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
 		   struct list_head *folio_list);
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+void shmem_truncate_range(struct inode *inode, loff_t start, uoff_t end);
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/types.h b/include/linux/types.h
index 6dfdb8e8e4c3..d4437e9c452c 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -50,6 +50,7 @@ typedef __kernel_old_gid_t	old_gid_t;
 
 #if defined(__GNUC__)
 typedef __kernel_loff_t		loff_t;
+typedef __kernel_uoff_t		uoff_t;
 #endif
 
 /*
diff --git a/include/uapi/asm-generic/posix_types.h b/include/uapi/asm-generic/posix_types.h
index b5f7594eee7a..0a90ad92dbf3 100644
--- a/include/uapi/asm-generic/posix_types.h
+++ b/include/uapi/asm-generic/posix_types.h
@@ -86,6 +86,7 @@ typedef struct {
  */
 typedef __kernel_long_t	__kernel_off_t;
 typedef long long	__kernel_loff_t;
+typedef unsigned long long __kernel_uoff_t;
 typedef __kernel_long_t	__kernel_old_time_t;
 #ifndef __KERNEL__
 typedef __kernel_long_t	__kernel_time_t;
diff --git a/mm/shmem.c b/mm/shmem.c
index c819cecf1ed9..43b41a42c463 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1076,7 +1076,7 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
  * Remove range of pages and swap entries from page cache, and free them.
  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
-static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+static void shmem_undo_range(struct inode *inode, loff_t lstart, uoff_t lend,
 			     bool unfalloc)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -1227,7 +1227,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
 }
 
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
 {
 	shmem_undo_range(inode, lstart, lend, false);
 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
@@ -5776,7 +5776,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 }
 #endif
 
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
 }
diff --git a/mm/truncate.c b/mm/truncate.c
index a3d673533e32..fbe848fdc391 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -339,7 +339,7 @@ long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
  * page aligned properly.
  */
 void truncate_inode_pages_range(struct address_space *mapping,
-		loff_t lstart, loff_t lend)
+		loff_t lstart, uoff_t lend)
 {
 	pgoff_t		start;		/* inclusive */
 	pgoff_t		end;		/* exclusive */