vfs-6.19-rc1.folio

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCaSmOZQAKCRCRxhvAZXjc
 onGBAQDtqeO0jZzS7q9UxlJ84Wj/H9w+9INpO4jMxtWK4svhUAEAghG4qVxRvkE2
 Qh+wrpTPIC7OCQ78k8psDRmkj9cn8QA=
 =FCVN
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.19-rc1.folio' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull folio updates from Christian Brauner:
 "Add a new folio_next_pos() helper function that returns the file
  position of the first byte after the current folio. This is a common
  operation in filesystems that need to know the end of the current
  folio.

  The helper is lifted from btrfs which already had its own version, and
  is now used across multiple filesystems and subsystems:
   - btrfs
   - buffer
   - ext4
   - f2fs
   - gfs2
   - iomap
   - netfs
   - xfs
   - mm

  This fixes a long-standing bug in ocfs2 on 32-bit systems with files
  larger than 2GiB. Presumably this is not a common configuration, but
  the fix is backported anyway. The other filesystems did not have bugs;
  they were just mildly inefficient.

  This also introduces uoff_t as the unsigned version of loff_t. A recent
  commit inadvertently changed a comparison from being unsigned (on
  64-bit systems) to being signed (which it had always been on 32-bit
  systems), leading to sporadic fstests failures.

  Generally file sizes are restricted to being a signed integer, but in
  places where -1 is passed to indicate "up to the end of the file", it
  is convenient to have an unsigned type to ensure comparisons are
  always unsigned regardless of architecture"
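
As a minimal sketch of the pattern the helper names (illustrative only:
example_clamp_to_folio() and its parameters are invented here; folio_pos(),
folio_size() and folio_next_pos() are the real helpers, and the clamp mirrors
the ext4 write_begin hunks further down):

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Clamp @len so that a write starting at @pos does not cross the end of
 * @folio (assumes @pos already lies within the folio).  The end of the
 * folio used to be open-coded as folio_pos(folio) + folio_size(folio);
 * folio_next_pos() returns the same value, i.e. the file position of the
 * first byte after @folio.
 */
static inline size_t example_clamp_to_folio(const struct folio *folio,
					    loff_t pos, size_t len)
{
	loff_t end = folio_next_pos(folio);

	if (len > end - pos)
		len = end - pos;
	return len;
}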

* tag 'vfs-6.19-rc1.folio' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fs: Add uoff_t
  mm: Use folio_next_pos()
  xfs: Use folio_next_pos()
  netfs: Use folio_next_pos()
  iomap: Use folio_next_pos()
  gfs2: Use folio_next_pos()
  f2fs: Use folio_next_pos()
  ext4: Use folio_next_pos()
  buffer: Use folio_next_pos()
  btrfs: Use folio_next_pos()
  filemap: Add folio_next_pos()
Linus Torvalds 2025-12-01 10:26:38 -08:00
commit f2e74ecfba
25 changed files with 70 additions and 61 deletions


@@ -85,8 +85,8 @@ static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u6
 {
 	/* @cur must be inside the folio. */
 	ASSERT(folio_pos(folio) <= cur);
-	ASSERT(cur < folio_end(folio));
-	return min(range_end, folio_end(folio)) - cur;
+	ASSERT(cur < folio_next_pos(folio));
+	return umin(range_end, folio_next_pos(folio)) - cur;
 }
 
 int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info);


@@ -886,7 +886,7 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t
 	}
 	lock_start = folio_pos(folio);
-	lock_end = folio_end(folio) - 1;
+	lock_end = folio_next_pos(folio) - 1;
 	/* Wait for any existing ordered extent in the range */
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
@@ -1178,7 +1178,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
 		if (!folio)
 			break;
-		if (start >= folio_end(folio) || start + len <= folio_pos(folio))
+		if (start >= folio_next_pos(folio) ||
+		    start + len <= folio_pos(folio))
 			continue;
 		btrfs_folio_clamp_clear_checked(fs_info, folio, start, len);
 		btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
@@ -1219,7 +1220,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 			folios[i] = NULL;
 			goto free_folios;
 		}
-		cur = folio_end(folios[i]);
+		cur = folio_next_pos(folios[i]);
 	}
 	for (int i = 0; i < nr_pages; i++) {
 		if (!folios[i])


@@ -333,7 +333,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
 			goto out;
 		}
 		range_start = max_t(u64, folio_pos(folio), start);
-		range_len = min_t(u64, folio_end(folio), end + 1) - range_start;
+		range_len = min_t(u64, folio_next_pos(folio), end + 1) - range_start;
 		btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
 
 		processed_end = range_start + range_len - 1;
@@ -387,7 +387,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 	ASSERT(orig_end > orig_start);
 
 	/* The range should at least cover part of the folio */
-	ASSERT(!(orig_start >= folio_end(locked_folio) ||
+	ASSERT(!(orig_start >= folio_next_pos(locked_folio) ||
 		 orig_end <= folio_pos(locked_folio)));
 again:
 	/* step one, find a bunch of delalloc bytes starting at start */
@@ -493,7 +493,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 
 	ASSERT(folio_pos(folio) <= start &&
-	       start + len <= folio_end(folio));
+	       start + len <= folio_next_pos(folio));
 
 	if (uptodate && btrfs_verify_folio(folio, start, len))
 		btrfs_folio_set_uptodate(fs_info, folio, start, len);
@@ -1201,7 +1201,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
 	 * finished our folio read and unlocked the folio.
 	 */
 	if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
-		u64 range_len = min(folio_end(folio),
+		u64 range_len = umin(folio_next_pos(folio),
 				    ordered->file_offset + ordered->num_bytes) - cur;
 
 		ret = true;
@@ -1223,7 +1223,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
 	 * So we return true and update @next_ret to the OE/folio boundary.
 	 */
 	if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
-		u64 range_len = min(folio_end(folio),
+		u64 range_len = umin(folio_next_pos(folio),
 				    ordered->file_offset + ordered->num_bytes) - cur;
 
 		/*
@@ -2215,7 +2215,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 	for (int i = 0; i < num_extent_folios(eb); i++) {
 		struct folio *folio = eb->folios[i];
 		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
-		u32 range_len = min_t(u64, folio_end(folio),
+		u32 range_len = min_t(u64, folio_next_pos(folio),
 				      eb->start + eb->len) - range_start;
 
 		folio_lock(folio);
@@ -2624,7 +2624,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
 			continue;
 		}
 
-		cur_end = min_t(u64, folio_end(folio) - 1, end);
+		cur_end = min_t(u64, folio_next_pos(folio) - 1, end);
 		cur_len = cur_end + 1 - cur;
 
 		ASSERT(folio_test_locked(folio));
@@ -3865,7 +3865,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
 	for (int i = 0; i < num_extent_folios(eb); i++) {
 		struct folio *folio = eb->folios[i];
 		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
-		u32 range_len = min_t(u64, folio_end(folio),
+		u32 range_len = min_t(u64, folio_next_pos(folio),
 				      eb->start + eb->len) - range_start;
 
 		bio_add_folio_nofail(&bbio->bio, folio, range_len,


@@ -89,7 +89,8 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos
 	num_bytes = round_up(write_bytes + pos - start_pos,
 			     fs_info->sectorsize);
 	ASSERT(num_bytes <= U32_MAX);
-	ASSERT(folio_pos(folio) <= pos && folio_end(folio) >= pos + write_bytes);
+	ASSERT(folio_pos(folio) <= pos &&
+	       folio_next_pos(folio) >= pos + write_bytes);
 
 	end_of_last_block = start_pos + num_bytes - 1;
@@ -799,7 +800,7 @@ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64
 				  u64 len)
 {
 	u64 clamp_start = max_t(u64, pos, folio_pos(folio));
-	u64 clamp_end = min_t(u64, pos + len, folio_end(folio));
+	u64 clamp_end = min_t(u64, pos + len, folio_next_pos(folio));
 	const u32 blocksize = inode_to_fs_info(inode)->sectorsize;
 	int ret = 0;
@@ -1254,8 +1255,8 @@ static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter,
 	 * The reserved range goes beyond the current folio, shrink the reserved
 	 * space to the folio boundary.
 	 */
-	if (reserved_start + reserved_len > folio_end(folio)) {
-		const u64 last_block = folio_end(folio);
+	if (reserved_start + reserved_len > folio_next_pos(folio)) {
+		const u64 last_block = folio_next_pos(folio);
 
 		shrink_reserved_space(inode, *data_reserved, reserved_start,
 				      reserved_len, last_block - reserved_start,


@@ -411,7 +411,7 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
 			continue;
 		}
 
-		index = folio_end(folio) >> PAGE_SHIFT;
+		index = folio_next_index(folio);
 		/*
 		 * Here we just clear all Ordered bits for every page in the
 		 * range, then btrfs_mark_ordered_io_finished() will handle
@@ -2338,7 +2338,8 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
 	 * The range must cover part of the @locked_folio, or a return of 1
 	 * can confuse the caller.
 	 */
-	ASSERT(!(end <= folio_pos(locked_folio) || start >= folio_end(locked_folio)));
+	ASSERT(!(end <= folio_pos(locked_folio) ||
+		 start >= folio_next_pos(locked_folio)));
 
 	if (should_nocow(inode, start, end)) {
 		ret = run_delalloc_nocow(inode, locked_folio, start, end);
@@ -2745,7 +2746,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	struct btrfs_inode *inode = fixup->inode;
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	u64 page_start = folio_pos(folio);
-	u64 page_end = folio_end(folio) - 1;
+	u64 page_end = folio_next_pos(folio) - 1;
 	int ret = 0;
 	bool free_delalloc_space = true;
@@ -4857,7 +4858,7 @@ static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
 	 */
 	zero_start = max_t(u64, folio_pos(folio), start);
-	zero_end = folio_end(folio);
+	zero_end = folio_next_pos(folio);
 
 	folio_zero_range(folio, zero_start - folio_pos(folio),
 			 zero_end - zero_start);
@@ -5040,7 +5041,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e
 		 * not reach disk, it still affects our page caches.
 		 */
 		zero_start = max_t(u64, folio_pos(folio), start);
-		zero_end = min_t(u64, folio_end(folio) - 1, end);
+		zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
 	} else {
 		zero_start = max_t(u64, block_start, start);
 		zero_end = min_t(u64, block_end, end);


@@ -209,9 +209,4 @@ static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
 	return (found_set == start + nbits);
 }
 
-static inline u64 folio_end(struct folio *folio)
-{
-	return folio_pos(folio) + folio_size(folio);
-}
-
 #endif


@@ -359,7 +359,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 	if (folio) {
 		ASSERT(folio->mapping);
 		ASSERT(folio_pos(folio) <= file_offset);
-		ASSERT(file_offset + len <= folio_end(folio));
+		ASSERT(file_offset + len <= folio_next_pos(folio));
 
 		/*
 		 * Ordered flag indicates whether we still have


@@ -186,7 +186,8 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
 	 * unmapped page like dummy extent buffer pages.
 	 */
 	if (folio->mapping)
-		ASSERT(folio_pos(folio) <= start && start + len <= folio_end(folio),
+		ASSERT(folio_pos(folio) <= start &&
+		       start + len <= folio_next_pos(folio),
 		       "start=%llu len=%u folio_pos=%llu folio_size=%zu",
 		       start, len, folio_pos(folio), folio_size(folio));
 }
@@ -217,7 +218,7 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
 	if (folio_pos(folio) >= orig_start + orig_len)
 		*len = 0;
 	else
-		*len = min_t(u64, folio_end(folio), orig_start + orig_len) - *start;
+		*len = min_t(u64, folio_next_pos(folio), orig_start + orig_len) - *start;
 }
 
 static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,


@@ -2732,7 +2732,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
 	loff_t i_size = i_size_read(inode);
 
 	/* Is the folio fully inside i_size? */
-	if (folio_pos(folio) + folio_size(folio) <= i_size)
+	if (folio_next_pos(folio) <= i_size)
 		return __block_write_full_folio(inode, folio, get_block, wbc);
 
 	/* Is the folio fully outside i_size? (truncate in progress) */


@@ -1318,8 +1318,8 @@ static int ext4_write_begin(const struct kiocb *iocb,
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (pos + len > folio_pos(folio) + folio_size(folio))
-		len = folio_pos(folio) + folio_size(folio) - pos;
+	if (len > folio_next_pos(folio) - pos)
+		len = folio_next_pos(folio) - pos;
 
 	from = offset_in_folio(folio, pos);
 	to = from + len;
@@ -2700,7 +2700,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			if (mpd->map.m_len == 0)
 				mpd->start_pos = folio_pos(folio);
-			mpd->next_pos = folio_pos(folio) + folio_size(folio);
+			mpd->next_pos = folio_next_pos(folio);
 			/*
 			 * Writeout when we cannot modify metadata is simple.
 			 * Just submit the page. For data=journal mode we
@@ -3142,8 +3142,8 @@ static int ext4_da_write_begin(const struct kiocb *iocb,
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (pos + len > folio_pos(folio) + folio_size(folio))
-		len = folio_pos(folio) + folio_size(folio) - pos;
+	if (len > folio_next_pos(folio) - pos)
+		len = folio_next_pos(folio) - pos;
 
 	ret = ext4_block_write_begin(NULL, folio, pos, len,
 				     ext4_da_get_block_prep);


@@ -1329,7 +1329,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	}
 
 	folio = page_folio(cc->rpages[last_index]);
-	psize = folio_pos(folio) + folio_size(folio);
+	psize = folio_next_pos(folio);
 
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
 	if (err)


@@ -81,8 +81,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (folio_pos(folio) < i_size &&
-	    i_size < folio_pos(folio) + folio_size(folio))
+	if (folio_pos(folio) < i_size && i_size < folio_next_pos(folio))
 		folio_zero_segment(folio, offset_in_folio(folio, i_size),
 				   folio_size(folio));


@@ -775,7 +775,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter,
 	 * are not changing pagecache contents.
 	 */
 	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
-	    pos + len >= folio_pos(folio) + folio_size(folio))
+	    pos + len >= folio_next_pos(folio))
 		return 0;
 
 	ifs = ifs_alloc(iter->inode, folio, iter->flags);
@@ -1214,8 +1214,7 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 	if (!ifs)
 		return;
 
-	last_byte = min_t(loff_t, end_byte - 1,
-			  folio_pos(folio) + folio_size(folio) - 1);
+	last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
 	while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
@@ -1247,8 +1246,7 @@ static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
 		 * Make sure the next punch start is correctly bound to
 		 * the end of this data range, not the end of the folio.
 		 */
-		*punch_start_byte = min_t(loff_t, end_byte,
-					  folio_pos(folio) + folio_size(folio));
+		*punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
 	}
 
 	/*
@@ -1288,7 +1286,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
 				start_byte, end_byte, iomap, punch);
 
 		/* move offset to start of next folio in range */
-		start_byte = folio_pos(folio) + folio_size(folio);
+		start_byte = folio_next_pos(folio);
 		folio_unlock(folio);
 		folio_put(folio);
 	}


@@ -535,7 +535,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 		folio_unlock(folio);
 		err = filemap_fdatawrite_range(mapping,
 					       folio_pos(folio),
-					       folio_pos(folio) + folio_size(folio));
+					       folio_next_pos(folio));
 		switch (err) {
 		case 0:
 			ret = VM_FAULT_RETRY;


@@ -298,7 +298,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_dirty(folio))
 		return false;
 
-	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
+	end = umin(folio_next_pos(folio), i_size_read(&ctx->inode));
 	if (end > ctx->zero_point)
 		ctx->zero_point = end;


@@ -6892,7 +6892,7 @@ static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
 		ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1,
 					  &phys);
 
-		start = folio_next_index(folio) << PAGE_SHIFT;
+		start = folio_next_pos(folio);
 	}
 out:
 	if (folios)


@@ -834,7 +834,7 @@ xfarray_sort_scan(
 		si->first_folio_idx = xfarray_idx(si->array,
 				folio_pos(si->folio) + si->array->obj_size - 1);
 
-		next_pos = folio_pos(si->folio) + folio_size(si->folio);
+		next_pos = folio_next_pos(si->folio);
 		si->last_folio_idx = xfarray_idx(si->array, next_pos - 1);
 		if (xfarray_pos(si->array, si->last_folio_idx + 1) > next_pos)
 			si->last_folio_idx--;


@@ -271,7 +271,7 @@ xfs_discard_folio(
 	 * folio itself and not the start offset that is passed in.
 	 */
 	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
-			folio_pos(folio) + folio_size(folio), NULL);
+			folio_next_pos(folio), NULL);
 }
 
 /*


@@ -3502,10 +3502,10 @@ struct vm_unmapped_area_info {
 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
 
 /* truncate.c */
-extern void truncate_inode_pages(struct address_space *, loff_t);
-extern void truncate_inode_pages_range(struct address_space *,
-				       loff_t lstart, loff_t lend);
-extern void truncate_inode_pages_final(struct address_space *);
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
+void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
+		uoff_t lend);
+void truncate_inode_pages_final(struct address_space *mapping);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern vm_fault_t filemap_fault(struct vm_fault *vmf);


@@ -938,6 +938,17 @@ static inline pgoff_t folio_next_index(const struct folio *folio)
 	return folio->index + folio_nr_pages(folio);
 }
 
+/**
+ * folio_next_pos - Get the file position of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The position of the folio which follows this folio in the file.
+ */
+static inline loff_t folio_next_pos(const struct folio *folio)
+{
+	return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
+}
+
 /**
  * folio_file_page - The page for a particular index.
  * @folio: The folio which contains this index.


@@ -111,7 +111,7 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					  pgoff_t index, gfp_t gfp_mask);
 int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
 		   struct list_head *folio_list);
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+void shmem_truncate_range(struct inode *inode, loff_t start, uoff_t end);
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE


@@ -50,6 +50,7 @@ typedef __kernel_old_gid_t old_gid_t;
 #if defined(__GNUC__)
 typedef __kernel_loff_t		loff_t;
+typedef __kernel_uoff_t		uoff_t;
 #endif
 
 /*


@@ -86,6 +86,7 @@ typedef struct {
  */
 typedef __kernel_long_t	__kernel_off_t;
 typedef long long	__kernel_loff_t;
+typedef unsigned long long	__kernel_uoff_t;
 typedef __kernel_long_t	__kernel_old_time_t;
 #ifndef __KERNEL__
 typedef __kernel_long_t	__kernel_time_t;
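
A stand-alone sketch of why the unsigned type matters (userspace C with
stand-in typedefs, not the kernel's own definitions): when lend is -1,
meaning "up to the end of the file", a signed comparison treats it as
smaller than every real file position, while an unsigned comparison treats
it as larger, which is the intended meaning. The shmem and truncate hunks
below make exactly this kind of comparison with a uoff_t lend.

#include <stdio.h>

typedef long long sketch_loff_t;		/* signed, like loff_t */
typedef unsigned long long sketch_uoff_t;	/* unsigned, like uoff_t */

int main(void)
{
	sketch_loff_t folio_end = 4096;		/* first byte after some folio */
	sketch_loff_t signed_lend = -1;		/* -1 = up to end of file */
	sketch_uoff_t unsigned_lend = -1;	/* same bit pattern, unsigned */

	/* Signed: -1 < 4096, so the range appears to end inside the folio. */
	printf("signed:   %d\n", signed_lend < folio_end);

	/* Unsigned: -1 becomes the largest offset, so it does not. */
	printf("unsigned: %d\n", unsigned_lend < (sketch_uoff_t)folio_end);

	return 0;
}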


@@ -1075,7 +1075,7 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
  * Remove range of pages and swap entries from page cache, and free them.
  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
-static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+static void shmem_undo_range(struct inode *inode, loff_t lstart, uoff_t lend,
 			     bool unfalloc)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -1132,7 +1132,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
-		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		same_folio = lend < folio_next_pos(folio);
 		folio_mark_dirty(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio_next_index(folio);
@@ -1226,7 +1226,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
 }
 
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
 {
 	shmem_undo_range(inode, lstart, lend, false);
 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
@@ -5778,7 +5778,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 }
 #endif
 
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
 {
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
 }


@@ -364,7 +364,7 @@ long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
  * page aligned properly.
  */
 void truncate_inode_pages_range(struct address_space *mapping,
-				loff_t lstart, loff_t lend)
+				loff_t lstart, uoff_t lend)
 {
 	pgoff_t start;		/* inclusive */
 	pgoff_t end;		/* exclusive */
@@ -412,7 +412,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
 	if (!IS_ERR(folio)) {
-		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		same_folio = lend < folio_next_pos(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio_next_index(folio);
 			if (same_folio)