mirror of https://github.com/torvalds/linux.git
Merge patch series "Add and use folio_next_pos()"
Matthew Wilcox (Oracle) <willy@infradead.org> says:

It's relatively common in filesystems to want to know the end of the current folio we're looking at. So common in fact that btrfs has its own helper for that. Lift that helper to filemap and use it everywhere that I've noticed it could be used.

This actually fixes a long-standing bug in ocfs2 on 32-bit systems with files larger than 2GiB. Presumably this is not a common configuration, but I've marked it for backport anyway. The other filesystems are all fine; none of them have a bug, they're just mildly inefficient.

I think this should all go in via Christian's tree, ideally with acks from the various fs maintainers (cc'd on their individual patches).

* patches from https://patch.msgid.link/20251024170822.1427218-1-willy@infradead.org:
mm: Use folio_next_pos()
xfs: Use folio_next_pos()
netfs: Use folio_next_pos()
iomap: Use folio_next_pos()
gfs2: Use folio_next_pos()
f2fs: Use folio_next_pos()
ext4: Use folio_next_pos()
buffer: Use folio_next_pos()
btrfs: Use folio_next_pos()
filemap: Add folio_next_pos()

Link: https://patch.msgid.link/20251024170822.1427218-1-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit a77a59592f
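The conversion this series performs is mechanical, so a small illustration may help when skimming the hunks below. The helper definition is taken from the include/linux/pagemap.h hunk in this merge; the example_* wrappers around it are hypothetical and exist only to show the before/after shape of a typical call site.

/* Helper added by this series in include/linux/pagemap.h. */
static inline loff_t folio_next_pos(const struct folio *folio)
{
	/*
	 * Widen to loff_t before shifting so the result cannot be truncated
	 * to the width of pgoff_t (a 32-bit unsigned long on 32-bit kernels).
	 */
	return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
}

/* Hypothetical call site before the conversion: open-coded end of folio. */
static loff_t example_folio_end_old(const struct folio *folio)
{
	return folio_pos(folio) + folio_size(folio);
}

/* The same hypothetical call site after the conversion. */
static loff_t example_folio_end_new(const struct folio *folio)
{
	return folio_next_pos(folio);
}

Most hunks below are exactly this substitution; the btrfs ones additionally drop the filesystem-local folio_end() helper that folio_next_pos() supersedes.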
@@ -85,8 +85,8 @@ static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u6
 {
 	/* @cur must be inside the folio. */
 	ASSERT(folio_pos(folio) <= cur);
-	ASSERT(cur < folio_end(folio));
-	return min(range_end, folio_end(folio)) - cur;
+	ASSERT(cur < folio_next_pos(folio));
+	return umin(range_end, folio_next_pos(folio)) - cur;
 }
 
 int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info);

@@ -886,7 +886,7 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t
 	}
 
 	lock_start = folio_pos(folio);
-	lock_end = folio_end(folio) - 1;
+	lock_end = folio_next_pos(folio) - 1;
 	/* Wait for any existing ordered extent in the range */
 	while (1) {
 		struct btrfs_ordered_extent *ordered;

@@ -1178,7 +1178,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
 
 		if (!folio)
 			break;
-		if (start >= folio_end(folio) || start + len <= folio_pos(folio))
+		if (start >= folio_next_pos(folio) ||
+		    start + len <= folio_pos(folio))
 			continue;
 		btrfs_folio_clamp_clear_checked(fs_info, folio, start, len);
 		btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);

@@ -1219,7 +1220,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 			folios[i] = NULL;
 			goto free_folios;
 		}
-		cur = folio_end(folios[i]);
+		cur = folio_next_pos(folios[i]);
 	}
 	for (int i = 0; i < nr_pages; i++) {
 		if (!folios[i])

@@ -333,7 +333,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
 			goto out;
 		}
 		range_start = max_t(u64, folio_pos(folio), start);
-		range_len = min_t(u64, folio_end(folio), end + 1) - range_start;
+		range_len = min_t(u64, folio_next_pos(folio), end + 1) - range_start;
 		btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
 
 		processed_end = range_start + range_len - 1;

@@ -387,7 +387,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 	ASSERT(orig_end > orig_start);
 
 	/* The range should at least cover part of the folio */
-	ASSERT(!(orig_start >= folio_end(locked_folio) ||
+	ASSERT(!(orig_start >= folio_next_pos(locked_folio) ||
 		 orig_end <= folio_pos(locked_folio)));
 again:
 	/* step one, find a bunch of delalloc bytes starting at start */

@@ -493,7 +493,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 
 	ASSERT(folio_pos(folio) <= start &&
-	       start + len <= folio_end(folio));
+	       start + len <= folio_next_pos(folio));
 
 	if (uptodate && btrfs_verify_folio(folio, start, len))
 		btrfs_folio_set_uptodate(fs_info, folio, start, len);

@@ -1201,7 +1201,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
 	 * finished our folio read and unlocked the folio.
 	 */
 	if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
-		u64 range_len = min(folio_end(folio),
+		u64 range_len = umin(folio_next_pos(folio),
 				    ordered->file_offset + ordered->num_bytes) - cur;
 
 		ret = true;

@@ -1223,7 +1223,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
 	 * So we return true and update @next_ret to the OE/folio boundary.
 	 */
 	if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
-		u64 range_len = min(folio_end(folio),
+		u64 range_len = umin(folio_next_pos(folio),
 				    ordered->file_offset + ordered->num_bytes) - cur;
 
 		/*

@@ -2215,7 +2215,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 	for (int i = 0; i < num_extent_folios(eb); i++) {
 		struct folio *folio = eb->folios[i];
 		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
-		u32 range_len = min_t(u64, folio_end(folio),
+		u32 range_len = min_t(u64, folio_next_pos(folio),
 				      eb->start + eb->len) - range_start;
 
 		folio_lock(folio);

@@ -2619,7 +2619,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
 			continue;
 		}
 
-		cur_end = min_t(u64, folio_end(folio) - 1, end);
+		cur_end = min_t(u64, folio_next_pos(folio) - 1, end);
 		cur_len = cur_end + 1 - cur;
 
 		ASSERT(folio_test_locked(folio));

@@ -3860,7 +3860,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
 	for (int i = 0; i < num_extent_folios(eb); i++) {
 		struct folio *folio = eb->folios[i];
 		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
-		u32 range_len = min_t(u64, folio_end(folio),
+		u32 range_len = min_t(u64, folio_next_pos(folio),
 				      eb->start + eb->len) - range_start;
 
 		bio_add_folio_nofail(&bbio->bio, folio, range_len,

@@ -89,7 +89,8 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos
 	num_bytes = round_up(write_bytes + pos - start_pos,
 			     fs_info->sectorsize);
 	ASSERT(num_bytes <= U32_MAX);
-	ASSERT(folio_pos(folio) <= pos && folio_end(folio) >= pos + write_bytes);
+	ASSERT(folio_pos(folio) <= pos &&
+	       folio_next_pos(folio) >= pos + write_bytes);
 
 	end_of_last_block = start_pos + num_bytes - 1;
 

@@ -799,7 +800,7 @@ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64
 				  u64 len)
 {
 	u64 clamp_start = max_t(u64, pos, folio_pos(folio));
-	u64 clamp_end = min_t(u64, pos + len, folio_end(folio));
+	u64 clamp_end = min_t(u64, pos + len, folio_next_pos(folio));
 	const u32 blocksize = inode_to_fs_info(inode)->sectorsize;
 	int ret = 0;
 

@@ -1254,8 +1255,8 @@ static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter,
 	 * The reserved range goes beyond the current folio, shrink the reserved
 	 * space to the folio boundary.
 	 */
-	if (reserved_start + reserved_len > folio_end(folio)) {
-		const u64 last_block = folio_end(folio);
+	if (reserved_start + reserved_len > folio_next_pos(folio)) {
+		const u64 last_block = folio_next_pos(folio);
 
 		shrink_reserved_space(inode, *data_reserved, reserved_start,
 				      reserved_len, last_block - reserved_start,

@@ -409,7 +409,7 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
 			continue;
 		}
 
-		index = folio_end(folio) >> PAGE_SHIFT;
+		index = folio_next_index(folio);
 		/*
 		 * Here we just clear all Ordered bits for every page in the
 		 * range, then btrfs_mark_ordered_io_finished() will handle

@@ -2336,7 +2336,8 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
 	 * The range must cover part of the @locked_folio, or a return of 1
 	 * can confuse the caller.
 	 */
-	ASSERT(!(end <= folio_pos(locked_folio) || start >= folio_end(locked_folio)));
+	ASSERT(!(end <= folio_pos(locked_folio) ||
+		 start >= folio_next_pos(locked_folio)));
 
 	if (should_nocow(inode, start, end)) {
 		ret = run_delalloc_nocow(inode, locked_folio, start, end);

@@ -2743,7 +2744,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	struct btrfs_inode *inode = fixup->inode;
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	u64 page_start = folio_pos(folio);
-	u64 page_end = folio_end(folio) - 1;
+	u64 page_end = folio_next_pos(folio) - 1;
 	int ret = 0;
 	bool free_delalloc_space = true;
 

@@ -4855,7 +4856,7 @@ static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
 	 */
 
 	zero_start = max_t(u64, folio_pos(folio), start);
-	zero_end = folio_end(folio);
+	zero_end = folio_next_pos(folio);
 	folio_zero_range(folio, zero_start - folio_pos(folio),
 			 zero_end - zero_start);
 

@@ -5038,7 +5039,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e
 		 * not reach disk, it still affects our page caches.
 		 */
 		zero_start = max_t(u64, folio_pos(folio), start);
-		zero_end = min_t(u64, folio_end(folio) - 1, end);
+		zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
 	} else {
 		zero_start = max_t(u64, block_start, start);
 		zero_end = min_t(u64, block_end, end);

@@ -209,9 +209,4 @@ static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
 	return (found_set == start + nbits);
 }
 
-static inline u64 folio_end(struct folio *folio)
-{
-	return folio_pos(folio) + folio_size(folio);
-}
-
 #endif

@@ -359,7 +359,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 	if (folio) {
 		ASSERT(folio->mapping);
 		ASSERT(folio_pos(folio) <= file_offset);
-		ASSERT(file_offset + len <= folio_end(folio));
+		ASSERT(file_offset + len <= folio_next_pos(folio));
 
 		/*
 		 * Ordered flag indicates whether we still have

@@ -186,7 +186,8 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
 	 * unmapped page like dummy extent buffer pages.
 	 */
 	if (folio->mapping)
-		ASSERT(folio_pos(folio) <= start && start + len <= folio_end(folio),
+		ASSERT(folio_pos(folio) <= start &&
+		       start + len <= folio_next_pos(folio),
 		       "start=%llu len=%u folio_pos=%llu folio_size=%zu",
 		       start, len, folio_pos(folio), folio_size(folio));
 }

@@ -217,7 +218,7 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
 	if (folio_pos(folio) >= orig_start + orig_len)
 		*len = 0;
 	else
-		*len = min_t(u64, folio_end(folio), orig_start + orig_len) - *start;
+		*len = min_t(u64, folio_next_pos(folio), orig_start + orig_len) - *start;
 }
 
 static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,

@@ -2732,7 +2732,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
 	loff_t i_size = i_size_read(inode);
 
 	/* Is the folio fully inside i_size? */
-	if (folio_pos(folio) + folio_size(folio) <= i_size)
+	if (folio_next_pos(folio) <= i_size)
 		return __block_write_full_folio(inode, folio, get_block, wbc);
 
 	/* Is the folio fully outside i_size? (truncate in progress) */

@@ -1319,8 +1319,8 @@ static int ext4_write_begin(const struct kiocb *iocb,
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (pos + len > folio_pos(folio) + folio_size(folio))
-		len = folio_pos(folio) + folio_size(folio) - pos;
+	if (len > folio_next_pos(folio) - pos)
+		len = folio_next_pos(folio) - pos;
 
 	from = offset_in_folio(folio, pos);
 	to = from + len;

@@ -2704,7 +2704,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 
 			if (mpd->map.m_len == 0)
 				mpd->start_pos = folio_pos(folio);
-			mpd->next_pos = folio_pos(folio) + folio_size(folio);
+			mpd->next_pos = folio_next_pos(folio);
 			/*
 			 * Writeout when we cannot modify metadata is simple.
 			 * Just submit the page. For data=journal mode we

@@ -3146,8 +3146,8 @@ static int ext4_da_write_begin(const struct kiocb *iocb,
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (pos + len > folio_pos(folio) + folio_size(folio))
-		len = folio_pos(folio) + folio_size(folio) - pos;
+	if (len > folio_next_pos(folio) - pos)
+		len = folio_next_pos(folio) - pos;
 
 	ret = ext4_block_write_begin(NULL, folio, pos, len,
 				     ext4_da_get_block_prep);

@@ -1329,7 +1329,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	}
 
 	folio = page_folio(cc->rpages[last_index]);
-	psize = folio_pos(folio) + folio_size(folio);
+	psize = folio_next_pos(folio);
 
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
 	if (err)

@@ -81,8 +81,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (folio_pos(folio) < i_size &&
-	    i_size < folio_pos(folio) + folio_size(folio))
+	if (folio_pos(folio) < i_size && i_size < folio_next_pos(folio))
 		folio_zero_segment(folio, offset_in_folio(folio, i_size),
 				   folio_size(folio));
 

@@ -707,7 +707,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter,
 	 * are not changing pagecache contents.
 	 */
 	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
-	    pos + len >= folio_pos(folio) + folio_size(folio))
+	    pos + len >= folio_next_pos(folio))
 		return 0;
 
 	ifs = ifs_alloc(iter->inode, folio, iter->flags);

@@ -1097,8 +1097,7 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 	if (!ifs)
 		return;
 
-	last_byte = min_t(loff_t, end_byte - 1,
-			folio_pos(folio) + folio_size(folio) - 1);
+	last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
 	for (i = first_blk; i <= last_blk; i++) {

@@ -1129,8 +1128,7 @@ static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
 		 * Make sure the next punch start is correctly bound to
 		 * the end of this data range, not the end of the folio.
 		 */
-		*punch_start_byte = min_t(loff_t, end_byte,
-				folio_pos(folio) + folio_size(folio));
+		*punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
 	}
 
 	/*

@@ -1170,7 +1168,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
 				start_byte, end_byte, iomap, punch);
 
 		/* move offset to start of next folio in range */
-		start_byte = folio_pos(folio) + folio_size(folio);
+		start_byte = folio_next_pos(folio);
 		folio_unlock(folio);
 		folio_put(folio);
 	}

@@ -535,7 +535,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 		folio_unlock(folio);
 		err = filemap_fdatawrite_range(mapping,
 					       folio_pos(folio),
-					       folio_pos(folio) + folio_size(folio));
+					       folio_next_pos(folio));
 		switch (err) {
 		case 0:
 			ret = VM_FAULT_RETRY;

@@ -298,7 +298,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_dirty(folio))
 		return false;
 
-	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
+	end = umin(folio_next_pos(folio), i_size_read(&ctx->inode));
 	if (end > ctx->zero_point)
 		ctx->zero_point = end;
 

@@ -6892,7 +6892,7 @@ static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
 		ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1,
 					  &phys);
 
-		start = folio_next_index(folio) << PAGE_SHIFT;
+		start = folio_next_pos(folio);
 	}
 out:
 	if (folios)

@@ -834,7 +834,7 @@ xfarray_sort_scan(
 		si->first_folio_idx = xfarray_idx(si->array,
 				folio_pos(si->folio) + si->array->obj_size - 1);
 
-		next_pos = folio_pos(si->folio) + folio_size(si->folio);
+		next_pos = folio_next_pos(si->folio);
 		si->last_folio_idx = xfarray_idx(si->array, next_pos - 1);
 		if (xfarray_pos(si->array, si->last_folio_idx + 1) > next_pos)
 			si->last_folio_idx--;

@@ -271,7 +271,7 @@ xfs_discard_folio(
 	 * folio itself and not the start offset that is passed in.
 	 */
 	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
-			folio_pos(folio) + folio_size(folio), NULL);
+			folio_next_pos(folio), NULL);
 }
 
 /*

@@ -941,6 +941,17 @@ static inline pgoff_t folio_next_index(const struct folio *folio)
 	return folio->index + folio_nr_pages(folio);
 }
 
+/**
+ * folio_next_pos - Get the file position of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The position of the folio which follows this folio in the file.
+ */
+static inline loff_t folio_next_pos(const struct folio *folio)
+{
+	return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
+}
+
 /**
  * folio_file_page - The page for a particular index.
  * @folio: The folio which contains this index.

@@ -1133,7 +1133,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
-		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		same_folio = lend < folio_next_pos(folio);
 		folio_mark_dirty(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio_next_index(folio);

@@ -387,7 +387,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
 	if (!IS_ERR(folio)) {
-		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		same_folio = lend < folio_next_pos(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio_next_index(folio);
 			if (same_folio)
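A closing note on the ocfs2 hunk, the one bug fix in the series: the old expression shifted the pgoff_t returned by folio_next_index() before widening it, and pgoff_t is an unsigned long, i.e. 32 bits wide on 32-bit kernels, so the byte offset of a folio deep inside a large file could lose its high bits. The sketch below is purely illustrative (the function is hypothetical and exists nowhere in the tree); it compiles on any configuration but only shows a divergence on 32-bit kernels.

#include <linux/pagemap.h>

/* Hypothetical illustration only -- not code from this series. */
static void compare_open_coded_shift(struct folio *folio)
{
	/*
	 * Old ocfs2 form: the shift is evaluated in pgoff_t (unsigned long)
	 * arithmetic and only then widened to loff_t.
	 */
	loff_t open_coded = folio_next_index(folio) << PAGE_SHIFT;

	/* New form: folio_next_pos() casts to loff_t before shifting. */
	loff_t widened = folio_next_pos(folio);

	/*
	 * On 64-bit kernels the two always agree.  On 32-bit kernels they
	 * diverge once index << PAGE_SHIFT no longer fits in 32 bits, which
	 * is the long-standing bug the cover letter marks for backporting.
	 */
	WARN_ON_ONCE(open_coded != widened);
}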