mirror of https://github.com/torvalds/linux.git
iomap: replace iomap_folio_ops with iomap_write_ops
The iomap_folio_ops are only used for buffered writes, including the zero and unshare variants. Rename them to iomap_write_ops to better describe the usage, and pass them through the call chain like the other operation-specific methods instead of through the iomap. xfs_iomap_valid grows an IOMAP_HOLE check to keep the existing behavior that never attached the folio_ops to an iomap representing a hole.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/20250710133343.399917-12-hch@lst.de
Acked-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 8b217cf779
commit 2a5574fc57
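The conversion pattern for filesystems is uniform: define an iomap_write_ops table once and pass it explicitly at each buffered-write entry point, instead of attaching it to the iomap in ->iomap_begin. A minimal sketch of the new calling convention (the myfs_* names are hypothetical; the gfs2 and xfs hunks below show the real instances). Callers that never overrode folio handling, such as the block device and zonefs code below, simply pass NULL:

        /* Hypothetical filesystem, modeled on the gfs2/xfs conversions below. */
        static const struct iomap_write_ops myfs_iomap_write_ops = {
                .get_folio      = myfs_get_folio,       /* hypothetical */
                .put_folio      = myfs_put_folio,       /* hypothetical */
                .iomap_valid    = myfs_iomap_valid,     /* hypothetical */
        };

        static ssize_t myfs_buffered_write(struct kiocb *iocb, struct iov_iter *from)
        {
                /* write_ops is now an explicit argument; NULL selects the defaults */
                return iomap_file_buffered_write(iocb, from, &myfs_iomap_ops,
                                &myfs_iomap_write_ops, NULL);
        }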
@@ -167,7 +167,6 @@ structure below:
     struct dax_device *dax_dev;
     void *inline_data;
     void *private;
-    const struct iomap_folio_ops *folio_ops;
     u64 validity_cookie;
 };

@@ -292,8 +291,6 @@ The fields are as follows:
   <https://lore.kernel.org/all/20180619164137.13720-7-hch@lst.de/>`_.
   This value will be passed unchanged to ``->iomap_end``.

- * ``folio_ops`` will be covered in the section on pagecache operations.
-
 * ``validity_cookie`` is a magic freshness value set by the filesystem
   that should be used to detect stale mappings.
   For pagecache operations this is critical for correct operation
@@ -57,16 +57,12 @@ The following address space operations can be wrapped easily:
 * ``bmap``
 * ``swap_activate``

-``struct iomap_folio_ops``
+``struct iomap_write_ops``
 --------------------------

-The ``->iomap_begin`` function for pagecache operations may set the
-``struct iomap::folio_ops`` field to an ops structure to override
-default behaviors of iomap:
-
 .. code-block:: c

- struct iomap_folio_ops {
+ struct iomap_write_ops {
     struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
             unsigned len);
     void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
@@ -723,7 +723,8 @@ blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)

 static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
 {
-        return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
+        return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
+                        NULL);
 }

 /*
@@ -963,12 +963,16 @@ static struct folio *
 gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
 {
         struct inode *inode = iter->inode;
+        struct gfs2_inode *ip = GFS2_I(inode);
         unsigned int blockmask = i_blocksize(inode) - 1;
         struct gfs2_sbd *sdp = GFS2_SB(inode);
         unsigned int blocks;
         struct folio *folio;
         int status;

+        if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+                return iomap_get_folio(iter, pos, len);
+
         blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
         status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
         if (status)

@@ -987,7 +991,7 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
         struct gfs2_inode *ip = GFS2_I(inode);
         struct gfs2_sbd *sdp = GFS2_SB(inode);

-        if (!gfs2_is_stuffed(ip))
+        if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
                 gfs2_trans_add_databufs(ip->i_gl, folio,
                                 offset_in_folio(folio, pos),
                                 copied);

@@ -995,13 +999,14 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
         folio_unlock(folio);
         folio_put(folio);

-        if (tr->tr_num_buf_new)
-                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-
-        gfs2_trans_end(sdp);
+        if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) {
+                if (tr->tr_num_buf_new)
+                        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+                gfs2_trans_end(sdp);
+        }
 }

-static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+const struct iomap_write_ops gfs2_iomap_write_ops = {
         .get_folio = gfs2_iomap_get_folio,
         .put_folio = gfs2_iomap_put_folio,
 };

@@ -1078,8 +1083,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                 gfs2_trans_end(sdp);
         }

-        if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
-                iomap->folio_ops = &gfs2_iomap_folio_ops;
         return 0;

 out_trans_end:

@@ -1304,7 +1307,7 @@ static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length
                 return 0;
         length = min(length, inode->i_size - from);
         return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops,
-                        NULL);
+                        &gfs2_iomap_write_ops, NULL);
 }

 #define GFS2_JTRUNC_REVOKES 8192
@@ -44,6 +44,7 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
 }

 extern const struct iomap_ops gfs2_iomap_ops;
+extern const struct iomap_write_ops gfs2_iomap_write_ops;
 extern const struct iomap_writeback_ops gfs2_writeback_ops;

 int gfs2_unstuff_dinode(struct gfs2_inode *ip);
@@ -1058,7 +1058,8 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
         }

         pagefault_disable();
-        ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
+        ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
+                        &gfs2_iomap_write_ops, NULL);
         pagefault_enable();
         if (ret > 0)
                 written += ret;
@@ -733,28 +733,27 @@ static int __iomap_write_begin(const struct iomap_iter *iter, size_t len,
         return 0;
 }

-static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter,
+                const struct iomap_write_ops *write_ops, size_t len)
 {
-        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
         loff_t pos = iter->pos;

         if (!mapping_large_folio_support(iter->inode->i_mapping))
                 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

-        if (folio_ops && folio_ops->get_folio)
-                return folio_ops->get_folio(iter, pos, len);
-        else
-                return iomap_get_folio(iter, pos, len);
+        if (write_ops && write_ops->get_folio)
+                return write_ops->get_folio(iter, pos, len);
+        return iomap_get_folio(iter, pos, len);
 }

-static void __iomap_put_folio(struct iomap_iter *iter, size_t ret,
+static void __iomap_put_folio(struct iomap_iter *iter,
+                const struct iomap_write_ops *write_ops, size_t ret,
                 struct folio *folio)
 {
-        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
         loff_t pos = iter->pos;

-        if (folio_ops && folio_ops->put_folio) {
-                folio_ops->put_folio(iter->inode, pos, ret, folio);
+        if (write_ops && write_ops->put_folio) {
+                write_ops->put_folio(iter->inode, pos, ret, folio);
         } else {
                 folio_unlock(folio);
                 folio_put(folio);

@@ -791,10 +790,10 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
  * offset, and length. Callers can optionally pass a max length *plen,
  * otherwise init to zero.
  */
-static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
+static int iomap_write_begin(struct iomap_iter *iter,
+                const struct iomap_write_ops *write_ops, struct folio **foliop,
                 size_t *poffset, u64 *plen)
 {
-        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
         const struct iomap *srcmap = iomap_iter_srcmap(iter);
         loff_t pos = iter->pos;
         u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
@@ -809,7 +808,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
         if (fatal_signal_pending(current))
                 return -EINTR;

-        folio = __iomap_get_folio(iter, len);
+        folio = __iomap_get_folio(iter, write_ops, len);
         if (IS_ERR(folio))
                 return PTR_ERR(folio);

@@ -823,8 +822,8 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
          * could do the wrong thing here (zero a page range incorrectly or fail
          * to zero) and corrupt data.
          */
-        if (folio_ops && folio_ops->iomap_valid) {
-                bool iomap_valid = folio_ops->iomap_valid(iter->inode,
+        if (write_ops && write_ops->iomap_valid) {
+                bool iomap_valid = write_ops->iomap_valid(iter->inode,
                                 &iter->iomap);
                 if (!iomap_valid) {
                         iter->iomap.flags |= IOMAP_F_STALE;

@@ -850,8 +849,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
         return 0;

 out_unlock:
-        __iomap_put_folio(iter, 0, folio);
-
+        __iomap_put_folio(iter, write_ops, 0, folio);
         return status;
 }
@@ -923,7 +921,8 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
         return __iomap_write_end(iter->inode, pos, len, copied, folio);
 }

-static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
+                const struct iomap_write_ops *write_ops)
 {
         ssize_t total_written = 0;
         int status = 0;

@@ -967,7 +966,8 @@ static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
                         break;
                 }

-                status = iomap_write_begin(iter, &folio, &offset, &bytes);
+                status = iomap_write_begin(iter, write_ops, &folio, &offset,
+                                &bytes);
                 if (unlikely(status)) {
                         iomap_write_failed(iter->inode, iter->pos, bytes);
                         break;

@@ -996,7 +996,7 @@ static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
                         i_size_write(iter->inode, pos + written);
                         iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
                 }
-                __iomap_put_folio(iter, written, folio);
+                __iomap_put_folio(iter, write_ops, written, folio);

                 if (old_size < pos)
                         pagecache_isize_extended(iter->inode, old_size, pos);
@@ -1029,7 +1029,8 @@ static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)

 ssize_t
 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
-                const struct iomap_ops *ops, void *private)
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops, void *private)
 {
         struct iomap_iter iter = {
                 .inode = iocb->ki_filp->f_mapping->host,

@@ -1046,7 +1047,7 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
                 iter.flags |= IOMAP_DONTCACHE;

         while ((ret = iomap_iter(&iter, ops)) > 0)
-                iter.status = iomap_write_iter(&iter, i);
+                iter.status = iomap_write_iter(&iter, i, write_ops);

         if (unlikely(iter.pos == iocb->ki_pos))
                 return ret;
@@ -1280,7 +1281,8 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 }
 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);

-static int iomap_unshare_iter(struct iomap_iter *iter)
+static int iomap_unshare_iter(struct iomap_iter *iter,
+                const struct iomap_write_ops *write_ops)
 {
         struct iomap *iomap = &iter->iomap;
         u64 bytes = iomap_length(iter);

@@ -1295,14 +1297,15 @@ static int iomap_unshare_iter(struct iomap_iter *iter)
                 bool ret;

                 bytes = min_t(u64, SIZE_MAX, bytes);
-                status = iomap_write_begin(iter, &folio, &offset, &bytes);
+                status = iomap_write_begin(iter, write_ops, &folio, &offset,
+                                &bytes);
                 if (unlikely(status))
                         return status;
                 if (iomap->flags & IOMAP_F_STALE)
                         break;

                 ret = iomap_write_end(iter, bytes, bytes, folio);
-                __iomap_put_folio(iter, bytes, folio);
+                __iomap_put_folio(iter, write_ops, bytes, folio);
                 if (WARN_ON_ONCE(!ret))
                         return -EIO;
@@ -1320,7 +1323,8 @@ static int iomap_unshare_iter(struct iomap_iter *iter)

 int
 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
-                const struct iomap_ops *ops)
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops)
 {
         struct iomap_iter iter = {
                 .inode = inode,

@@ -1335,7 +1339,7 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,

         iter.len = min(len, size - pos);
         while ((ret = iomap_iter(&iter, ops)) > 0)
-                iter.status = iomap_unshare_iter(&iter);
+                iter.status = iomap_unshare_iter(&iter, write_ops);
         return ret;
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
@@ -1354,7 +1358,8 @@ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
         return filemap_write_and_wait_range(mapping, i->pos, end);
 }

-static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+                const struct iomap_write_ops *write_ops)
 {
         u64 bytes = iomap_length(iter);
         int status;

@@ -1365,7 +1370,8 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                 bool ret;

                 bytes = min_t(u64, SIZE_MAX, bytes);
-                status = iomap_write_begin(iter, &folio, &offset, &bytes);
+                status = iomap_write_begin(iter, write_ops, &folio, &offset,
+                                &bytes);
                 if (status)
                         return status;
                 if (iter->iomap.flags & IOMAP_F_STALE)

@@ -1378,7 +1384,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                 folio_mark_accessed(folio);

                 ret = iomap_write_end(iter, bytes, bytes, folio);
-                __iomap_put_folio(iter, bytes, folio);
+                __iomap_put_folio(iter, write_ops, bytes, folio);
                 if (WARN_ON_ONCE(!ret))
                         return -EIO;
@@ -1394,7 +1400,8 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)

 int
 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
-                const struct iomap_ops *ops, void *private)
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops, void *private)
 {
         struct iomap_iter iter = {
                 .inode = inode,

@@ -1424,7 +1431,8 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
             filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
                 iter.len = plen;
                 while ((ret = iomap_iter(&iter, ops)) > 0)
-                        iter.status = iomap_zero_iter(&iter, did_zero);
+                        iter.status = iomap_zero_iter(&iter, did_zero,
+                                        write_ops);

                 iter.len = len - (iter.pos - pos);
                 if (ret || !iter.len)

@@ -1455,7 +1463,7 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                         continue;
                 }

-                iter.status = iomap_zero_iter(&iter, did_zero);
+                iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
         }
         return ret;
 }
@@ -1463,7 +1471,8 @@ EXPORT_SYMBOL_GPL(iomap_zero_range);

 int
 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
-                const struct iomap_ops *ops, void *private)
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops, void *private)
 {
         unsigned int blocksize = i_blocksize(inode);
         unsigned int off = pos & (blocksize - 1);

@@ -1472,7 +1481,7 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
         if (!off)
                 return 0;
         return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
-                        private);
+                        write_ops, private);
 }
 EXPORT_SYMBOL_GPL(iomap_truncate_page);
@@ -979,7 +979,8 @@ xfs_file_buffered_write(

         trace_xfs_file_buffered_write(iocb, from);
         ret = iomap_file_buffered_write(iocb, from,
-                        &xfs_buffered_write_iomap_ops, NULL);
+                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+                        NULL);

         /*
          * If we hit a space limit, try to free up some lingering preallocated

@@ -1059,7 +1060,8 @@ xfs_file_buffered_write_zoned(
 retry:
         trace_xfs_file_buffered_write(iocb, from);
         ret = iomap_file_buffered_write(iocb, from,
-                        &xfs_buffered_write_iomap_ops, &ac);
+                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+                        &ac);
         if (ret == -ENOSPC && !cleared_space) {
                 /*
                  * Kick off writeback to convert delalloc space and release the
@@ -79,6 +79,9 @@ xfs_iomap_valid(
 {
         struct xfs_inode *ip = XFS_I(inode);

+        if (iomap->type == IOMAP_HOLE)
+                return true;
+
         if (iomap->validity_cookie !=
                         xfs_iomap_inode_sequence(ip, iomap->flags)) {
                 trace_xfs_iomap_invalid(ip, iomap);
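Holes never had folio_ops attached, so ->iomap_valid was previously unreachable for them; now that the write_ops are passed unconditionally through the call chain, the early IOMAP_HOLE return above preserves that behavior. A filesystem adding its own revalidation hook would follow the same shape (a sketch with hypothetical myfs_* names):

        static bool myfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
        {
                /* a hole mapping carries no cookie to compare, treat it as valid */
                if (iomap->type == IOMAP_HOLE)
                        return true;
                /* myfs_mapping_seq() is a hypothetical per-inode sequence counter
                 * that ->iomap_begin stashed in validity_cookie */
                return iomap->validity_cookie == myfs_mapping_seq(inode);
        }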
@@ -89,7 +92,7 @@ xfs_iomap_valid(
         return true;
 }

-static const struct iomap_folio_ops xfs_iomap_folio_ops = {
+const struct iomap_write_ops xfs_iomap_write_ops = {
         .iomap_valid = xfs_iomap_valid,
 };

@@ -151,7 +154,6 @@ xfs_bmbt_to_iomap(
                 iomap->flags |= IOMAP_F_DIRTY;

         iomap->validity_cookie = sequence_cookie;
-        iomap->folio_ops = &xfs_iomap_folio_ops;
         return 0;
 }
@@ -2198,7 +2200,8 @@ xfs_zero_range(
                 return dax_zero_range(inode, pos, len, did_zero,
                                 &xfs_dax_write_iomap_ops);
         return iomap_zero_range(inode, pos, len, did_zero,
-                        &xfs_buffered_write_iomap_ops, ac);
+                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+                        ac);
 }

 int

@@ -2214,5 +2217,6 @@ xfs_truncate_page(
                 return dax_truncate_page(inode, pos, did_zero,
                                 &xfs_dax_write_iomap_ops);
         return iomap_truncate_page(inode, pos, did_zero,
-                        &xfs_buffered_write_iomap_ops, ac);
+                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+                        ac);
 }
@@ -57,5 +57,6 @@ extern const struct iomap_ops xfs_seek_iomap_ops;
 extern const struct iomap_ops xfs_xattr_iomap_ops;
 extern const struct iomap_ops xfs_dax_write_iomap_ops;
 extern const struct iomap_ops xfs_atomic_write_cow_iomap_ops;
+extern const struct iomap_write_ops xfs_iomap_write_ops;

 #endif /* __XFS_IOMAP_H__*/
@@ -1881,7 +1881,8 @@ xfs_reflink_unshare(
                                 &xfs_dax_write_iomap_ops);
         else
                 error = iomap_file_unshare(inode, offset, len,
-                                &xfs_buffered_write_iomap_ops);
+                                &xfs_buffered_write_iomap_ops,
+                                &xfs_iomap_write_ops);
         if (error)
                 goto out;
@@ -572,7 +572,8 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
         if (ret <= 0)
                 goto inode_unlock;

-        ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL);
+        ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops,
+                        NULL, NULL);
         if (ret == -EIO)
                 zonefs_io_error(inode, true);
@@ -101,8 +101,6 @@ struct vm_fault;
  */
 #define IOMAP_NULL_ADDR -1ULL   /* addr is not valid */

-struct iomap_folio_ops;
-
 struct iomap {
         u64 addr; /* disk offset of mapping, bytes */
         loff_t offset; /* file offset of mapping, bytes */

@@ -113,7 +111,6 @@ struct iomap {
         struct dax_device *dax_dev; /* dax_dev for dax operations */
         void *inline_data;
         void *private; /* filesystem private */
-        const struct iomap_folio_ops *folio_ops;
         u64 validity_cookie; /* used with .iomap_valid() */
 };

@@ -143,16 +140,11 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
 }

 /*
- * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
- * and put_folio will be called for each folio written to. This only applies
- * to buffered writes as unbuffered writes will not typically have folios
- * associated with them.
- *
  * When get_folio succeeds, put_folio will always be called to do any
  * cleanup work necessary. put_folio is responsible for unlocking and putting
  * @folio.
  */
-struct iomap_folio_ops {
+struct iomap_write_ops {
         struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
                         unsigned len);
         void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
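The surviving header comment states the contract these hooks must honor: once get_folio succeeds, put_folio is always called and owns unlocking and releasing the folio. A minimal conforming pair, assuming no filesystem-private state (hypothetical myfs_* names; compare the gfs2 implementation above, which brackets the folio with a transaction):

        static struct folio *myfs_get_folio(struct iomap_iter *iter, loff_t pos,
                        unsigned len)
        {
                /* take per-write locks or start a transaction here, then
                 * fall back to the generic pagecache lookup */
                return iomap_get_folio(iter, pos, len);
        }

        static void myfs_put_folio(struct inode *inode, loff_t pos,
                        unsigned copied, struct folio *folio)
        {
                /* put_folio must unlock and put @folio on every path */
                folio_unlock(folio);
                folio_put(folio);
        }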
@@ -335,7 +327,8 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
 }

 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
-                const struct iomap_ops *ops, void *private);
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops, void *private);
 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);

@@ -344,11 +337,14 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
-                const struct iomap_ops *ops);
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
-                bool *did_zero, const struct iomap_ops *ops, void *private);
+                bool *did_zero, const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops, void *private);
 int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
-                const struct iomap_ops *ops, void *private);
+                const struct iomap_ops *ops,
+                const struct iomap_write_ops *write_ops, void *private);
 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
                 void *private);
 typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,