mirror of https://github.com/torvalds/linux.git
vfs-6.16-rc1.writepage
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCaDBPTgAKCRCRxhvAZXjc
ovkTAP9tyN24Oo+koY/2UedYBxM54cW4BCCRsVmkzfr8NSVdwwD/dg+v6gS8+nyD
3jlR0Z/08UyMHapB7fnAuFxPXXc8oAo=
=e55o
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.16-rc1.writepage' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull final writepage conversion from Christian Brauner:
 "This converts vboxsf from ->writepage() to ->writepages(). This was
  the last user of the ->writepage() method. So remove ->writepage()
  completely and all references to it"

* tag 'vfs-6.16-rc1.writepage' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fs: Remove aops->writepage
  mm: Remove swap_writepage() and shmem_writepage()
  ttm: Call shmem_writeout() from ttm_backup_backup_page()
  i915: Use writeback_iter()
  shmem: Add shmem_writeout()
  writeback: Remove writeback_use_writepage()
  migrate: Remove call to ->writepage
  vboxsf: Convert to writepages
  9p: Add a migrate_folio method
commit dc76285144
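Every conversion in this series lands on the same writeback_iter() loop, so a minimal sketch of the ->writepages() shape the old ->writepage() callers are moved to may help when reading the hunks below. The myfs_* names and the myfs_write_folio() helper are placeholders, not code from this series; the loop mirrors the vboxsf conversion further down.

/*
 * Hypothetical ->writepages() built on writeback_iter(); myfs_* names
 * are placeholders.  The iterator hands back one locked folio at a
 * time; the loop body submits the I/O and unlocks, as vboxsf does.
 */
static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct folio *folio = NULL;
        int error = 0;

        while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
                error = myfs_write_folio(folio, wbc);   /* placeholder I/O helper */
                folio_unlock(folio);
        }
        return error;
}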
@@ -3019,7 +3019,7 @@ Filesystem Support for Writeback
 --------------------------------
 
 A filesystem can support cgroup writeback by updating
-address_space_operations->writepage[s]() to annotate bio's using the
+address_space_operations->writepages() to annotate bio's using the
 following two functions.
 
   wbc_init_bio(@wbc, @bio)
@@ -1409,7 +1409,7 @@ read the ciphertext into the page cache and decrypt it in-place. The
 folio lock must be held until decryption has finished, to prevent the
 folio from becoming visible to userspace prematurely.
 
-For the write path (->writepage()) of regular files, filesystems
+For the write path (->writepages()) of regular files, filesystems
 cannot encrypt data in-place in the page cache, since the cached
 plaintext must be preserved. Instead, filesystems must encrypt into a
 temporary buffer or "bounce page", then write out the temporary
@@ -249,7 +249,6 @@ address_space_operations
 ========================
 prototypes::
 
-        int (*writepage)(struct page *page, struct writeback_control *wbc);
         int (*read_folio)(struct file *, struct folio *);
         int (*writepages)(struct address_space *, struct writeback_control *);
         bool (*dirty_folio)(struct address_space *, struct folio *folio);
@@ -280,7 +279,6 @@ locking rules:
 ====================== ======================== ========= ===============
 ops                    folio locked             i_rwsem   invalidate_lock
 ====================== ======================== ========= ===============
-writepage:             yes, unlocks (see below)
 read_folio:            yes, unlocks                        shared
 writepages:
 dirty_folio:           maybe
@@ -309,54 +307,6 @@ completion.
 
 ->readahead() unlocks the folios that I/O is attempted on like ->read_folio().
 
-->writepage() is used for two purposes: for "memory cleansing" and for
-"sync". These are quite different operations and the behaviour may differ
-depending upon the mode.
-
-If writepage is called for sync (wbc->sync_mode != WBC_SYNC_NONE) then
-it *must* start I/O against the page, even if that would involve
-blocking on in-progress I/O.
-
-If writepage is called for memory cleansing (sync_mode ==
-WBC_SYNC_NONE) then its role is to get as much writeout underway as
-possible. So writepage should try to avoid blocking against
-currently-in-progress I/O.
-
-If the filesystem is not called for "sync" and it determines that it
-would need to block against in-progress I/O to be able to start new I/O
-against the page the filesystem should redirty the page with
-redirty_page_for_writepage(), then unlock the page and return zero.
-This may also be done to avoid internal deadlocks, but rarely.
-
-If the filesystem is called for sync then it must wait on any
-in-progress I/O and then start new I/O.
-
-The filesystem should unlock the page synchronously, before returning to the
-caller, unless ->writepage() returns special WRITEPAGE_ACTIVATE
-value. WRITEPAGE_ACTIVATE means that page cannot really be written out
-currently, and VM should stop calling ->writepage() on this page for some
-time. VM does this by moving page to the head of the active list, hence the
-name.
-
-Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
-and return zero, writepage *must* run set_page_writeback() against the page,
-followed by unlocking it. Once set_page_writeback() has been run against the
-page, write I/O can be submitted and the write I/O completion handler must run
-end_page_writeback() once the I/O is complete. If no I/O is submitted, the
-filesystem must run end_page_writeback() against the page before returning from
-writepage.
-
-That is: after 2.5.12, pages which are under writeout are *not* locked. Note,
-if the filesystem needs the page to be locked during writeout, that is ok, too,
-the page is allowed to be unlocked at any point in time between the calls to
-set_page_writeback() and end_page_writeback().
-
-Note, failure to run either redirty_page_for_writepage() or the combination of
-set_page_writeback()/end_page_writeback() on a page submitted to writepage
-will leave the page itself marked clean but it will be tagged as dirty in the
-radix tree. This incoherency can lead to all sorts of hard-to-debug problems
-in the filesystem like having dirty inodes at umount and losing written data.
-
 ->writepages() is used for periodic writeback and for syscall-initiated
 sync operations. The address_space should start I/O against at least
 ``*nr_to_write`` pages. ``*nr_to_write`` must be decremented for each page
@@ -364,8 +314,8 @@ which is written. The address_space implementation may write more (or less)
 pages than ``*nr_to_write`` asks for, but it should try to be reasonably close.
 If nr_to_write is NULL, all dirty pages must be written.
 
-writepages should _only_ write pages which are present on
-mapping->io_pages.
+writepages should _only_ write pages which are present in
+mapping->i_pages.
 
 ->dirty_folio() is called from various places in the kernel when
 the target folio is marked as needing writeback. The folio cannot be
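The ``*nr_to_write`` contract described in this hunk is easiest to see in a hand-rolled ->writepages(): each written folio must be charged against the budget the caller passed in (the writeback_iter() loop used elsewhere in this series does that bookkeeping for you). A rough sketch, with myfs_next_dirty_folio() and myfs_write_folio() as hypothetical helpers:

/* Hypothetical hand-rolled ->writepages(); myfs_* helpers are placeholders. */
static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        int error = 0;

        while (wbc->nr_to_write > 0) {
                struct folio *folio = myfs_next_dirty_folio(mapping, wbc);

                if (!folio)
                        break;
                error = myfs_write_folio(folio, wbc);
                /* decrement the budget for each folio written, as required above */
                wbc->nr_to_write -= folio_nr_pages(folio);
                if (error)
                        break;
        }
        return error;
}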
@@ -716,9 +716,8 @@ page lookup by address, and keeping track of pages tagged as Dirty or
 Writeback.
 
 The first can be used independently to the others. The VM can try to
-either write dirty pages in order to clean them, or release clean pages
-in order to reuse them. To do this it can call the ->writepage method
-on dirty pages, and ->release_folio on clean folios with the private
+release clean pages in order to reuse them. To do this it can call
+->release_folio on clean folios with the private
 flag set. Clean pages without PagePrivate and with no external references
 will be released without notice being given to the address_space.
 
@@ -731,8 +730,8 @@ maintains information about the PG_Dirty and PG_Writeback status of each
 page, so that pages with either of these flags can be found quickly.
 
 The Dirty tag is primarily used by mpage_writepages - the default
-->writepages method. It uses the tag to find dirty pages to call
-->writepage on. If mpage_writepages is not used (i.e. the address
+->writepages method. It uses the tag to find dirty pages to
+write back. If mpage_writepages is not used (i.e. the address
 provides its own ->writepages) , the PAGECACHE_TAG_DIRTY tag is almost
 unused. write_inode_now and sync_inode do use it (through
 __sync_single_inode) to check if ->writepages has been successful in
@@ -756,23 +755,23 @@ pages, however the address_space has finer control of write sizes.
 
 The read process essentially only requires 'read_folio'. The write
 process is more complicated and uses write_begin/write_end or
-dirty_folio to write data into the address_space, and writepage and
+dirty_folio to write data into the address_space, and
 writepages to writeback data to storage.
 
 Adding and removing pages to/from an address_space is protected by the
 inode's i_mutex.
 
 When data is written to a page, the PG_Dirty flag should be set. It
-typically remains set until writepage asks for it to be written. This
+typically remains set until writepages asks for it to be written. This
 should clear PG_Dirty and set PG_Writeback. It can be actually written
 at any point after PG_Dirty is clear. Once it is known to be safe,
 PG_Writeback is cleared.
 
 Writeback makes use of a writeback_control structure to direct the
-operations. This gives the writepage and writepages operations some
+operations. This gives the writepages operation some
 information about the nature of and reason for the writeback request,
 and the constraints under which it is being done. It is also used to
-return information back to the caller about the result of a writepage or
+return information back to the caller about the result of a
 writepages request.
 
 
@@ -819,7 +818,6 @@ cache in your filesystem. The following members are defined:
 .. code-block:: c
 
         struct address_space_operations {
-                int (*writepage)(struct page *page, struct writeback_control *wbc);
                 int (*read_folio)(struct file *, struct folio *);
                 int (*writepages)(struct address_space *, struct writeback_control *);
                 bool (*dirty_folio)(struct address_space *, struct folio *);
@@ -848,25 +846,6 @@ cache in your filesystem. The following members are defined:
                 int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
         };
 
-``writepage``
-        called by the VM to write a dirty page to backing store. This
-        may happen for data integrity reasons (i.e. 'sync'), or to free
-        up memory (flush). The difference can be seen in
-        wbc->sync_mode. The PG_Dirty flag has been cleared and
-        PageLocked is true. writepage should start writeout, should set
-        PG_Writeback, and should make sure the page is unlocked, either
-        synchronously or asynchronously when the write operation
-        completes.
-
-        If wbc->sync_mode is WB_SYNC_NONE, ->writepage doesn't have to
-        try too hard if there are problems, and may choose to write out
-        other pages from the mapping if that is easier (e.g. due to
-        internal dependencies). If it chooses not to start writeout, it
-        should return AOP_WRITEPAGE_ACTIVATE so that the VM will not
-        keep calling ->writepage on that page.
-
-        See the file "Locking" for more details.
-
 ``read_folio``
         Called by the page cache to read a folio from the backing store.
         The 'file' argument supplies authentication information to network
@@ -909,7 +888,7 @@ cache in your filesystem. The following members are defined:
         given and that many pages should be written if possible. If no
         ->writepages is given, then mpage_writepages is used instead.
         This will choose pages from the address space that are tagged as
-        DIRTY and will pass them to ->writepage.
+        DIRTY and will write them back.
 
 ``dirty_folio``
         called by the VM to mark a folio as dirty. This is particularly
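For a conventional block-backed filesystem, the ->writepages() member described above is often just a thin wrapper around mpage_writepages(), which walks the DIRTY-tagged folios itself. A minimal sketch, with myfs_get_block() as a hypothetical get_block_t callback:

/* Hypothetical wiring of ->writepages to mpage_writepages(); myfs_get_block is a placeholder. */
static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, myfs_get_block);
}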
@@ -37,7 +37,7 @@
 enum wbt_flags {
         WBT_TRACKED = 1,        /* write, tracked for throttling */
         WBT_READ = 2,           /* read */
-        WBT_SWAP = 4,           /* write, from swap_writepage() */
+        WBT_SWAP = 4,           /* write, from swap_writeout() */
         WBT_DISCARD = 8,        /* discard */
 
         WBT_NR_BITS = 4,        /* number of bits */
@@ -305,36 +305,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
                 .range_end = LLONG_MAX,
                 .for_reclaim = 1,
         };
-        unsigned long i;
+        struct folio *folio = NULL;
+        int error = 0;
 
         /*
          * Leave mmapings intact (GTT will have been revoked on unbinding,
-         * leaving only CPU mmapings around) and add those pages to the LRU
+         * leaving only CPU mmapings around) and add those folios to the LRU
          * instead of invoking writeback so they are aged and paged out
          * as normal.
          */
-
-        /* Begin writeback on each dirty page */
-        for (i = 0; i < size >> PAGE_SHIFT; i++) {
-                struct page *page;
-
-                page = find_lock_page(mapping, i);
-                if (!page)
-                        continue;
-
-                if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
-                        int ret;
-
-                        SetPageReclaim(page);
-                        ret = mapping->a_ops->writepage(page, &wbc);
-                        if (!PageWriteback(page))
-                                ClearPageReclaim(page);
-                        if (!ret)
-                                goto put;
-                }
-                unlock_page(page);
-put:
-                put_page(page);
+        while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+                if (folio_mapped(folio))
+                        folio_redirty_for_writepage(&wbc, folio);
+                else
+                        error = shmem_writeout(folio, &wbc);
         }
 }
@@ -120,13 +120,13 @@ ttm_backup_backup_page(struct file *backup, struct page *page,
                 .for_reclaim = 1,
         };
         folio_set_reclaim(to_folio);
-        ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+        ret = shmem_writeout(to_folio, &wbc);
         if (!folio_test_writeback(to_folio))
                 folio_clear_reclaim(to_folio);
         /*
-         * If writepage succeeds, it unlocks the folio.
-         * writepage() errors are otherwise dropped, since writepage()
-         * is only best effort here.
+         * If writeout succeeds, it unlocks the folio. errors
+         * are otherwise dropped, since writeout is only best
+         * effort here.
          */
         if (ret)
                 folio_unlock(to_folio);
@@ -164,4 +164,5 @@ const struct address_space_operations v9fs_addr_operations = {
         .invalidate_folio = netfs_invalidate_folio,
         .direct_IO = noop_direct_IO,
         .writepages = netfs_writepages,
+        .migrate_folio = filemap_migrate_folio,
 };
@@ -2730,7 +2730,7 @@ int block_truncate_page(struct address_space *mapping,
 EXPORT_SYMBOL(block_truncate_page);
 
 /*
- * The generic ->writepage function for buffer-backed address_spaces
+ * The generic write folio function for buffer-backed address_spaces
  */
 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
                 void *get_block)
@@ -2750,7 +2750,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
 
         /*
          * The folio straddles i_size. It must be zeroed out on each and every
-         * writepage invocation because it may be mmapped. "A file is mapped
+         * writeback invocation because it may be mmapped. "A file is mapped
          * in multiples of the page size. For a file that is not a multiple of
          * the page size, the remaining memory is zeroed when mapped, and
          * writes to that region are not written out to the file."
@@ -262,40 +262,42 @@ static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
         return sf_handle;
 }
 
-static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
+static int vboxsf_writepages(struct address_space *mapping,
+                             struct writeback_control *wbc)
 {
-        struct inode *inode = page->mapping->host;
+        struct inode *inode = mapping->host;
+        struct folio *folio = NULL;
         struct vboxsf_inode *sf_i = VBOXSF_I(inode);
         struct vboxsf_handle *sf_handle;
-        loff_t off = page_offset(page);
         loff_t size = i_size_read(inode);
-        u32 nwrite = PAGE_SIZE;
-        u8 *buf;
-        int err;
-
-        if (off + PAGE_SIZE > size)
-                nwrite = size & ~PAGE_MASK;
+        int error;
 
         sf_handle = vboxsf_get_write_handle(sf_i);
         if (!sf_handle)
                 return -EBADF;
 
-        buf = kmap(page);
-        err = vboxsf_write(sf_handle->root, sf_handle->handle,
-                           off, &nwrite, buf);
-        kunmap(page);
+        while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
+                loff_t off = folio_pos(folio);
+                u32 nwrite = folio_size(folio);
+                u8 *buf;
+
+                if (nwrite > size - off)
+                        nwrite = size - off;
+
+                buf = kmap_local_folio(folio, 0);
+                error = vboxsf_write(sf_handle->root, sf_handle->handle,
+                                     off, &nwrite, buf);
+                kunmap_local(buf);
+
+                folio_unlock(folio);
+        }
 
         kref_put(&sf_handle->refcount, vboxsf_handle_release);
 
-        if (err == 0) {
-                /* mtime changed */
+        /* mtime changed */
+        if (error == 0)
                 sf_i->force_restat = 1;
-        } else {
-                ClearPageUptodate(page);
-        }
-
-        unlock_page(page);
-        return err;
+        return error;
 }
 
 static int vboxsf_write_end(struct file *file, struct address_space *mapping,
|
||||||
*/
|
*/
|
||||||
const struct address_space_operations vboxsf_reg_aops = {
|
const struct address_space_operations vboxsf_reg_aops = {
|
||||||
.read_folio = vboxsf_read_folio,
|
.read_folio = vboxsf_read_folio,
|
||||||
.writepage = vboxsf_writepage,
|
.writepages = vboxsf_writepages,
|
||||||
.dirty_folio = filemap_dirty_folio,
|
.dirty_folio = filemap_dirty_folio,
|
||||||
.write_begin = simple_write_begin,
|
.write_begin = simple_write_begin,
|
||||||
.write_end = vboxsf_write_end,
|
.write_end = vboxsf_write_end,
|
||||||
|
.migrate_folio = filemap_migrate_folio,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
|
static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
|
||||||
|
|
|
||||||
|
|
@@ -433,7 +433,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 }
 
 struct address_space_operations {
-        int (*writepage)(struct page *page, struct writeback_control *wbc);
         int (*read_folio)(struct file *, struct folio *);
 
         /* Write back some dirty pages from this mapping. */
@@ -104,10 +104,11 @@ static inline bool shmem_mapping(struct address_space *mapping)
         return false;
 }
 #endif /* CONFIG_SHMEM */
-extern void shmem_unlock_mapping(struct address_space *mapping);
-extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+void shmem_unlock_mapping(struct address_space *mapping);
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp_mask);
-extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm/migrate.c (58 lines changed)
@@ -946,67 +946,21 @@ int filemap_migrate_folio(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
 
-/*
- * Writeback a folio to clean the dirty state
- */
-static int writeout(struct address_space *mapping, struct folio *folio)
-{
-        struct writeback_control wbc = {
-                .sync_mode = WB_SYNC_NONE,
-                .nr_to_write = 1,
-                .range_start = 0,
-                .range_end = LLONG_MAX,
-                .for_reclaim = 1
-        };
-        int rc;
-
-        if (!mapping->a_ops->writepage)
-                /* No write method for the address space */
-                return -EINVAL;
-
-        if (!folio_clear_dirty_for_io(folio))
-                /* Someone else already triggered a write */
-                return -EAGAIN;
-
-        /*
-         * A dirty folio may imply that the underlying filesystem has
-         * the folio on some queue. So the folio must be clean for
-         * migration. Writeout may mean we lose the lock and the
-         * folio state is no longer what we checked for earlier.
-         * At this point we know that the migration attempt cannot
-         * be successful.
-         */
-        remove_migration_ptes(folio, folio, 0);
-
-        rc = mapping->a_ops->writepage(&folio->page, &wbc);
-
-        if (rc != AOP_WRITEPAGE_ACTIVATE)
-                /* unlocked. Relock */
-                folio_lock(folio);
-
-        return (rc < 0) ? -EIO : -EAGAIN;
-}
-
 /*
  * Default handling if a filesystem does not provide a migration function.
  */
 static int fallback_migrate_folio(struct address_space *mapping,
                 struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-        if (folio_test_dirty(src)) {
-                /* Only writeback folios in full synchronous migration */
-                switch (mode) {
-                case MIGRATE_SYNC:
-                        break;
-                default:
-                        return -EBUSY;
-                }
-                return writeout(mapping, src);
-        }
+        WARN_ONCE(mapping->a_ops->writepages,
+                  "%ps does not implement migrate_folio\n",
+                  mapping->a_ops);
+        if (folio_test_dirty(src))
+                return -EBUSY;
 
         /*
-         * Buffers may be managed in a filesystem specific way.
-         * We must have no buffers or drop them.
+         * Filesystem may have private data at folio->private that we
+         * can't migrate automatically.
          */
         if (!filemap_release_folio(src, GFP_KERNEL))
                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
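The WARN_ONCE() added above fires for a filesystem that still writes data through ->writepages() but provides no migrate_folio method; the fix applied by the 9p and vboxsf hunks in this series is simply to supply one. A sketch of what that looks like in an aops table (the myfs_* entries are placeholders, not code from this series):

/* Hypothetical address_space_operations opting into folio migration. */
static const struct address_space_operations myfs_aops = {
        .read_folio     = myfs_read_folio,              /* placeholder */
        .writepages     = myfs_writepages,              /* placeholder */
        .dirty_folio    = filemap_dirty_folio,
        .migrate_folio  = filemap_migrate_folio,        /* no fs-private state to move */
};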
@@ -2621,27 +2621,6 @@ int write_cache_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(write_cache_pages);
 
-static int writeback_use_writepage(struct address_space *mapping,
-                struct writeback_control *wbc)
-{
-        struct folio *folio = NULL;
-        struct blk_plug plug;
-        int err;
-
-        blk_start_plug(&plug);
-        while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
-                err = mapping->a_ops->writepage(&folio->page, wbc);
-                if (err == AOP_WRITEPAGE_ACTIVATE) {
-                        folio_unlock(folio);
-                        err = 0;
-                }
-                mapping_set_error(mapping, err);
-        }
-        blk_finish_plug(&plug);
-
-        return err;
-}
-
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
         int ret;
@@ -2652,14 +2631,11 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
         wb = inode_to_wb_wbc(mapping->host, wbc);
         wb_bandwidth_estimate_start(wb);
         while (1) {
-                if (mapping->a_ops->writepages) {
+                if (mapping->a_ops->writepages)
                         ret = mapping->a_ops->writepages(mapping, wbc);
-                } else if (mapping->a_ops->writepage) {
-                        ret = writeback_use_writepage(mapping, wbc);
-                } else {
+                else
                         /* deal with chardevs and other special files */
                         ret = 0;
-                }
                 if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
                         break;
 
@@ -237,9 +237,8 @@ static void swap_zeromap_folio_clear(struct folio *folio)
  * We may have stale swap cache pages in memory: notice
  * them here and get rid of the unnecessary final write.
  */
-int swap_writepage(struct page *page, struct writeback_control *wbc)
+int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-        struct folio *folio = page_folio(page);
         int ret;
 
         if (folio_free_swap(folio)) {
mm/shmem.c (33 lines changed)
@@ -98,7 +98,7 @@ static struct vfsmount *shm_mnt __ro_after_init;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * shmem_fallocate communicates with shmem_fault or shmem_writeout via
  * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
@@ -107,7 +107,7 @@ struct shmem_falloc {
         pgoff_t start;          /* start of range currently being fallocated */
         pgoff_t next;           /* the next page offset to be fallocated */
         pgoff_t nr_falloced;    /* how many new pages have been fallocated */
-        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
+        pgoff_t nr_unswapped;   /* how often writeout refused to swap out */
 };
 
 struct shmem_options {
@@ -446,7 +446,7 @@ static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
         /*
          * Special case: whereas normally shmem_recalc_inode() is called
          * after i_mapping->nrpages has already been adjusted (up or down),
-         * shmem_writepage() has to raise swapped before nrpages is lowered -
+         * shmem_writeout() has to raise swapped before nrpages is lowered -
          * to stop a racing shmem_recalc_inode() from thinking that a page has
         * been freed. Compensate here, to avoid the need for a followup call.
          */
@@ -1536,12 +1536,15 @@ int shmem_unuse(unsigned int type)
         return error;
 }
 
-/*
- * Move the page from the page cache to the swap cache.
+/**
+ * shmem_writeout - Write the folio to swap
+ * @folio: The folio to write
+ * @wbc: How writeback is to be done
+ *
+ * Move the folio from the page cache to the swap cache.
  */
-static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-        struct folio *folio = page_folio(page);
         struct address_space *mapping = folio->mapping;
         struct inode *inode = mapping->host;
         struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1550,13 +1553,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
         int nr_pages;
         bool split = false;
 
-        /*
-         * Our capabilities prevent regular writeback or sync from ever calling
-         * shmem_writepage; but a stacking filesystem might use ->writepage of
-         * its underlying filesystem, in which case tmpfs should write out to
-         * swap only in response to memory pressure, and not for the writeback
-         * threads or sync.
-         */
         if (WARN_ON_ONCE(!wbc->for_reclaim))
                 goto redirty;
 
@@ -1586,9 +1582,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 try_split:
                 /* Ensure the subpages are still dirty */
                 folio_test_set_dirty(folio);
-                if (split_huge_page_to_list_to_order(page, wbc->list, 0))
+                if (split_folio_to_list(folio, wbc->list))
                         goto redirty;
-                folio = page_folio(page);
                 folio_clear_dirty(folio);
         }
 
@@ -1646,7 +1641,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 
                 mutex_unlock(&shmem_swaplist_mutex);
                 BUG_ON(folio_mapped(folio));
-                return swap_writepage(&folio->page, wbc);
+                return swap_writeout(folio, wbc);
         }
 
         list_del_init(&info->swaplist);
@@ -1660,6 +1655,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
         folio_unlock(folio);
         return 0;
 }
+EXPORT_SYMBOL_GPL(shmem_writeout);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
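With shmem_writeout() exported, an in-kernel shmem user can push a locked folio to swap directly, along the lines of the ttm_backup and i915 hunks earlier in this series. A sketch, reusing the reclaim-style writeback_control seen in the removed migrate.c writeout() helper; example_backup_folio() is a hypothetical caller, not code from this series:

/* Hypothetical caller of the new shmem_writeout() export. */
static int example_backup_folio(struct folio *folio)
{
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_NONE,
                .nr_to_write    = 1,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
                .for_reclaim    = 1,
        };
        int ret;

        folio_set_reclaim(folio);
        ret = shmem_writeout(folio, &wbc);      /* unlocks the folio when it succeeds */
        if (!folio_test_writeback(folio))
                folio_clear_reclaim(folio);
        if (ret)
                folio_unlock(folio);            /* best effort: drop the error, as ttm does */
        return ret;
}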
@@ -3768,7 +3764,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                         index--;
 
                 /*
-                 * Inform shmem_writepage() how far we have reached.
+                 * Inform shmem_writeout() how far we have reached.
                  * No need for lock or barrier: we have the page lock.
                  */
                 if (!folio_test_uptodate(folio))
@@ -5191,7 +5187,6 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 }
 
 static const struct address_space_operations shmem_aops = {
-        .writepage = shmem_writepage,
         .dirty_folio = noop_dirty_folio,
 #ifdef CONFIG_TMPFS
         .write_begin = shmem_write_begin,
@@ -20,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
                 __swap_read_unplug(plug);
 }
 void swap_write_unplug(struct swap_iocb *sio);
-int swap_writepage(struct page *page, struct writeback_control *wbc);
+int swap_writeout(struct folio *folio, struct writeback_control *wbc);
 void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
 
 /* linux/mm/swap_state.c */
@@ -141,7 +141,7 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
         return NULL;
 }
 
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+static inline int swap_writeout(struct folio *f, struct writeback_control *wbc)
 {
         return 0;
 }
@@ -30,7 +30,6 @@
  * vmscan's shrink_folio_list.
  */
 static const struct address_space_operations swap_aops = {
-        .writepage = swap_writepage,
         .dirty_folio = noop_dirty_folio,
 #ifdef CONFIG_MIGRATION
         .migrate_folio = migrate_folio,
@@ -2368,7 +2368,7 @@ static int try_to_unuse(unsigned int type)
                  * Limit the number of retries? No: when mmget_not_zero()
                  * above fails, that mm is likely to be freeing swap from
                  * exit_mmap(), which proceeds at its own independent pace;
-                 * and even shmem_writepage() could have been preempted after
+                 * and even shmem_writeout() could have been preempted after
                  * folio_alloc_swap(), temporarily hiding that swap. It's easy
                  * and robust (though cpu-intensive) just to keep retrying.
                  */
mm/vmscan.c (29 lines changed)
@@ -648,21 +648,20 @@ typedef enum {
 
 /*
  * pageout is called by shrink_folio_list() for each dirty folio.
- * Calls ->writepage().
  */
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
                          struct swap_iocb **plug, struct list_head *folio_list)
 {
+        int (*writeout)(struct folio *, struct writeback_control *);
+
         /*
-         * If the folio is dirty, only perform writeback if that write
-         * will be non-blocking. To prevent this allocation from being
-         * stalled by pagecache activity. But note that there may be
-         * stalls if we need to run get_block(). We could test
-         * PagePrivate for that.
-         *
-         * If this process is currently in __generic_file_write_iter() against
-         * this folio's queue, we can perform writeback even if that
-         * will block.
+         * We no longer attempt to writeback filesystem folios here, other
+         * than tmpfs/shmem. That's taken care of in page-writeback.
+         * If we find a dirty filesystem folio at the end of the LRU list,
+         * typically that means the filesystem is saturating the storage
+         * with contiguous writes and telling it to write a folio here
+         * would only make the situation worse by injecting an element
+         * of random access.
          *
          * If the folio is swapcache, write it back even if that would
         * block, for some throttling. This happens by accident, because
@@ -685,7 +684,11 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
                 }
                 return PAGE_KEEP;
         }
-        if (mapping->a_ops->writepage == NULL)
+        if (shmem_mapping(mapping))
+                writeout = shmem_writeout;
+        else if (folio_test_anon(folio))
+                writeout = swap_writeout;
+        else
                 return PAGE_ACTIVATE;
 
         if (folio_clear_dirty_for_io(folio)) {
@@ -708,7 +711,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
                         wbc.list = folio_list;
 
                 folio_set_reclaim(folio);
-                res = mapping->a_ops->writepage(&folio->page, &wbc);
+                res = writeout(folio, &wbc);
                 if (res < 0)
                         handle_write_error(mapping, folio, res);
                 if (res == AOP_WRITEPAGE_ACTIVATE) {
@@ -717,7 +720,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
                 }
 
                 if (!folio_test_writeback(folio)) {
-                        /* synchronous write or broken a_ops? */
+                        /* synchronous write? */
                         folio_clear_reclaim(folio);
                 }
                 trace_mm_vmscan_write_folio(folio);