vfs-6.13-rc7.fixes

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZ3vs1AAKCRCRxhvAZXjc
 omdqAP9Mn4HF85p5X7WRtUgrF7MGQft3EBfWE+sUxCMTc49NGQD/Ti7hqGNleEih
 MmjUjLZSG1e3lFHYQm0nqmjO2RexbQ0=
 =Li7D
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.13-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:

 - Relax assertions on failure to encode file handles

   The ->encode_fh() method can fail for various reasons. None of them
   warrant a WARN_ON(); a userspace sketch of these failure modes follows
   this list.

 - Fix overlayfs file handle encoding by allowing encoding an fid from
   an inode without an alias

 - Make sure fuse_dir_open() handles FOPEN_KEEP_CACHE. If it's not
   specified, fuse needs to invalidate the directory inode page cache

 - Fix qnx6 so it builds with gcc-15

 - Various fixes for netfslib and the ceph and nfs filesystems:
     - Ignore silly rename files from afs and nfs when building header
       archives
     - Fix read result collection in netfslib with multiple subrequests
     - Handle ENOMEM for netfslib buffered reads
     - Fix oops in nfs_netfs_init_request()
     - Parse the secctx command immediately in cachefiles
     - Remove a redundant smp_rmb() in netfslib
     - Handle recursion in read retry in netfslib
     - Fix clearing of folio_queue
     - Fix missing cancellation of copy-to-cache when the cache for a
       file is temporarily disabled in netfslib

 - Sanity check the hfs root record

 - Fix zero padding data issues in concurrent write scenarios

 - Fix is_mnt_ns_file() after converting nsfs to path_from_stashed()

 - Fix missing declaration of init_files

 - Increase I/O priority when writing revoke records in jbd2

 - Flush filesystem device before updating tail sequence in jbd2
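
To make the first point concrete, here is a hedged userspace sketch (not
part of this series) that exercises the same encode path via
name_to_handle_at(): an undersized buffer fails with EOVERFLOW and simply
reports the size it needs, and a filesystem without export support fails
with EOPNOTSUPP. Neither condition is kernel-alarming, which is why the
WARN_ON()s were dropped.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct file_handle *fh = malloc(sizeof(*fh));
	int mount_id;

	if (!fh)
		return 1;
	fh->handle_bytes = 0;	/* deliberately too small: probe for the size */

	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == -1) {
		if (errno == EOVERFLOW)	/* routine: retry with a bigger buffer */
			printf("handle needs %u bytes\n", fh->handle_bytes);
		else			/* e.g. EOPNOTSUPP: also routine */
			printf("cannot encode a handle: %s\n", strerror(errno));
	}
	free(fh);
	return 0;
}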

* tag 'vfs-6.13-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (23 commits)
  ovl: support encoding fid from inode with no alias
  ovl: pass realinode to ovl_encode_real_fh() instead of realdentry
  fuse: respect FOPEN_KEEP_CACHE on opendir
  netfs: Fix is-caching check in read-retry
  netfs: Fix the (non-)cancellation of copy when cache is temporarily disabled
  netfs: Fix ceph copy to cache on write-begin
  netfs: Work around recursion by abandoning retry if nothing read
  netfs: Fix missing barriers by using clear_and_wake_up_bit()
  netfs: Remove redundant use of smp_rmb()
  cachefiles: Parse the "secctx" immediately
  nfs: Fix oops in nfs_netfs_init_request() when copying to cache
  netfs: Fix enomem handling in buffered reads
  netfs: Fix non-contiguous donation between completed reads
  kheaders: Ignore silly-rename files
  fs: relax assertions on failure to encode file handles
  fs: fix missing declaration of init_files
  fs: fix is_mnt_ns_file()
  iomap: fix zero padding data issue in concurrent append writes
  iomap: pass byte granular end position to iomap_add_to_ioend
  jbd2: flush filesystem device before updating tail sequence
  ...
Commit fbfd64d25c by Linus Torvalds, 2025-01-06 10:26:39 -08:00
31 changed files with 217 additions and 123 deletions

---
@@ -57,6 +57,8 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
 	int err, len;
 
 	len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+	if (len > 0)
+		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 	netfs_write_subrequest_terminated(subreq, len ?: err, false);
 }
@@ -80,8 +82,10 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 	if (pos + total >= i_size_read(rreq->inode))
 		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
 
-	if (!err)
+	if (!err) {
 		subreq->transferred += total;
+		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+	}
 
 	netfs_read_subreq_terminated(subreq, err, false);
 }

---
@@ -122,7 +122,7 @@ static void afs_issue_write_worker(struct work_struct *work)
 	if (subreq->debug_index == 3)
 		return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
 
-	if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+	if (!subreq->retry_count) {
 		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
 		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
 	}
@@ -149,6 +149,9 @@ static void afs_issue_write_worker(struct work_struct *work)
 	afs_wait_for_operation(op);
 	ret = afs_put_operation(op);
 	switch (ret) {
+	case 0:
+		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+		break;
 	case -EACCES:
 	case -EPERM:
 	case -ENOKEY:

---
@@ -15,6 +15,7 @@
 #include <linux/namei.h>
 #include <linux/poll.h>
 #include <linux/mount.h>
+#include <linux/security.h>
 #include <linux/statfs.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
@@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
  */
 static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
 {
-	char *secctx;
+	int err;
 
 	_enter(",%s", args);
 
@@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
 		return -EINVAL;
 	}
 
-	if (cache->secctx) {
+	if (cache->have_secid) {
 		pr_err("Second security context specified\n");
 		return -EINVAL;
 	}
 
-	secctx = kstrdup(args, GFP_KERNEL);
-	if (!secctx)
-		return -ENOMEM;
+	err = security_secctx_to_secid(args, strlen(args), &cache->secid);
+	if (err)
+		return err;
 
-	cache->secctx = secctx;
+	cache->have_secid = true;
 	return 0;
 }
 
@@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
 	put_cred(cache->cache_cred);
 
 	kfree(cache->rootdirname);
-	kfree(cache->secctx);
 	kfree(cache->tag);
 
 	_leave("");

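For context, a hedged sketch of the userspace side of this interface (the
daemon protocol is real; the dir, tag and context values are illustrative):
cachefilesd configures the cache by writing one-line commands to
/dev/cachefiles, so after this change a malformed "secctx" is rejected when
that line is written rather than later at "bind" time.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int cachefiles_cmd(int fd, const char *line)
{
	/* Each command is one write(); the kernel parses it immediately. */
	if (write(fd, line, strlen(line)) < 0) {
		perror(line);
		return -1;
	}
	return 0;
}

int main(void)
{
	int fd = open("/dev/cachefiles", O_RDWR);

	if (fd < 0) {
		perror("/dev/cachefiles");
		return 1;
	}
	cachefiles_cmd(fd, "dir /var/cache/fscache");
	cachefiles_cmd(fd, "tag mycache");
	/* Now converted to a secid at parse time; bad contexts fail here. */
	cachefiles_cmd(fd, "secctx system_u:system_r:cachefiles_kernel_t:s0");
	cachefiles_cmd(fd, "bind");
	close(fd);
	return 0;
}
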
---
@@ -122,7 +122,6 @@ struct cachefiles_cache {
 #define CACHEFILES_STATE_CHANGED	3	/* T if state changed (poll trigger) */
 #define CACHEFILES_ONDEMAND_MODE	4	/* T if in on-demand read mode */
 	char				*rootdirname;	/* name of cache root directory */
-	char				*secctx;	/* LSM security context */
 	char				*tag;		/* cache binding tag */
 	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
 	struct xarray			reqs;		/* xarray of pending on-demand requests */
@@ -130,6 +129,8 @@ struct cachefiles_cache {
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
 	u32				msg_id_next;
+	u32				secid;		/* LSM security id */
+	bool				have_secid;	/* whether "secid" was set */
 };
 
 static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)

---
@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
 	struct cred *new;
 	int ret;
 
-	_enter("{%s}", cache->secctx);
+	_enter("{%u}", cache->have_secid ? cache->secid : 0);
 
 	new = prepare_kernel_cred(current);
 	if (!new) {
@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
 		goto error;
 	}
 
-	if (cache->secctx) {
-		ret = set_security_override_from_ctx(new, cache->secctx);
+	if (cache->have_secid) {
+		ret = set_security_override(new, cache->secid);
 		if (ret < 0) {
 			put_cred(new);
 			pr_err("Security denies permission to nominate security context: error %d\n",

---
@@ -22,6 +22,7 @@
 #include <linux/close_range.h>
 #include <linux/file_ref.h>
 #include <net/sock.h>
+#include <linux/init_task.h>
 
 #include "internal.h"

---
@@ -1681,6 +1681,8 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
 		 */
 		if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
 			nonseekable_open(inode, file);
+		if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+			invalidate_inode_pages2(inode->i_mapping);
 	}
 
 	return err;

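From the filesystem daemon's point of view, FOPEN_KEEP_CACHE is requested by
setting keep_cache in the open reply. A minimal libfuse 3 sketch, assuming
the library forwards keep_cache on opendir replies the same way it does for
open (hedged; not part of this patch):

#define FUSE_USE_VERSION 31
#include <fuse3/fuse.h>

static int keep_opendir(const char *path, struct fuse_file_info *fi)
{
	(void)path;
	fi->keep_cache = 1;	/* becomes FOPEN_KEEP_CACHE in the reply */
	return 0;
}

static const struct fuse_operations keep_ops = {
	.opendir = keep_opendir,
};

int main(int argc, char *argv[])
{
	return fuse_main(argc, argv, &keep_ops, NULL);
}

A daemon that does not set the flag now gets a clean-slate directory page
cache on every open, which is the point of the fix.
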
---
@@ -349,11 +349,13 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
 		goto bail_no_root;
 	res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
 	if (!res) {
-		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+		if (fd.entrylength != sizeof(rec.dir)) {
 			res = -EIO;
 			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+		if (rec.type != HFS_CDR_DIR)
+			res = -EIO;
 	}
 	if (res)
 		goto bail_hfs_find;

---
@@ -1774,7 +1774,8 @@ static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
  */
 static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct folio *folio,
-		struct inode *inode, loff_t pos, unsigned len)
+		struct inode *inode, loff_t pos, loff_t end_pos,
+		unsigned len)
 {
 	struct iomap_folio_state *ifs = folio->private;
 	size_t poff = offset_in_folio(folio, pos);
@@ -1793,15 +1794,60 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
 	if (ifs)
 		atomic_add(len, &ifs->write_bytes_pending);
+
+	/*
+	 * Clamp io_offset and io_size to the incore EOF so that ondisk
+	 * file size updates in the ioend completion are byte-accurate.
+	 * This avoids recovering files with zeroed tail regions when
+	 * writeback races with appending writes:
+	 *
+	 *    Thread 1:                  Thread 2:
+	 *    ------------               -----------
+	 *    write [A, A+B]
+	 *    update inode size to A+B
+	 *    submit I/O [A, A+BS]
+	 *                               write [A+B, A+B+C]
+	 *                               update inode size to A+B+C
+	 *    <I/O completes, updates disk size to min(A+B+C, A+BS)>
+	 *    <power failure>
+	 *
+	 * After reboot:
+	 *    1) with A+B+C < A+BS, the file has zero padding in range
+	 *       [A+B, A+B+C]
+	 *
+	 *    |<   Block Size (BS)   >|
+	 *    |DDDDDDDDDDDD000000000000|
+	 *    ^           ^           ^
+	 *    A          A+B        A+B+C
+	 *                           (EOF)
+	 *
+	 *    2) with A+B+C > A+BS, the file has zero padding in range
+	 *       [A+B, A+BS]
+	 *
+	 *    |<   Block Size (BS)   >|<   Block Size (BS)   >|
+	 *    |DDDDDDDDDDDD000000000000|000000000000000000000000|
+	 *    ^           ^            ^                       ^
+	 *    A          A+B          A+BS                   A+B+C
+	 *                                                    (EOF)
+	 *
+	 *    D = Valid Data
+	 *    0 = Zero Padding
+	 *
+	 * Note that this defeats the ability to chain the ioends of
+	 * appending writes.
+	 */
 	wpc->ioend->io_size += len;
+	if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
+		wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;
+
 	wbc_account_cgroup_owner(wbc, folio, len);
 	return 0;
 }
 
 static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct folio *folio,
-		struct inode *inode, u64 pos, unsigned dirty_len,
-		unsigned *count)
+		struct inode *inode, u64 pos, u64 end_pos,
+		unsigned dirty_len, unsigned *count)
 {
 	int error;
@@ -1826,7 +1872,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
 			break;
 		default:
 			error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
-					map_len);
+					end_pos, map_len);
 			if (!error)
 				(*count)++;
 			break;
@@ -1897,11 +1943,11 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
 		 * remaining memory is zeroed when mapped, and writes to that
 		 * region are not written out to the file.
 		 *
-		 * Also adjust the writeback range to skip all blocks entirely
-		 * beyond i_size.
+		 * Also adjust the end_pos to the end of file and skip writeback
+		 * for all blocks entirely beyond i_size.
 		 */
 		folio_zero_segment(folio, poff, folio_size(folio));
-		*end_pos = round_up(isize, i_blocksize(inode));
+		*end_pos = isize;
 	}
 
 	return true;
@@ -1914,6 +1960,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	struct inode *inode = folio->mapping->host;
 	u64 pos = folio_pos(folio);
 	u64 end_pos = pos + folio_size(folio);
+	u64 end_aligned = 0;
 	unsigned count = 0;
 	int error = 0;
 	u32 rlen;
@@ -1955,9 +2002,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	/*
 	 * Walk through the folio to find dirty areas to write back.
 	 */
-	while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
+	end_aligned = round_up(end_pos, i_blocksize(inode));
+	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
 		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
-				pos, rlen, &count);
+				pos, end_pos, rlen, &count);
 		if (error)
 			break;
 		pos += rlen;

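The clamp is plain interval arithmetic. A standalone worked example with
hypothetical numbers (A = 0, B = 300 bytes written, BS = 4096), not taken
from the patch:

#include <stdio.h>

int main(void)
{
	long long A = 0, B = 300, BS = 4096;	/* hypothetical values */
	long long io_offset = A;
	long long io_size = BS;		/* block-aligned submission size */
	long long end_pos = A + B;	/* incore EOF when the ioend is built */

	if (io_offset + io_size > end_pos)
		io_size = end_pos - io_offset;

	/* Completion now records 300 bytes on disk, not 4096. */
	printf("on-disk size after completion: %lld\n", io_offset + io_size);
	return 0;
}
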
---
@@ -772,9 +772,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	/*
 	 * If the journal is not located on the file system device,
 	 * then we must flush the file system device before we issue
-	 * the commit record
+	 * the commit record and update the journal tail sequence.
 	 */
-	if (commit_transaction->t_need_data_flush &&
+	if ((commit_transaction->t_need_data_flush || update_tail) &&
 	    (journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
 		blkdev_issue_flush(journal->j_fs_dev);

---
@@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal,
 	set_buffer_jwrite(descriptor);
 	BUFFER_TRACE(descriptor, "write");
 	set_buffer_dirty(descriptor);
-	write_dirty_buffer(descriptor, REQ_SYNC);
+	write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS);
 }
 #endif

---
@@ -2055,9 +2055,15 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
 
 static bool is_mnt_ns_file(struct dentry *dentry)
 {
+	struct ns_common *ns;
+
 	/* Is this a proxy for a mount namespace? */
-	return dentry->d_op == &ns_dentry_operations &&
-	       dentry->d_fsdata == &mntns_operations;
+	if (dentry->d_op != &ns_dentry_operations)
+		return false;
+
+	ns = d_inode(dentry)->i_private;
+
+	return ns->ops == &mntns_operations;
 }
 
 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)

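The property being restored: a mount-namespace proxy file is an nsfs inode
whose ns_common ops are mntns_operations. From outside the kernel, the
closest observable check is that the file lives on nsfs at all, for example
(a hedged companion sketch, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/statfs.h>
#include <unistd.h>
#include <linux/magic.h>	/* NSFS_MAGIC */

int main(void)
{
	struct statfs sfs;
	int fd = open("/proc/self/ns/mnt", O_RDONLY);

	if (fd < 0 || fstatfs(fd, &sfs) < 0) {
		perror("/proc/self/ns/mnt");
		return 1;
	}
	printf("on nsfs? %s\n", sfs.f_type == NSFS_MAGIC ? "yes" : "no");
	close(fd);
	return 0;
}
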
---
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			netfs_stat(&netfs_n_rh_download);
 			if (rreq->netfs_ops->prepare_read) {
 				ret = rreq->netfs_ops->prepare_read(subreq);
-				if (ret < 0) {
-					atomic_dec(&rreq->nr_outstanding);
-					netfs_put_subrequest(subreq, false,
-							     netfs_sreq_trace_put_cancel);
-					break;
-				}
+				if (ret < 0)
+					goto prep_failed;
 				trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
 			}
 
 			slice = netfs_prepare_read_iterator(subreq);
-			if (slice < 0) {
-				atomic_dec(&rreq->nr_outstanding);
-				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
-				ret = slice;
-				break;
-			}
+			if (slice < 0)
+				goto prep_iter_failed;
 
 			rreq->netfs_ops->issue_read(subreq);
 			goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 			netfs_stat(&netfs_n_rh_zero);
 			slice = netfs_prepare_read_iterator(subreq);
+			if (slice < 0)
+				goto prep_iter_failed;
 			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 			netfs_read_subreq_terminated(subreq, 0, false);
 			goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		if (source == NETFS_READ_FROM_CACHE) {
 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 			slice = netfs_prepare_read_iterator(subreq);
+			if (slice < 0)
+				goto prep_iter_failed;
 			netfs_read_cache_to_pagecache(rreq, subreq);
 			goto done;
 		}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		WARN_ON_ONCE(1);
 		break;
 
+	prep_iter_failed:
+		ret = slice;
+	prep_failed:
+		subreq->error = ret;
+		atomic_dec(&rreq->nr_outstanding);
+		netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+		break;
+
 	done:
 		size -= slice;
 		start += slice;

---
@@ -104,7 +104,6 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
 		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
 			    TASK_UNINTERRUPTIBLE);
-		smp_rmb(); /* Read error/transferred after RIP flag */
 		ret = wreq->error;
 		if (ret == 0) {
 			ret = wreq->transferred;

---
@@ -62,10 +62,14 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
 		} else {
 			trace_netfs_folio(folio, netfs_folio_trace_read_done);
 		}
+
+		folioq_clear(folioq, slot);
 	} else {
 		// TODO: Use of PG_private_2 is deprecated.
 		if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
 			netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
+		else
+			folioq_clear(folioq, slot);
 	}
 
 	if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
@@ -77,8 +81,6 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
 			folio_unlock(folio);
 		}
 	}
-
-	folioq_clear(folioq, slot);
 }
 
 /*
@@ -247,16 +249,17 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was
 	/* Deal with the trickiest case: that this subreq is in the middle of a
 	 * folio, not touching either edge, but finishes first. In such a
-	 * case, we donate to the previous subreq, if there is one, so that the
-	 * donation is only handled when that completes - and remove this
-	 * subreq from the list.
+	 * case, we donate to the previous subreq, if there is one and if it is
+	 * contiguous, so that the donation is only handled when that completes
+	 * - and remove this subreq from the list.
 	 *
 	 * If the previous subreq finished first, we will have acquired their
 	 * donation and should be able to unlock folios and/or donate nextwards.
 	 */
 	if (!subreq->consumed &&
 	    !prev_donated &&
-	    !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+	    !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
+	    subreq->start == prev->start + prev->len) {
 		prev = list_prev_entry(subreq, rreq_link);
 		WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
 		subreq->start += subreq->len;
@@ -378,8 +381,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq)
 	task_io_account_read(rreq->transferred);
 
 	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
-	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 
 	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
 	netfs_clear_subrequests(rreq, false);
@@ -438,7 +440,7 @@ void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
 	     rreq->origin == NETFS_READPAGE ||
 	     rreq->origin == NETFS_READ_FOR_WRITE)) {
 		netfs_consume_read_data(subreq, was_async);
-		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 	}
 }
 EXPORT_SYMBOL(netfs_read_subreq_progress);
@@ -497,7 +499,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
 		     rreq->origin == NETFS_READPAGE ||
 		     rreq->origin == NETFS_READ_FOR_WRITE)) {
 			netfs_consume_read_data(subreq, was_async);
-			__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+			__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 		}
 		rreq->transferred += subreq->transferred;
 	}
@@ -511,10 +513,13 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
 	} else {
 		trace_netfs_sreq(subreq, netfs_sreq_trace_short);
 		if (subreq->transferred > subreq->consumed) {
-			__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
-			__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-			set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
-		} else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+			/* If we didn't read new data, abandon retry. */
+			if (subreq->retry_count &&
+			    test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
+				__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+				set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
+			}
+		} else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
 			__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
 			set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
 		} else {

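The MADE_PROGRESS/retry_count logic above amounts to the classic rule for
short-read loops: retry only while the last pass transferred something new,
otherwise abandon rather than spin. A userspace analogue of that policy
(illustrative only):

#include <errno.h>
#include <unistd.h>

/* Read exactly len bytes or fail; retries are gated on forward progress. */
static ssize_t read_full(int fd, char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = read(fd, buf + done, len - done);

		if (n > 0) {		/* made progress: retrying is useful */
			done += n;
			continue;
		}
		if (n == 0)		/* EOF, like NETFS_SREQ_HIT_EOF */
			break;
		if (errno == EINTR)	/* transient, nothing consumed */
			continue;
		return -1;		/* hard failure, like NETFS_SREQ_FAILED */
	}
	return done;
}
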
---
@@ -170,6 +170,10 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
 	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
 	netfs_stat(&netfs_n_wh_copy_to_cache);
+	if (!wreq->io_streams[1].avail) {
+		netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+		goto couldnt_start;
+	}
 
 	for (;;) {
 		error = netfs_pgpriv2_copy_folio(wreq, folio);

---
@@ -49,13 +49,15 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
 	 * up to the first permanently failed one.
 	 */
 	if (!rreq->netfs_ops->prepare_read &&
-	    !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
+	    !rreq->cache_resources.ops) {
 		struct netfs_io_subrequest *subreq;
 
 		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
 			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
 				break;
 			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+				subreq->retry_count++;
 				netfs_reset_iter(subreq);
 				netfs_reissue_read(rreq, subreq);
 			}
@@ -137,7 +139,8 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
 			stream0->sreq_max_len = subreq->len;
 
 			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
-			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+			subreq->retry_count++;
 
 			spin_lock_bh(&rreq->lock);
 			list_add_tail(&subreq->rreq_link, &rreq->subrequests);
@@ -213,7 +216,6 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
 		subreq->error = -ENOMEM;
 		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
 		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
-		__clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
 	}
 	spin_lock_bh(&rreq->lock);
 	list_splice_tail_init(&queue, &rreq->subrequests);

---
@@ -179,7 +179,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
 				struct iov_iter source = subreq->io_iter;
 
 				iov_iter_revert(&source, subreq->len - source.count);
-				__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
 				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
 				netfs_reissue_write(stream, subreq, &source);
 			}
@@ -234,7 +233,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
 			/* Renegotiate max_len (wsize) */
 			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
 			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
-			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+			subreq->retry_count++;
 			stream->prepare_write(subreq);
 
 			part = min(len, stream->sreq_max_len);
@@ -279,7 +278,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
 			subreq->start		= start;
 			subreq->debug_index	= atomic_inc_return(&wreq->subreq_counter);
 			subreq->stream_nr	= to->stream_nr;
-			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+			subreq->retry_count	= 1;
 
 			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
 					     refcount_read(&subreq->ref),
@@ -501,8 +500,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 		goto need_retry;
 	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
 		trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
-		clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
-		wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
+		clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags);
 	}
 
 	if (notes & NEED_REASSESS) {
@@ -605,8 +603,7 @@ void netfs_write_collection_worker(struct work_struct *work)
 	_debug("finished");
 	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
-	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
-	wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
 
 	if (wreq->iocb) {
 		size_t written = min(wreq->transferred, wreq->len);
@@ -714,8 +711,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
 	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
 
-	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
-	wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
+	clear_and_wake_up_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
 
 	/* If we are at the head of the queue, wake up the collector,
 	 * transferring a ref to it if we were the ones to do so.

---
@@ -244,6 +244,8 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
 	iov_iter_advance(source, size);
 	iov_iter_truncate(&subreq->io_iter, size);
 
+	subreq->retry_count++;
+	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
 	netfs_do_issue_write(stream, subreq);
 }

---
@@ -263,6 +263,12 @@ int nfs_netfs_readahead(struct readahead_control *ractl)
 static atomic_t nfs_netfs_debug_id;
 static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
+	if (!file) {
+		if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
+			return -EIO;
+		return 0;
+	}
+
 	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
 	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
 	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
@@ -274,7 +280,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
 
 static void nfs_netfs_free_request(struct netfs_io_request *rreq)
 {
-	put_nfs_open_context(rreq->netfs_priv);
+	if (rreq->netfs_priv)
+		put_nfs_open_context(rreq->netfs_priv);
 }
 
 static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)

---
@@ -47,10 +47,8 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
 	size = f->handle_bytes >> 2;
 
 	ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size);
-	if ((ret == FILEID_INVALID) || (ret < 0)) {
-		WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+	if ((ret == FILEID_INVALID) || (ret < 0))
 		return;
-	}
 
 	f->handle_type = ret;
 	f->handle_bytes = size * sizeof(u32);
View File
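The handle encoded here is what shows up in the fhandle-bytes/f_handle
fields of /proc/<pid>/fdinfo for an inotify descriptor; on filesystems that
cannot encode handles the fields are now simply absent instead of tripping a
WARN_ONCE. A small reader (hedged; field names as documented for fdinfo):

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char path[64], buf[4096];
	int ifd = inotify_init1(0);
	size_t n;
	FILE *f;

	if (ifd < 0 || inotify_add_watch(ifd, "/tmp", IN_CREATE) < 0)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", ifd);
	f = fopen(path, "r");
	while (f && (n = fread(buf, 1, sizeof(buf) - 1, f)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* look for fhandle-bytes:/f_handle: */
	}
	if (f)
		fclose(f);
	close(ifd);
	return 0;
}
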

@ -415,13 +415,13 @@ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
return err; return err;
} }
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real, struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
bool is_upper) bool is_upper)
{ {
struct ovl_fh *fh; struct ovl_fh *fh;
int fh_type, dwords; int fh_type, dwords;
int buflen = MAX_HANDLE_SZ; int buflen = MAX_HANDLE_SZ;
uuid_t *uuid = &real->d_sb->s_uuid; uuid_t *uuid = &realinode->i_sb->s_uuid;
int err; int err;
/* Make sure the real fid stays 32bit aligned */ /* Make sure the real fid stays 32bit aligned */
@ -438,13 +438,13 @@ struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
* the price or reconnecting the dentry. * the price or reconnecting the dentry.
*/ */
dwords = buflen >> 2; dwords = buflen >> 2;
fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0); fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid,
&dwords, NULL, 0);
buflen = (dwords << 2); buflen = (dwords << 2);
err = -EIO; err = -EIO;
if (WARN_ON(fh_type < 0) || if (fh_type < 0 || fh_type == FILEID_INVALID ||
WARN_ON(buflen > MAX_HANDLE_SZ) || WARN_ON(buflen > MAX_HANDLE_SZ))
WARN_ON(fh_type == FILEID_INVALID))
goto out_err; goto out_err;
fh->fb.version = OVL_FH_VERSION; fh->fb.version = OVL_FH_VERSION;
@ -480,7 +480,7 @@ struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
if (!ovl_can_decode_fh(origin->d_sb)) if (!ovl_can_decode_fh(origin->d_sb))
return NULL; return NULL;
return ovl_encode_real_fh(ofs, origin, false); return ovl_encode_real_fh(ofs, d_inode(origin), false);
} }
int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh, int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
@ -505,7 +505,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
const struct ovl_fh *fh; const struct ovl_fh *fh;
int err; int err;
fh = ovl_encode_real_fh(ofs, upper, true); fh = ovl_encode_real_fh(ofs, d_inode(upper), true);
if (IS_ERR(fh)) if (IS_ERR(fh))
return PTR_ERR(fh); return PTR_ERR(fh);

View File

@ -176,35 +176,37 @@ static int ovl_connect_layer(struct dentry *dentry)
* *
* Return 0 for upper file handle, > 0 for lower file handle or < 0 on error. * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error.
*/ */
static int ovl_check_encode_origin(struct dentry *dentry) static int ovl_check_encode_origin(struct inode *inode)
{ {
struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct ovl_fs *ofs = OVL_FS(inode->i_sb);
bool decodable = ofs->config.nfs_export; bool decodable = ofs->config.nfs_export;
struct dentry *dentry;
int err;
/* No upper layer? */ /* No upper layer? */
if (!ovl_upper_mnt(ofs)) if (!ovl_upper_mnt(ofs))
return 1; return 1;
/* Lower file handle for non-upper non-decodable */ /* Lower file handle for non-upper non-decodable */
if (!ovl_dentry_upper(dentry) && !decodable) if (!ovl_inode_upper(inode) && !decodable)
return 1; return 1;
/* Upper file handle for pure upper */ /* Upper file handle for pure upper */
if (!ovl_dentry_lower(dentry)) if (!ovl_inode_lower(inode))
return 0; return 0;
/* /*
* Root is never indexed, so if there's an upper layer, encode upper for * Root is never indexed, so if there's an upper layer, encode upper for
* root. * root.
*/ */
if (dentry == dentry->d_sb->s_root) if (inode == d_inode(inode->i_sb->s_root))
return 0; return 0;
/* /*
* Upper decodable file handle for non-indexed upper. * Upper decodable file handle for non-indexed upper.
*/ */
if (ovl_dentry_upper(dentry) && decodable && if (ovl_inode_upper(inode) && decodable &&
!ovl_test_flag(OVL_INDEX, d_inode(dentry))) !ovl_test_flag(OVL_INDEX, inode))
return 0; return 0;
/* /*
@ -213,14 +215,23 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by * ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor. * copying up a "connectable" ancestor.
*/ */
if (d_is_dir(dentry) && decodable) if (!decodable || !S_ISDIR(inode->i_mode))
return ovl_connect_layer(dentry); return 1;
dentry = d_find_any_alias(inode);
if (!dentry)
return -ENOENT;
err = ovl_connect_layer(dentry);
dput(dentry);
if (err < 0)
return err;
/* Lower file handle for indexed and non-upper dir/non-dir */ /* Lower file handle for indexed and non-upper dir/non-dir */
return 1; return 1;
} }
static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry, static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct inode *inode,
u32 *fid, int buflen) u32 *fid, int buflen)
{ {
struct ovl_fh *fh = NULL; struct ovl_fh *fh = NULL;
@ -231,13 +242,13 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
* Check if we should encode a lower or upper file handle and maybe * Check if we should encode a lower or upper file handle and maybe
* copy up an ancestor to make lower file handle connectable. * copy up an ancestor to make lower file handle connectable.
*/ */
err = enc_lower = ovl_check_encode_origin(dentry); err = enc_lower = ovl_check_encode_origin(inode);
if (enc_lower < 0) if (enc_lower < 0)
goto fail; goto fail;
/* Encode an upper or lower file handle */ /* Encode an upper or lower file handle */
fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_dentry_lower(dentry) : fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_inode_lower(inode) :
ovl_dentry_upper(dentry), !enc_lower); ovl_inode_upper(inode), !enc_lower);
if (IS_ERR(fh)) if (IS_ERR(fh))
return PTR_ERR(fh); return PTR_ERR(fh);
@ -251,8 +262,8 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
return err; return err;
fail: fail:
pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n", pr_warn_ratelimited("failed to encode file handle (ino=%lu, err=%i)\n",
dentry, err); inode->i_ino, err);
goto out; goto out;
} }
@ -260,19 +271,13 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
struct inode *parent) struct inode *parent)
{ {
struct ovl_fs *ofs = OVL_FS(inode->i_sb); struct ovl_fs *ofs = OVL_FS(inode->i_sb);
struct dentry *dentry;
int bytes, buflen = *max_len << 2; int bytes, buflen = *max_len << 2;
/* TODO: encode connectable file handles */ /* TODO: encode connectable file handles */
if (parent) if (parent)
return FILEID_INVALID; return FILEID_INVALID;
dentry = d_find_any_alias(inode); bytes = ovl_dentry_to_fid(ofs, inode, fid, buflen);
if (!dentry)
return FILEID_INVALID;
bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen);
dput(dentry);
if (bytes <= 0) if (bytes <= 0)
return FILEID_INVALID; return FILEID_INVALID;

View File

@ -542,7 +542,7 @@ int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
struct ovl_fh *fh; struct ovl_fh *fh;
int err; int err;
fh = ovl_encode_real_fh(ofs, real, is_upper); fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper);
err = PTR_ERR(fh); err = PTR_ERR(fh);
if (IS_ERR(fh)) { if (IS_ERR(fh)) {
fh = NULL; fh = NULL;
@ -738,7 +738,7 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
struct ovl_fh *fh; struct ovl_fh *fh;
int err; int err;
fh = ovl_encode_real_fh(ofs, origin, false); fh = ovl_encode_real_fh(ofs, d_inode(origin), false);
if (IS_ERR(fh)) if (IS_ERR(fh))
return PTR_ERR(fh); return PTR_ERR(fh);

View File

@ -865,7 +865,7 @@ int ovl_copy_up_with_data(struct dentry *dentry);
int ovl_maybe_copy_up(struct dentry *dentry, int flags); int ovl_maybe_copy_up(struct dentry *dentry, int flags);
int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new); int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new);
int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat); int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real, struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
bool is_upper); bool is_upper);
struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin); struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin);
int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh, int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,

---
@@ -179,8 +179,7 @@ static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf)
  */
 static const char *qnx6_checkroot(struct super_block *s)
 {
-	static char match_root[2][3] = {".\0\0", "..\0"};
-	int i, error = 0;
+	int error = 0;
 	struct qnx6_dir_entry *dir_entry;
 	struct inode *root = d_inode(s->s_root);
 	struct address_space *mapping = root->i_mapping;
@@ -189,11 +188,9 @@ static const char *qnx6_checkroot(struct super_block *s)
 	if (IS_ERR(folio))
 		return "error reading root directory";
 	dir_entry = kmap_local_folio(folio, 0);
-	for (i = 0; i < 2; i++) {
-		/* maximum 3 bytes - due to match_root limitation */
-		if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
-			error = 1;
-	}
+	if (memcmp(dir_entry[0].de_fname, ".", 2) ||
+	    memcmp(dir_entry[1].de_fname, "..", 3))
+		error = 1;
 	folio_release_kmap(folio, dir_entry);
 	if (error)
 		return "error reading root directory.";
View File
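Why gcc-15 choked, as far as I can tell: each old initializer was four bytes
counting the implicit terminator, one more than char[3] holds. Dropping the
terminator is legal C, but GCC 15's new -Wunterminated-string-initialization
warning (promoted to an error in the kernel build) flags it, hence the
switch to direct memcmp() calls with explicit lengths. In isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* char a[3] = "..\0"; would now warn: the terminator does not fit. */
	const char fname1[] = ".", fname2[] = "..";

	/* The replacement compares the NUL explicitly, so intent is clear. */
	printf("%d %d\n",
	       memcmp(fname1, ".", 2),	/* 0: '.' plus its terminator */
	       memcmp(fname2, "..", 3));	/* 0: '..' plus its terminator */
	return 0;
}
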

@ -1319,14 +1319,16 @@ cifs_readv_callback(struct mid_q_entry *mid)
} }
if (rdata->result == -ENODATA) { if (rdata->result == -ENODATA) {
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0; rdata->result = 0;
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
} else { } else {
size_t trans = rdata->subreq.transferred + rdata->got_bytes; size_t trans = rdata->subreq.transferred + rdata->got_bytes;
if (trans < rdata->subreq.len && if (trans < rdata->subreq.len &&
rdata->subreq.start + trans == ictx->remote_i_size) { rdata->subreq.start + trans == ictx->remote_i_size) {
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0; rdata->result = 0;
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
} else if (rdata->got_bytes > 0) {
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
} }
} }
@ -1670,10 +1672,13 @@ cifs_writev_callback(struct mid_q_entry *mid)
if (written > wdata->subreq.len) if (written > wdata->subreq.len)
written &= 0xFFFF; written &= 0xFFFF;
if (written < wdata->subreq.len) if (written < wdata->subreq.len) {
result = -ENOSPC; result = -ENOSPC;
else } else {
result = written; result = written;
if (written > 0)
__set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
}
break; break;
case MID_REQUEST_SUBMITTED: case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED: case MID_RETRY_NEEDED:

---
@@ -4615,6 +4615,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 			__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
 			rdata->result = 0;
 		}
+		__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
 	}
 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
 			      server->credits, server->in_flight,
@@ -4842,10 +4843,12 @@ smb2_writev_callback(struct mid_q_entry *mid)
 		cifs_stats_bytes_written(tcon, written);
 
-		if (written < wdata->subreq.len)
+		if (written < wdata->subreq.len) {
 			wdata->result = -ENOSPC;
-		else
+		} else if (written > 0) {
 			wdata->subreq.len = written;
+			__set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
+		}
 		break;
 	case MID_REQUEST_SUBMITTED:
 	case MID_RETRY_NEEDED:
@@ -5014,7 +5017,7 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
 	}
 #endif
 
-	if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
+	if (wdata->subreq.retry_count > 0)
 		smb2_set_replay(server, &rqst);
 
 	cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",

---
@@ -335,7 +335,7 @@ struct iomap_ioend {
 	u16			io_type;
 	u16			io_flags;	/* IOMAP_F_* */
 	struct inode		*io_inode;	/* file being written to */
-	size_t			io_size;	/* size of the extent */
+	size_t			io_size;	/* size of data within eof */
 	loff_t			io_offset;	/* offset in the file */
 	sector_t		io_sector;	/* start sector of ioend */
 	struct bio		io_bio;		/* MUST BE LAST! */

---
@@ -185,6 +185,7 @@ struct netfs_io_subrequest {
 	short			error;		/* 0 or error that occurred */
 	unsigned short		debug_index;	/* Index in list (for debugging output) */
 	unsigned int		nr_segs;	/* Number of segs in io_iter */
+	u8			retry_count;	/* The number of retries (0 on initial pass) */
 	enum netfs_io_source	source;		/* Where to read from/write to */
 	unsigned char		stream_nr;	/* I/O stream this belongs to */
 	unsigned char		curr_folioq_slot; /* Folio currently being read */
@@ -194,14 +195,13 @@ struct netfs_io_subrequest {
 #define NETFS_SREQ_COPY_TO_CACHE	0	/* Set if should copy the data to the cache */
 #define NETFS_SREQ_CLEAR_TAIL		1	/* Set if the rest of the read should be cleared */
 #define NETFS_SREQ_SEEK_DATA_READ	3	/* Set if ->read() should SEEK_DATA first */
-#define NETFS_SREQ_NO_PROGRESS		4	/* Set if we didn't manage to read any data */
+#define NETFS_SREQ_MADE_PROGRESS	4	/* Set if we transferred at least some data */
 #define NETFS_SREQ_ONDEMAND		5	/* Set if it's from on-demand read mode */
 #define NETFS_SREQ_BOUNDARY		6	/* Set if ends on hard boundary (eg. ceph object) */
 #define NETFS_SREQ_HIT_EOF		7	/* Set if short due to EOF */
 #define NETFS_SREQ_IN_PROGRESS		8	/* Unlocked when the subrequest completes */
 #define NETFS_SREQ_NEED_RETRY		9	/* Set if the filesystem requests a retry */
-#define NETFS_SREQ_RETRYING		10	/* Set if we're retrying */
-#define NETFS_SREQ_FAILED		11	/* Set if the subreq failed unretryably */
+#define NETFS_SREQ_FAILED		10	/* Set if the subreq failed unretryably */
 };
 
 enum netfs_io_origin {
@@ -269,7 +269,6 @@ struct netfs_io_request {
 	size_t			prev_donated;	/* Fallback for subreq->prev_donated */
 	refcount_t		ref;
 	unsigned long		flags;
-#define NETFS_RREQ_COPY_TO_CACHE	1	/* Need to write to the cache */
 #define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED		4	/* The request failed */

---
@@ -89,6 +89,7 @@ find $cpio_dir -type f -print0 |
 
 # Create archive and try to normalize metadata for reproducibility.
 tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+    --exclude=".__afs*" --exclude=".nfs*" \
    --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
    -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
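
The two exclude patterns target "silly rename" artifacts: NFS and AFS
clients that unlink a file which some process still has open rename it to
.nfsXXXX or .__afsXXXX instead, and such transient names were leaking into
the kheaders archive and breaking reproducibility. The same filter in
standalone C (illustrative only):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Mirror tar's --exclude=".__afs*" --exclude=".nfs*" patterns. */
static int is_silly_rename(const char *name)
{
	return strncmp(name, ".nfs", 4) == 0 ||
	       strncmp(name, ".__afs", 6) == 0;
}

int main(int argc, char **argv)
{
	DIR *d = opendir(argc > 1 ? argv[1] : ".");
	struct dirent *de;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((de = readdir(d)))
		if (!is_silly_rename(de->d_name))
			puts(de->d_name);
	closedir(d);
	return 0;
}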