vfs-6.19-rc1.inode

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCaSmOZAAKCRCRxhvAZXjc
 omMSAP9GLhavxyWQ24Q+49CNWWRQWDY1wTOiUK2BwtIvZ0YEcAD8D1dAiMckL5pC
 RwEAVA5p+y+qi+bZP0KXCBxQddoTIQM=
 =zo/J
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.19-rc1.inode' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs inode updates from Christian Brauner:
 "Features:

   - Hide inode->i_state behind accessors. Open-coded accesses make it
     impossible to assert they are done correctly. The obvious thing to
     check is locking, but significantly more can be verified. For
     example, the accessors can detect code clearing flags that are
     already clear, or setting flags when doing so is illegal (e.g.,
     I_FREEING while ->i_count > 0)

   - Provide accessors for ->i_state, convert all filesystems to them
     (via coccinelle plus manual conversions for btrfs, ceph, smb, f2fs,
     gfs2, overlayfs, nilfs2, xfs), and make plain ->i_state access fail
     to compile (a hedged sketch of the accessor shapes follows this
     message)

   - Rework I_NEW handling to operate without fences, simplifying the
     code once the accessor infrastructure is in place (see the
     wake/wait sketch after the d_instantiate_new hunk below)

  Cleanups:

   - Move wait_on_inode() from writeback.h to fs.h

   - Spell out fenced ->i_state accesses with explicit smp_wmb/smp_rmb
     for clarity

   - Cosmetic fixes to LRU handling

   - Push list presence check into inode_io_list_del()

   - Touch up branch predicts (likely/unlikely hints) in __d_lookup_rcu()

   - ocfs2: retire ocfs2_drop_inode() and I_WILL_FREE usage

   - Assert on ->i_count in iput_final()

   - Assert ->i_lock held in __iget()

  Fixes:

   - Add missing fences to I_NEW handling"
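
For orientation, here is a minimal sketch of what the new accessor layer
might look like, inferred purely from the call sites in the diffs below.
The exact kernel definitions, the debug assertions they carry, and the
field rename that makes plain ->i_state access fail to compile are
assumptions, not the literal implementation:

	/* Sketch only. inode_state_read() is the locked variant (the lockdep
	 * assertion is an assumption), inode_state_read_once() is a lockless
	 * snapshot, set/clear modify the state under ->i_lock, and the raw
	 * assignment is for init/teardown paths with no concurrent access.
	 * "__i_state" is a hypothetical field name standing in for whatever
	 * rename breaks open-coded ->i_state access; helpers seen elsewhere
	 * in the diffs (e.g. inode_state_assign()) follow the same pattern.
	 */
	static inline unsigned long inode_state_read(struct inode *inode)
	{
		lockdep_assert_held(&inode->i_lock);
		return inode->__i_state;
	}

	static inline unsigned long inode_state_read_once(struct inode *inode)
	{
		return READ_ONCE(inode->__i_state);
	}

	static inline void inode_state_set(struct inode *inode, unsigned long flags)
	{
		lockdep_assert_held(&inode->i_lock);
		inode->__i_state |= flags;
	}

	static inline void inode_state_clear(struct inode *inode, unsigned long flags)
	{
		lockdep_assert_held(&inode->i_lock);
		inode->__i_state &= ~flags;
	}

	static inline void inode_state_assign_raw(struct inode *inode, unsigned long state)
	{
		inode->__i_state = state;	/* inode not (or no longer) shared */
	}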

* tag 'vfs-6.19-rc1.inode' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (22 commits)
  dcache: touch up predicts in __d_lookup_rcu()
  fs: push list presence check into inode_io_list_del()
  fs: cosmetic fixes to lru handling
  fs: rework I_NEW handling to operate without fences
  fs: make plain ->i_state access fail to compile
  xfs: use the new ->i_state accessors
  nilfs2: use the new ->i_state accessors
  overlayfs: use the new ->i_state accessors
  gfs2: use the new ->i_state accessors
  f2fs: use the new ->i_state accessors
  smb: use the new ->i_state accessors
  ceph: use the new ->i_state accessors
  btrfs: use the new ->i_state accessors
  Manual conversion to use ->i_state accessors of all places not covered by coccinelle
  Coccinelle-based conversion to use ->i_state accessors
  fs: provide accessors for ->i_state
  fs: spell out fenced ->i_state accesses with explicit smp_wmb/smp_rmb
  fs: move wait_on_inode() from writeback.h to fs.h
  fs: add missing fences to I_NEW handling
  ocfs2: retire ocfs2_drop_inode() and I_WILL_FREE usage
  ...
Merged by Linus Torvalds on 2025-12-01 09:02:34 -08:00 (commit 9368f0f941).
115 changed files with 517 additions and 417 deletions.


@ -211,7 +211,7 @@ test and set for you.
e.g.::
inode = iget_locked(sb, ino);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
err = read_inode_from_disk(inode);
if (err < 0) {
iget_failed(inode);
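
For context, the changed line above belongs to the documentation's standard
iget_locked() example; the full idiom reads roughly as below, where
read_inode_from_disk() is the documentation's placeholder for a
filesystem-specific fill routine, not a real kernel helper:

	static struct inode *example_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode;
		int err;

		inode = iget_locked(sb, ino);	/* cached inode, or new one with I_NEW set */
		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode_state_read_once(inode) & I_NEW))
			return inode;		/* already initialized by someone else */

		err = read_inode_from_disk(inode);
		if (err < 0) {
			iget_failed(inode);	/* marks the inode bad, unlocks it, drops it */
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);	/* clears I_NEW and wakes any waiters */
		return inode;
	}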


@ -67,7 +67,7 @@ static void bdev_write_inode(struct block_device *bdev)
int ret;
spin_lock(&inode->i_lock);
while (inode->i_state & I_DIRTY) {
while (inode_state_read(inode) & I_DIRTY) {
spin_unlock(&inode->i_lock);
ret = write_inode_now(inode, true);
if (ret)
@ -1282,7 +1282,7 @@ void sync_bdevs(bool wait)
struct block_device *bdev;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) ||
mapping->nrpages == 0) {
spin_unlock(&inode->i_lock);
continue;


@ -433,7 +433,7 @@ static struct dax_device *dax_dev_get(dev_t devt)
return NULL;
dax_dev = to_dax_dev(inode);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR;


@ -422,7 +422,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode, st);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
/*
* initialize the inode with the stat info


@ -112,7 +112,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
/*
* initialize the inode with the stat info


@ -29,7 +29,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
pr_debug("affs_iget(%lu)\n", inode->i_ino);


@ -779,7 +779,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry)
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
bool supports_ibulk;
bool supports_ibulk, isnew;
long ret;
int i;
@ -850,7 +850,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry)
* callback counters.
*/
ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode,
afs_ilookup5_test_by_fid, &vp->fid);
afs_ilookup5_test_by_fid, &vp->fid, &isnew);
if (!IS_ERR_OR_NULL(ti)) {
vnode = AFS_FS_I(ti);
vp->dv_before = vnode->status.data_version;


@ -64,7 +64,7 @@ static struct inode *afs_iget_pseudo_dir(struct super_block *sb, ino_t ino)
vnode = AFS_FS_I(inode);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
netfs_inode_init(&vnode->netfs, NULL, false);
simple_inode_init_ts(inode);
set_nlink(inode, 2);
@ -259,7 +259,7 @@ static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry
vnode = AFS_FS_I(inode);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
netfs_inode_init(&vnode->netfs, NULL, false);
simple_inode_init_ts(inode);
set_nlink(inode, 1);
@ -384,7 +384,7 @@ struct inode *afs_dynroot_iget_root(struct super_block *sb)
vnode = AFS_FS_I(inode);
/* there shouldn't be an existing inode */
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
netfs_inode_init(&vnode->netfs, NULL, false);
simple_inode_init_ts(inode);
set_nlink(inode, 2);


@ -427,7 +427,7 @@ static void afs_fetch_status_success(struct afs_operation *op)
struct afs_vnode *vnode = vp->vnode;
int ret;
if (vnode->netfs.inode.i_state & I_NEW) {
if (inode_state_read_once(&vnode->netfs.inode) & I_NEW) {
ret = afs_inode_init_from_status(op, vp, vnode);
afs_op_set_error(op, ret);
if (ret == 0)
@ -579,7 +579,7 @@ struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
/* deal with an existing inode */
if (!(inode->i_state & I_NEW)) {
if (!(inode_state_read_once(inode) & I_NEW)) {
_leave(" = %p", inode);
return inode;
}
@ -639,7 +639,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
_debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid);
BUG_ON(!(inode->i_state & I_NEW));
BUG_ON(!(inode_state_read_once(inode) & I_NEW));
vnode = AFS_FS_I(inode);
vnode->cb_v_check = atomic_read(&as->volume->cb_v_break);
@ -748,7 +748,7 @@ void afs_evict_inode(struct inode *inode)
if ((S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) &&
(inode->i_state & I_DIRTY) &&
(inode_state_read_once(inode) & I_DIRTY) &&
!sbi->dyn_root) {
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,


@ -307,7 +307,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
befs_ino = BEFS_I(inode);


@ -42,7 +42,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {


@ -3886,7 +3886,7 @@ static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
ASSERT(ret != -ENOMEM);
return ret;
} else if (existing) {
WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
}
return 0;
@ -5363,7 +5363,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct rb_node *node;
ASSERT(inode->i_state & I_FREEING);
ASSERT(inode_state_read_once(inode) & I_FREEING);
truncate_inode_pages_final(&inode->i_data);
btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
@ -5801,7 +5801,7 @@ struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->vfs_inode.i_state & I_NEW))
if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
return inode;
ret = btrfs_read_locked_inode(inode, path);
@ -5825,7 +5825,7 @@ struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->vfs_inode.i_state & I_NEW))
if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
return inode;
path = btrfs_alloc_path();
@ -7486,7 +7486,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
u64 page_start = folio_pos(folio);
u64 page_end = page_start + folio_size(folio) - 1;
u64 cur;
int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
/*
* We have folio locked so no new ordered extent can be created on this


@ -611,9 +611,9 @@ int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
return err;
ret = sync_mapping_buffers(inode->i_mapping);
if (!(inode->i_state & I_DIRTY_ALL))
if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
goto out;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
goto out;
err = sync_inode_metadata(inode, 1);


@ -26,7 +26,7 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
return;
/* Only new inodes! */
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return;
WARN_ON_ONCE(ci->netfs.cache);


@ -329,7 +329,7 @@ int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int elen)
out:
kfree(cryptbuf);
if (dir != parent) {
if ((dir->i_state & I_NEW))
if ((inode_state_read_once(dir) & I_NEW))
discard_new_inode(dir);
else
iput(dir);
@ -438,7 +438,7 @@ int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
fscrypt_fname_free_buffer(&_tname);
out_inode:
if (dir != fname->dir) {
if ((dir->i_state & I_NEW))
if ((inode_state_read_once(dir) & I_NEW))
discard_new_inode(dir);
else
iput(dir);


@ -740,7 +740,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
vino.ino, ceph_ino(dir), dentry->d_name.name);
ceph_dir_clear_ordered(dir);
ceph_init_inode_acls(inode, as_ctx);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
/*
* If it's not I_NEW, then someone created this before
* we got here. Assume the server is aware of it at
@ -901,7 +901,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
new_inode = NULL;
goto out_req;
}
WARN_ON_ONCE(!(new_inode->i_state & I_NEW));
WARN_ON_ONCE(!(inode_state_read_once(new_inode) & I_NEW));
spin_lock(&dentry->d_lock);
di->flags |= CEPH_DENTRY_ASYNC_CREATE;


@ -132,7 +132,7 @@ struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
goto out_err;
}
inode->i_state = 0;
inode_state_assign_raw(inode, 0);
inode->i_mode = *mode;
err = ceph_security_init_secctx(dentry, *mode, as_ctx);
@ -201,7 +201,7 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
ceph_present_inode(inode), ceph_vinop(inode), inode,
!!(inode->i_state & I_NEW));
!!(inode_state_read_once(inode) & I_NEW));
return inode;
}
@ -228,7 +228,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
goto err;
}
if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
if (!(inode_state_read_once(inode) & I_NEW) && !S_ISDIR(inode->i_mode)) {
pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
inode->i_mode);
goto err;
@ -261,7 +261,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
}
}
#endif
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
inode->i_op = &ceph_snapdir_iops;
inode->i_fop = &ceph_snapdir_fops;
ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
@ -270,7 +270,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
return inode;
err:
if ((inode->i_state & I_NEW))
if ((inode_state_read_once(inode) & I_NEW))
discard_new_inode(inode);
else
iput(inode);
@ -744,7 +744,7 @@ void ceph_evict_inode(struct inode *inode)
netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
if (inode->i_state & I_PINNING_NETFS_WB)
if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
ceph_fscache_unuse_cookie(inode, true);
clear_inode(inode);
@ -1013,7 +1013,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
le64_to_cpu(info->version), ci->i_version);
/* Once I_NEW is cleared, we can't change type or dev numbers */
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
inode->i_mode = mode;
} else {
if (inode_wrong_type(inode, mode)) {
@ -1090,7 +1090,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
#ifdef CONFIG_FS_ENCRYPTION
if (iinfo->fscrypt_auth_len &&
((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
((inode_state_read_once(inode) & I_NEW) || (ci->fscrypt_auth_len == 0))) {
kfree(ci->fscrypt_auth);
ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
ci->fscrypt_auth = iinfo->fscrypt_auth;
@ -1692,13 +1692,13 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
pr_err_client(cl, "badness %p %llx.%llx\n", in,
ceph_vinop(in));
req->r_target_inode = NULL;
if (in->i_state & I_NEW)
if (inode_state_read_once(in) & I_NEW)
discard_new_inode(in);
else
iput(in);
goto done;
}
if (in->i_state & I_NEW)
if (inode_state_read_once(in) & I_NEW)
unlock_new_inode(in);
}
@ -1898,11 +1898,11 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
pr_err_client(cl, "inode badness on %p got %d\n", in,
rc);
err = rc;
if (in->i_state & I_NEW) {
if (inode_state_read_once(in) & I_NEW) {
ihold(in);
discard_new_inode(in);
}
} else if (in->i_state & I_NEW) {
} else if (inode_state_read_once(in) & I_NEW) {
unlock_new_inode(in);
}
@ -2114,7 +2114,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
pr_err_client(cl, "badness on %p %llx.%llx\n", in,
ceph_vinop(in));
if (d_really_is_negative(dn)) {
if (in->i_state & I_NEW) {
if (inode_state_read_once(in) & I_NEW) {
ihold(in);
discard_new_inode(in);
}
@ -2124,7 +2124,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
err = ret;
goto next_item;
}
if (in->i_state & I_NEW)
if (inode_state_read_once(in) & I_NEW)
unlock_new_inode(in);
if (d_really_is_negative(dn)) {


@ -70,7 +70,7 @@ struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid,
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
cii = ITOC(inode);
/* we still need to set i_ino for things like stat(2) */
inode->i_ino = hash;
@ -148,7 +148,7 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
/* we should never see newly created inodes because we intentionally
* fail in the initialization callback */
BUG_ON(inode->i_state & I_NEW);
BUG_ON(inode_state_read_once(inode) & I_NEW);
return inode;
}


@ -95,7 +95,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
inode = iget_locked(sb, cramino(cramfs_inode, offset));
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
switch (cramfs_inode->mode & S_IFMT) {


@ -945,7 +945,7 @@ static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) {
inode = ci->ci_inode;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}


@ -834,7 +834,7 @@ int fscrypt_drop_inode(struct inode *inode)
* userspace is still using the files, inodes can be dirtied between
* then and now. We mustn't lose any writes, so skip dirty inodes here.
*/
if (inode->i_state & I_DIRTY_ALL)
if (inode_state_read(inode) & I_DIRTY_ALL)
return 0;
/*


@ -795,7 +795,7 @@ void d_mark_dontcache(struct inode *inode)
de->d_flags |= DCACHE_DONTCACHE;
spin_unlock(&de->d_lock);
}
inode->i_state |= I_DONTCACHE;
inode_state_set(inode, I_DONTCACHE);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
@ -1074,7 +1074,7 @@ struct dentry *d_find_alias_rcu(struct inode *inode)
spin_lock(&inode->i_lock);
// ->i_dentry and ->i_rcu are colocated, but the latter won't be
// used without having I_FREEING set, which means no aliases left
if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
if (likely(!(inode_state_read(inode) & I_FREEING) && !hlist_empty(l))) {
if (S_ISDIR(inode->i_mode)) {
de = hlist_entry(l->first, struct dentry, d_u.d_alias);
} else {
@ -1981,14 +1981,8 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW & ~I_CREATING;
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees the bit cleared or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
WARN_ON(!(inode_state_read(inode) & I_NEW));
inode_state_clear(inode, I_NEW | I_CREATING);
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
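
Why the open-coded smp_mb() can go away here: the clearing side above and
the new waiter both serialize on ->i_lock, so the lock provides all the
ordering the barrier used to. A condensed sketch of the pairing (not the
literal code; see wait_on_new_inode() in the fs/inode.c hunks below):

	/* clearing side, e.g. d_instantiate_new() or unlock_new_inode() */
	spin_lock(&inode->i_lock);
	inode_state_clear(inode, I_NEW | I_CREATING);
	inode_wake_up_bit(inode, __I_NEW);	/* wakeup issued under i_lock */
	spin_unlock(&inode->i_lock);

	/* waiting side: re-checks I_NEW under i_lock, so it either observes
	 * the bit already cleared or is queued before the wakeup is issued */
	spin_lock(&inode->i_lock);
	while (inode_state_read(inode) & I_NEW) {
		spin_unlock(&inode->i_lock);
		schedule();	/* after prepare_to_wait_event(), as in the real code */
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
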
@ -2307,11 +2301,20 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
seq = raw_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
continue;
if (d_unhashed(dentry))
continue;
if (dentry->d_name.hash_len != hashlen)
continue;
if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
if (unlikely(dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0))
continue;
/*
* Check for the dentry being unhashed.
*
* As tempting as it is, we *can't* skip it because of a race window
* between us finding the dentry before it gets unhashed and loading
* the sequence counter after unhashing is finished.
*
* We can at least predict on it.
*/
if (unlikely(d_unhashed(dentry)))
continue;
*seqp = seq;
return dentry;


@ -28,7 +28,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
* inodes without pages but we deliberately won't in case
* we need to reschedule to avoid softlockups.
*/
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
(mapping_empty(inode->i_mapping) && !need_resched())) {
spin_unlock(&inode->i_lock);
continue;


@ -95,7 +95,7 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
iput(lower_inode);
return ERR_PTR(-EACCES);
}
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
iput(lower_inode);
return inode;
@ -106,7 +106,7 @@ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
{
struct inode *inode = __ecryptfs_get_inode(lower_inode, sb);
if (!IS_ERR(inode) && (inode->i_state & I_NEW))
if (!IS_ERR(inode) && (inode_state_read_once(inode) & I_NEW))
unlock_new_inode(inode);
return inode;
@ -364,7 +364,7 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
}
}
if (inode->i_state & I_NEW)
if (inode_state_read_once(inode) & I_NEW)
unlock_new_inode(inode);
return d_splice_alias(inode, dentry);
}


@ -62,7 +62,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
inode = iget_locked(super, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
in = INODE_INFO(inode);


@ -295,7 +295,7 @@ struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
int err = erofs_fill_inode(inode);
if (err) {


@ -1398,7 +1398,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ei = EXT2_I(inode);


@ -202,7 +202,6 @@ void ext4_evict_inode(struct inode *inode)
* the inode. Flush worker is ignoring it because of I_FREEING flag but
* we still need to remove the inode from the writeback lists.
*/
if (!list_empty_careful(&inode->i_io_list))
inode_io_list_del(inode);
/*
@ -425,7 +424,7 @@ void ext4_check_map_extents_env(struct inode *inode)
if (!S_ISREG(inode->i_mode) ||
IS_NOQUOTA(inode) || IS_VERITY(inode) ||
is_special_ino(inode->i_sb, inode->i_ino) ||
(inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) ||
(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
ext4_verity_in_progress(inode))
return;
@ -3473,7 +3472,7 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
/* Any metadata buffers to write? */
if (!list_empty(&inode->i_mapping->i_private_list))
return true;
return inode->i_state & I_DIRTY_DATASYNC;
return inode_state_read_once(inode) & I_DIRTY_DATASYNC;
}
static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
@ -4552,7 +4551,7 @@ int ext4_truncate(struct inode *inode)
* or it's a completely new inode. In those cases we might not
* have i_rwsem locked because it's not necessary.
*/
if (!(inode->i_state & (I_NEW|I_FREEING)))
if (!(inode_state_read_once(inode) & (I_NEW | I_FREEING)))
WARN_ON(!inode_is_locked(inode));
trace_ext4_truncate_enter(inode);
@ -5210,7 +5209,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
if (!(inode_state_read_once(inode) & I_NEW)) {
ret = check_igot_inode(inode, flags, function, line);
if (ret) {
iput(inode);
@ -5549,7 +5548,7 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
if (inode_is_dirtytime_only(inode)) {
struct ext4_inode_info *ei = EXT4_I(inode);
inode->i_state &= ~I_DIRTY_TIME;
inode_state_clear(inode, I_DIRTY_TIME);
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);


@ -107,7 +107,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
if (!sbi->s_journal || is_bad_inode(inode))
return 0;
WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
if (ext4_inode_orphan_tracked(inode))
return 0;
@ -232,7 +232,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
return 0;
WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE))
return ext4_orphan_file_del(handle, inode);


@ -4222,7 +4222,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (map.m_flags & F2FS_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
if ((inode->i_state & I_DIRTY_DATASYNC) ||
if ((inode_state_read_once(inode) & I_DIRTY_DATASYNC) ||
offset + length > i_size_read(inode))
iomap->flags |= IOMAP_F_DIRTY;


@ -569,7 +569,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
if (!(inode_state_read_once(inode) & I_NEW)) {
if (is_meta_ino(sbi, ino)) {
f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
set_sbi_flag(sbi, SBI_NEED_FSCK);


@ -844,7 +844,7 @@ static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
f2fs_i_links_write(inode, false);
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
inode_state_set(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
} else {
if (file)
@ -1057,7 +1057,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto put_out_dir;
spin_lock(&whiteout->i_lock);
whiteout->i_state &= ~I_LINKABLE;
inode_state_clear(whiteout, I_LINKABLE);
spin_unlock(&whiteout->i_lock);
iput(whiteout);


@ -1798,7 +1798,7 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
__iget(inode);


@ -258,7 +258,7 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
ip = iget_locked(sbp, ino);
if (!ip)
return ERR_PTR(-ENOMEM);
if (!(ip->i_state & I_NEW))
if (!(inode_state_read_once(ip) & I_NEW))
return ip;
vip = VXFS_INO(ip);


@ -121,7 +121,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
WARN_ON_ONCE(inode->i_state & I_FREEING);
WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
list_move(&inode->i_io_list, head);
@ -304,9 +304,9 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
WARN_ON_ONCE(inode->i_state & I_FREEING);
WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
inode->i_state &= ~I_SYNC_QUEUED;
inode_state_clear(inode, I_SYNC_QUEUED);
if (wb != &wb->bdi->wb)
list_move(&inode->i_io_list, &wb->b_attached);
else
@ -408,7 +408,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
* Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
* path owns the inode and we shouldn't modify ->i_io_list.
*/
if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
if (unlikely(inode_state_read(inode) & (I_FREEING | I_WILL_FREE)))
goto skip_switch;
trace_inode_switch_wbs(inode, old_wb, new_wb);
@ -451,7 +451,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
if (!list_empty(&inode->i_io_list)) {
inode->i_wb = new_wb;
if (inode->i_state & I_DIRTY_ALL) {
if (inode_state_read(inode) & I_DIRTY_ALL) {
/*
* We need to keep b_dirty list sorted by
* dirtied_time_when. However properly sorting the
@ -476,10 +476,11 @@ static bool inode_do_switch_wbs(struct inode *inode,
switched = true;
skip_switch:
/*
* Paired with load_acquire in unlocked_inode_to_wb_begin() and
* Paired with an acquire fence in unlocked_inode_to_wb_begin() and
* ensures that the new wb is visible if they see !I_WB_SWITCH.
*/
smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
smp_wmb();
inode_state_clear(inode, I_WB_SWITCH);
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&inode->i_lock);
@ -600,12 +601,12 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
inode_state_read(inode) & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
inode_to_wb(inode) == new_wb) {
spin_unlock(&inode->i_lock);
return false;
}
inode->i_state |= I_WB_SWITCH;
inode_state_set(inode, I_WB_SWITCH);
__iget(inode);
spin_unlock(&inode->i_lock);
@ -635,7 +636,7 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
struct bdi_writeback *new_wb = NULL;
/* noop if seems to be already in progress */
if (inode->i_state & I_WB_SWITCH)
if (inode_state_read_once(inode) & I_WB_SWITCH)
return;
/* avoid queueing a new switch if too many are already in flight */
@ -1236,9 +1237,9 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
WARN_ON_ONCE(inode->i_state & I_FREEING);
WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
inode->i_state &= ~I_SYNC_QUEUED;
inode_state_clear(inode, I_SYNC_QUEUED);
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
@ -1348,10 +1349,17 @@ void inode_io_list_del(struct inode *inode)
{
struct bdi_writeback *wb;
/*
* FIXME: ext4 can call here from ext4_evict_inode() after evict() already
* unlinked the inode.
*/
if (list_empty_careful(&inode->i_io_list))
return;
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
inode->i_state &= ~I_SYNC_QUEUED;
inode_state_clear(inode, I_SYNC_QUEUED);
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
@ -1409,13 +1417,13 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
assert_spin_locked(&inode->i_lock);
inode->i_state &= ~I_SYNC_QUEUED;
inode_state_clear(inode, I_SYNC_QUEUED);
/*
* When the inode is being freed just don't bother with dirty list
* tracking. Flush worker will ignore this inode anyway and it will
* trigger assertions in inode_io_list_move_locked().
*/
if (inode->i_state & I_FREEING) {
if (inode_state_read(inode) & I_FREEING) {
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
return;
@ -1449,9 +1457,9 @@ static void inode_sync_complete(struct inode *inode)
{
assert_spin_locked(&inode->i_lock);
inode->i_state &= ~I_SYNC;
inode_state_clear(inode, I_SYNC);
/* If inode is clean an unused, put it into LRU now... */
inode_add_lru(inode);
inode_lru_list_add(inode);
/* Called with inode->i_lock which ensures memory ordering. */
inode_wake_up_bit(inode, __I_SYNC);
}
@ -1493,7 +1501,7 @@ static int move_expired_inodes(struct list_head *delaying_queue,
spin_lock(&inode->i_lock);
list_move(&inode->i_io_list, &tmp);
moved++;
inode->i_state |= I_SYNC_QUEUED;
inode_state_set(inode, I_SYNC_QUEUED);
spin_unlock(&inode->i_lock);
if (sb_is_blkdev_sb(inode->i_sb))
continue;
@ -1579,14 +1587,14 @@ void inode_wait_for_writeback(struct inode *inode)
assert_spin_locked(&inode->i_lock);
if (!(inode->i_state & I_SYNC))
if (!(inode_state_read(inode) & I_SYNC))
return;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
for (;;) {
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
if (!(inode->i_state & I_SYNC))
if (!(inode_state_read(inode) & I_SYNC))
break;
spin_unlock(&inode->i_lock);
schedule();
@ -1612,7 +1620,7 @@ static void inode_sleep_on_writeback(struct inode *inode)
wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
sleep = !!(inode->i_state & I_SYNC);
sleep = !!(inode_state_read(inode) & I_SYNC);
spin_unlock(&inode->i_lock);
if (sleep)
schedule();
@ -1631,7 +1639,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
struct writeback_control *wbc,
unsigned long dirtied_before)
{
if (inode->i_state & I_FREEING)
if (inode_state_read(inode) & I_FREEING)
return;
/*
@ -1639,7 +1647,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* shot. If still dirty, it will be redirty_tail()'ed below. Update
* the dirty time to prevent enqueue and sync it again.
*/
if ((inode->i_state & I_DIRTY) &&
if ((inode_state_read(inode) & I_DIRTY) &&
(wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
inode->dirtied_when = jiffies;
@ -1650,7 +1658,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* is odd for clean inodes, it can happen for some
* filesystems so handle that gracefully.
*/
if (inode->i_state & I_DIRTY_ALL)
if (inode_state_read(inode) & I_DIRTY_ALL)
redirty_tail_locked(inode, wb);
else
inode_cgwb_move_to_attached(inode, wb);
@ -1676,17 +1684,17 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
*/
redirty_tail_locked(inode, wb);
}
} else if (inode->i_state & I_DIRTY) {
} else if (inode_state_read(inode) & I_DIRTY) {
/*
* Filesystems can dirty the inode during writeback operations,
* such as delayed allocation during submission or metadata
* updates after data IO completion.
*/
redirty_tail_locked(inode, wb);
} else if (inode->i_state & I_DIRTY_TIME) {
} else if (inode_state_read(inode) & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
inode->i_state &= ~I_SYNC_QUEUED;
inode_state_clear(inode, I_SYNC_QUEUED);
} else {
/* The inode is clean. Remove from writeback lists. */
inode_cgwb_move_to_attached(inode, wb);
@ -1712,7 +1720,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
unsigned dirty;
int ret;
WARN_ON(!(inode->i_state & I_SYNC));
WARN_ON(!(inode_state_read_once(inode) & I_SYNC));
trace_writeback_single_inode_start(inode, wbc, nr_to_write);
@ -1736,7 +1744,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* mark_inode_dirty_sync() to notify the filesystem about it and to
* change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
if ((inode->i_state & I_DIRTY_TIME) &&
if ((inode_state_read_once(inode) & I_DIRTY_TIME) &&
(wbc->sync_mode == WB_SYNC_ALL ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
@ -1751,8 +1759,8 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* after handling timestamp expiration, as that may dirty the inode too.
*/
spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~dirty;
dirty = inode_state_read(inode) & I_DIRTY;
inode_state_clear(inode, dirty);
/*
* Paired with smp_mb() in __mark_inode_dirty(). This allows
@ -1768,10 +1776,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
smp_mb();
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
inode->i_state |= I_DIRTY_PAGES;
else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) {
if (!(inode->i_state & I_DIRTY_PAGES)) {
inode->i_state &= ~I_PINNING_NETFS_WB;
inode_state_set(inode, I_DIRTY_PAGES);
else if (unlikely(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
if (!(inode_state_read(inode) & I_DIRTY_PAGES)) {
inode_state_clear(inode, I_PINNING_NETFS_WB);
wbc->unpinned_netfs_wb = true;
dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */
}
@ -1807,11 +1815,11 @@ static int writeback_single_inode(struct inode *inode,
spin_lock(&inode->i_lock);
if (!icount_read(inode))
WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
WARN_ON(!(inode_state_read(inode) & (I_WILL_FREE | I_FREEING)));
else
WARN_ON(inode->i_state & I_WILL_FREE);
WARN_ON(inode_state_read(inode) & I_WILL_FREE);
if (inode->i_state & I_SYNC) {
if (inode_state_read(inode) & I_SYNC) {
/*
* Writeback is already running on the inode. For WB_SYNC_NONE,
* that's enough and we can just return. For WB_SYNC_ALL, we
@ -1822,7 +1830,7 @@ static int writeback_single_inode(struct inode *inode,
goto out;
inode_wait_for_writeback(inode);
}
WARN_ON(inode->i_state & I_SYNC);
WARN_ON(inode_state_read(inode) & I_SYNC);
/*
* If the inode is already fully clean, then there's nothing to do.
*
@ -1830,11 +1838,11 @@ static int writeback_single_inode(struct inode *inode,
* still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If
* there are any such pages, we'll need to wait for them.
*/
if (!(inode->i_state & I_DIRTY_ALL) &&
if (!(inode_state_read(inode) & I_DIRTY_ALL) &&
(wbc->sync_mode != WB_SYNC_ALL ||
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
inode->i_state |= I_SYNC;
inode_state_set(inode, I_SYNC);
wbc_attach_and_unlock_inode(wbc, inode);
ret = __writeback_single_inode(inode, wbc);
@ -1847,18 +1855,18 @@ static int writeback_single_inode(struct inode *inode,
* If the inode is freeing, its i_io_list shoudn't be updated
* as it can be finally deleted at this moment.
*/
if (!(inode->i_state & I_FREEING)) {
if (!(inode_state_read(inode) & I_FREEING)) {
/*
* If the inode is now fully clean, then it can be safely
* removed from its writeback list (if any). Otherwise the
* flusher threads are responsible for the writeback lists.
*/
if (!(inode->i_state & I_DIRTY_ALL))
if (!(inode_state_read(inode) & I_DIRTY_ALL))
inode_cgwb_move_to_attached(inode, wb);
else if (!(inode->i_state & I_SYNC_QUEUED)) {
if ((inode->i_state & I_DIRTY))
else if (!(inode_state_read(inode) & I_SYNC_QUEUED)) {
if ((inode_state_read(inode) & I_DIRTY))
redirty_tail_locked(inode, wb);
else if (inode->i_state & I_DIRTY_TIME) {
else if (inode_state_read(inode) & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode,
wb,
@ -1967,12 +1975,12 @@ static long writeback_sb_inodes(struct super_block *sb,
* kind writeout is handled by the freer.
*/
spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) {
redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
continue;
}
if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
if ((inode_state_read(inode) & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
/*
* If this inode is locked for writeback and we are not
* doing writeback-for-data-integrity, move it to
@ -1994,14 +2002,14 @@ static long writeback_sb_inodes(struct super_block *sb,
* are doing WB_SYNC_NONE writeback. So this catches only the
* WB_SYNC_ALL case.
*/
if (inode->i_state & I_SYNC) {
if (inode_state_read(inode) & I_SYNC) {
/* Wait for I_SYNC. This function drops i_lock... */
inode_sleep_on_writeback(inode);
/* Inode may be gone, start again */
spin_lock(&wb->list_lock);
continue;
}
inode->i_state |= I_SYNC;
inode_state_set(inode, I_SYNC);
wbc_attach_and_unlock_inode(&wbc, inode);
write_chunk = writeback_chunk_size(wb, work);
@ -2039,7 +2047,7 @@ static long writeback_sb_inodes(struct super_block *sb,
*/
tmp_wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY_ALL))
if (!(inode_state_read(inode) & I_DIRTY_ALL))
total_wrote++;
requeue_inode(inode, tmp_wb, &wbc, dirtied_before);
inode_sync_complete(inode);
@ -2545,10 +2553,10 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* We tell ->dirty_inode callback that timestamps need to
* be updated by setting I_DIRTY_TIME in flags.
*/
if (inode->i_state & I_DIRTY_TIME) {
if (inode_state_read_once(inode) & I_DIRTY_TIME) {
spin_lock(&inode->i_lock);
if (inode->i_state & I_DIRTY_TIME) {
inode->i_state &= ~I_DIRTY_TIME;
if (inode_state_read(inode) & I_DIRTY_TIME) {
inode_state_clear(inode, I_DIRTY_TIME);
flags |= I_DIRTY_TIME;
}
spin_unlock(&inode->i_lock);
@ -2585,16 +2593,16 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
smp_mb();
if ((inode->i_state & flags) == flags)
if ((inode_state_read_once(inode) & flags) == flags)
return;
spin_lock(&inode->i_lock);
if ((inode->i_state & flags) != flags) {
const int was_dirty = inode->i_state & I_DIRTY;
if ((inode_state_read(inode) & flags) != flags) {
const int was_dirty = inode_state_read(inode) & I_DIRTY;
inode_attach_wb(inode, NULL);
inode->i_state |= flags;
inode_state_set(inode, flags);
/*
* Grab inode's wb early because it requires dropping i_lock and we
@ -2613,7 +2621,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* the inode it will place it on the appropriate superblock
* list, based upon its state.
*/
if (inode->i_state & I_SYNC_QUEUED)
if (inode_state_read(inode) & I_SYNC_QUEUED)
goto out_unlock;
/*
@ -2624,7 +2632,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (inode_unhashed(inode))
goto out_unlock;
}
if (inode->i_state & I_FREEING)
if (inode_state_read(inode) & I_FREEING)
goto out_unlock;
/*
@ -2639,7 +2647,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (dirtytime)
inode->dirtied_time_when = jiffies;
if (inode->i_state & I_DIRTY)
if (inode_state_read(inode) & I_DIRTY)
dirty_list = &wb->b_dirty;
else
dirty_list = &wb->b_dirty_time;
@ -2736,7 +2744,7 @@ static void wait_sb_inodes(struct super_block *sb)
spin_unlock_irq(&sb->s_inode_wblist_lock);
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
spin_lock_irq(&sb->s_inode_wblist_lock);
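
A recurring shape in the writeback conversion above, visible in
__mark_inode_dirty(): a cheap lockless peek via inode_state_read_once() to
skip work in the common case, followed by an authoritative re-check via
inode_state_read() under ->i_lock before anything is modified. Condensed:

	if (inode_state_read_once(inode) & I_DIRTY_TIME) {	/* unlocked hint */
		spin_lock(&inode->i_lock);
		if (inode_state_read(inode) & I_DIRTY_TIME) {	/* still set? */
			inode_state_clear(inode, I_DIRTY_TIME);
			flags |= I_DIRTY_TIME;
		}
		spin_unlock(&inode->i_lock);
	}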


@ -160,7 +160,7 @@ static void fuse_evict_inode(struct inode *inode)
struct fuse_inode *fi = get_fuse_inode(inode);
/* Will write inode on close/munmap and in all other dirtiers */
WARN_ON(inode->i_state & I_DIRTY_INODE);
WARN_ON(inode_state_read_once(inode) & I_DIRTY_INODE);
if (FUSE_IS_DAX(inode))
dax_break_layout_final(inode);
@ -505,7 +505,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
if (!inode)
return NULL;
if ((inode->i_state & I_NEW)) {
if ((inode_state_read_once(inode) & I_NEW)) {
inode->i_flags |= S_NOATIME;
if (!fc->writeback_cache || !S_ISREG(attr->mode))
inode->i_flags |= S_NOCMTIME;


@ -744,7 +744,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
int sync_state = inode->i_state & I_DIRTY;
int sync_state = inode_state_read_once(inode) & I_DIRTY;
struct gfs2_inode *ip = GFS2_I(inode);
int ret = 0, ret1 = 0;


@ -957,7 +957,7 @@ static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
ip = NULL;
spin_unlock(&gl->gl_lockref.lock);
if (ip) {
wait_on_inode(&ip->i_inode);
wait_on_new_inode(&ip->i_inode);
if (is_bad_inode(&ip->i_inode)) {
iput(&ip->i_inode);
ip = NULL;


@ -394,7 +394,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
struct inode *inode = &ip->i_inode;
bool is_new = inode->i_state & I_NEW;
bool is_new = inode_state_read_once(inode) & I_NEW;
if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
gfs2_consist_inode(ip);


@ -127,7 +127,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
ip = GFS2_I(inode);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_glock *io_gl;
int extra_flags = 0;
@ -924,7 +924,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_dir_no_add(&da);
gfs2_glock_dq_uninit(&d_gh);
if (!IS_ERR_OR_NULL(inode)) {
if (inode->i_state & I_NEW)
if (inode_state_read_once(inode) & I_NEW)
iget_failed(inode);
else
iput(inode);

View File

@ -1751,7 +1751,7 @@ static void gfs2_evict_inodes(struct super_block *sb)
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) &&
if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) &&
!need_resched()) {
spin_unlock(&inode->i_lock);
continue;


@ -42,7 +42,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
tree->inode = iget_locked(sb, id);
if (!tree->inode)
goto free_tree;
BUG_ON(!(tree->inode->i_state & I_NEW));
BUG_ON(!(inode_state_read_once(tree->inode) & I_NEW));
{
struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
HFS_I(tree->inode)->flags = 0;


@ -412,7 +412,7 @@ struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_
return NULL;
}
inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
if (inode && (inode->i_state & I_NEW))
if (inode && (inode_state_read_once(inode) & I_NEW))
unlock_new_inode(inode);
return inode;
}


@ -65,7 +65,7 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
atomic_set(&HFSPLUS_I(inode)->opencnt, 0);


@ -581,7 +581,7 @@ static struct inode *hostfs_iget(struct super_block *sb, char *name)
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
unlock_new_inode(inode);
} else {
spin_lock(&inode->i_lock);


@ -247,7 +247,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
result = ERR_PTR(-ENOMEM);
goto bail1;
}
if (result->i_state & I_NEW) {
if (inode_state_read_once(result) & I_NEW) {
hpfs_init_inode(result);
if (de->directory)
hpfs_read_inode(result);

View File

@ -196,7 +196,7 @@ void hpfs_write_inode(struct inode *i)
parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
if (parent) {
hpfs_inode->i_dirty = 0;
if (parent->i_state & I_NEW) {
if (inode_state_read_once(parent) & I_NEW) {
hpfs_init_inode(parent);
hpfs_read_inode(parent);
unlock_new_inode(parent);


@ -233,7 +233,7 @@ int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
inode->i_state = 0;
inode_state_assign_raw(inode, 0);
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
@ -471,7 +471,7 @@ EXPORT_SYMBOL(set_nlink);
void inc_nlink(struct inode *inode)
{
if (unlikely(inode->i_nlink == 0)) {
WARN_ON(!(inode->i_state & I_LINKABLE));
WARN_ON(!(inode_state_read_once(inode) & I_LINKABLE));
atomic_long_dec(&inode->i_sb->s_remove_count);
}
@ -530,23 +530,6 @@ void ihold(struct inode *inode)
}
EXPORT_SYMBOL(ihold);
static void __inode_add_lru(struct inode *inode, bool rotate)
{
if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
return;
if (icount_read(inode))
return;
if (!(inode->i_sb->s_flags & SB_ACTIVE))
return;
if (!mapping_shrinkable(&inode->i_data))
return;
if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
else if (rotate)
inode->i_state |= I_REFERENCED;
}
struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
struct inode *inode, u32 bit)
{
@ -558,18 +541,64 @@ struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
}
EXPORT_SYMBOL(inode_bit_waitqueue);
void wait_on_new_inode(struct inode *inode)
{
struct wait_bit_queue_entry wqe;
struct wait_queue_head *wq_head;
spin_lock(&inode->i_lock);
if (!(inode_state_read(inode) & I_NEW)) {
spin_unlock(&inode->i_lock);
return;
}
wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
for (;;) {
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
if (!(inode_state_read(inode) & I_NEW))
break;
spin_unlock(&inode->i_lock);
schedule();
spin_lock(&inode->i_lock);
}
finish_wait(wq_head, &wqe.wq_entry);
WARN_ON(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(wait_on_new_inode);
static void __inode_lru_list_add(struct inode *inode, bool rotate)
{
lockdep_assert_held(&inode->i_lock);
if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
return;
if (icount_read(inode))
return;
if (!(inode->i_sb->s_flags & SB_ACTIVE))
return;
if (!mapping_shrinkable(&inode->i_data))
return;
if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
else if (rotate)
inode_state_set(inode, I_REFERENCED);
}
/*
* Add inode to LRU if needed (inode is unused and clean).
*
* Needs inode->i_lock held.
*/
void inode_add_lru(struct inode *inode)
void inode_lru_list_add(struct inode *inode)
{
__inode_add_lru(inode, false);
__inode_lru_list_add(inode, false);
}
static void inode_lru_list_del(struct inode *inode)
{
if (list_empty(&inode->i_lru))
return;
if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_dec(nr_unused);
}
@ -577,15 +606,15 @@ static void inode_lru_list_del(struct inode *inode)
static void inode_pin_lru_isolating(struct inode *inode)
{
lockdep_assert_held(&inode->i_lock);
WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
inode->i_state |= I_LRU_ISOLATING;
WARN_ON(inode_state_read(inode) & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
inode_state_set(inode, I_LRU_ISOLATING);
}
static void inode_unpin_lru_isolating(struct inode *inode)
{
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
inode->i_state &= ~I_LRU_ISOLATING;
WARN_ON(!(inode_state_read(inode) & I_LRU_ISOLATING));
inode_state_clear(inode, I_LRU_ISOLATING);
/* Called with inode->i_lock which ensures memory ordering. */
inode_wake_up_bit(inode, __I_LRU_ISOLATING);
spin_unlock(&inode->i_lock);
@ -597,7 +626,7 @@ static void inode_wait_for_lru_isolating(struct inode *inode)
struct wait_queue_head *wq_head;
lockdep_assert_held(&inode->i_lock);
if (!(inode->i_state & I_LRU_ISOLATING))
if (!(inode_state_read(inode) & I_LRU_ISOLATING))
return;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
@ -607,14 +636,14 @@ static void inode_wait_for_lru_isolating(struct inode *inode)
* Checking I_LRU_ISOLATING with inode->i_lock guarantees
* memory ordering.
*/
if (!(inode->i_state & I_LRU_ISOLATING))
if (!(inode_state_read(inode) & I_LRU_ISOLATING))
break;
spin_unlock(&inode->i_lock);
schedule();
spin_lock(&inode->i_lock);
}
finish_wait(wq_head, &wqe.wq_entry);
WARN_ON(inode->i_state & I_LRU_ISOLATING);
WARN_ON(inode_state_read(inode) & I_LRU_ISOLATING);
}
/**
@ -761,11 +790,11 @@ void clear_inode(struct inode *inode)
*/
xa_unlock_irq(&inode->i_data.i_pages);
BUG_ON(!list_empty(&inode->i_data.i_private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
BUG_ON(!(inode_state_read_once(inode) & I_FREEING));
BUG_ON(inode_state_read_once(inode) & I_CLEAR);
BUG_ON(!list_empty(&inode->i_wb_list));
/* don't need i_lock here, no concurrent mods to i_state */
inode->i_state = I_FREEING | I_CLEAR;
inode_state_assign_raw(inode, I_FREEING | I_CLEAR);
}
EXPORT_SYMBOL(clear_inode);
@ -786,12 +815,10 @@ static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(!(inode_state_read_once(inode) & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
if (!list_empty(&inode->i_io_list))
inode_io_list_del(inode);
inode_sb_list_del(inode);
spin_lock(&inode->i_lock);
@ -829,7 +856,7 @@ static void evict(struct inode *inode)
* This also means we don't need any fences for the call below.
*/
inode_wake_up_bit(inode, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
BUG_ON(inode_state_read_once(inode) != (I_FREEING | I_CLEAR));
destroy_inode(inode);
}
@ -879,12 +906,12 @@ void evict_inodes(struct super_block *sb)
spin_unlock(&inode->i_lock);
continue;
}
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) {
spin_unlock(&inode->i_lock);
continue;
}
inode->i_state |= I_FREEING;
inode_state_set(inode, I_FREEING);
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose);
@ -938,7 +965,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
* sync, or the last page cache deletion will requeue them.
*/
if (icount_read(inode) ||
(inode->i_state & ~I_REFERENCED) ||
(inode_state_read(inode) & ~I_REFERENCED) ||
!mapping_shrinkable(&inode->i_data)) {
list_lru_isolate(lru, &inode->i_lru);
spin_unlock(&inode->i_lock);
@ -947,8 +974,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
}
/* Recently referenced inodes get one more pass */
if (inode->i_state & I_REFERENCED) {
inode->i_state &= ~I_REFERENCED;
if (inode_state_read(inode) & I_REFERENCED) {
inode_state_clear(inode, I_REFERENCED);
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
}
@ -975,8 +1002,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
return LRU_RETRY;
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
WARN_ON(inode_state_read(inode) & I_NEW);
inode_state_set(inode, I_FREEING);
list_lru_isolate_move(lru, &inode->i_lru, freeable);
spin_unlock(&inode->i_lock);
@ -1008,7 +1035,8 @@ static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_lock
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
int (*test)(struct inode *, void *),
void *data, bool is_inode_hash_locked)
void *data, bool is_inode_hash_locked,
bool *isnew)
{
struct inode *inode = NULL;
@ -1025,16 +1053,17 @@ static struct inode *find_inode(struct super_block *sb,
if (!test(inode, data))
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
__wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
if (unlikely(inode_state_read(inode) & I_CREATING)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
*isnew = !!(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return inode;
@ -1049,7 +1078,7 @@ static struct inode *find_inode(struct super_block *sb,
*/
static struct inode *find_inode_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino,
bool is_inode_hash_locked)
bool is_inode_hash_locked, bool *isnew)
{
struct inode *inode = NULL;
@ -1066,16 +1095,17 @@ static struct inode *find_inode_fast(struct super_block *sb,
if (inode->i_sb != sb)
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
__wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
if (unlikely(inode_state_read(inode) & I_CREATING)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
*isnew = !!(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return inode;
@ -1180,14 +1210,8 @@ void unlock_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW & ~I_CREATING;
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees the bit cleared or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
WARN_ON(!(inode_state_read(inode) & I_NEW));
inode_state_clear(inode, I_NEW | I_CREATING);
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
@ -1197,14 +1221,8 @@ void discard_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees the bit cleared or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
WARN_ON(!(inode_state_read(inode) & I_NEW));
inode_state_clear(inode, I_NEW);
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
iput(inode);
@ -1260,6 +1278,7 @@ EXPORT_SYMBOL(unlock_two_nondirectories);
* @test: callback used for comparisons between inodes
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
* @isnew: pointer to a bool which will indicate whether I_NEW is set
*
* Search for the inode specified by @hashval and @data in the inode cache,
* and if present return it with an increased reference count. This is a
@ -1278,12 +1297,13 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
bool isnew;
might_sleep();
again:
spin_lock(&inode_hash_lock);
old = find_inode(inode->i_sb, head, test, data, true);
old = find_inode(inode->i_sb, head, test, data, true, &isnew);
if (unlikely(old)) {
/*
* Uhhuh, somebody else created the same inode under us.
@ -1292,7 +1312,8 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
spin_unlock(&inode_hash_lock);
if (IS_ERR(old))
return NULL;
wait_on_inode(old);
if (unlikely(isnew))
wait_on_new_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
goto again;
@ -1310,7 +1331,7 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
* caller is responsible for filling in the contents
*/
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW;
inode_state_set(inode, I_NEW);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
@ -1383,15 +1404,17 @@ struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode, *new;
bool isnew;
might_sleep();
again:
inode = find_inode(sb, head, test, data, false);
inode = find_inode(sb, head, test, data, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
wait_on_inode(inode);
if (unlikely(isnew))
wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@ -1426,15 +1449,17 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
bool isnew;
might_sleep();
again:
inode = find_inode_fast(sb, head, ino, false);
inode = find_inode_fast(sb, head, ino, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
wait_on_inode(inode);
if (unlikely(isnew))
wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@ -1448,11 +1473,11 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
old = find_inode_fast(sb, head, ino, true);
old = find_inode_fast(sb, head, ino, true, &isnew);
if (!old) {
inode->i_ino = ino;
spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
inode_state_assign(inode, I_NEW);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
@ -1474,7 +1499,8 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
if (IS_ERR(old))
return NULL;
inode = old;
wait_on_inode(inode);
if (unlikely(isnew))
wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@ -1545,7 +1571,7 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode->i_lock);
if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
if (!(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) {
__iget(inode);
spin_unlock(&inode->i_lock);
} else {
@ -1578,13 +1604,13 @@ EXPORT_SYMBOL(igrab);
* Note2: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
int (*test)(struct inode *, void *), void *data, bool *isnew)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
spin_lock(&inode_hash_lock);
inode = find_inode(sb, head, test, data, true);
inode = find_inode(sb, head, test, data, true, isnew);
spin_unlock(&inode_hash_lock);
return IS_ERR(inode) ? NULL : inode;
@ -1612,13 +1638,15 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct inode *inode;
bool isnew;
might_sleep();
again:
inode = ilookup5_nowait(sb, hashval, test, data);
inode = ilookup5_nowait(sb, hashval, test, data, &isnew);
if (inode) {
wait_on_inode(inode);
if (unlikely(isnew))
wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@ -1640,16 +1668,18 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
bool isnew;
might_sleep();
again:
inode = find_inode_fast(sb, head, ino, false);
inode = find_inode_fast(sb, head, ino, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
wait_on_inode(inode);
if (unlikely(isnew))
wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@ -1741,7 +1771,7 @@ struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb == sb &&
!(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
!(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)) &&
test(inode, data))
return inode;
}
@ -1780,7 +1810,7 @@ struct inode *find_inode_by_ino_rcu(struct super_block *sb,
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino == ino &&
inode->i_sb == sb &&
!(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
!(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)))
return inode;
}
return NULL;
@ -1792,6 +1822,7 @@ int insert_inode_locked(struct inode *inode)
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
bool isnew;
might_sleep();
@ -1804,7 +1835,7 @@ int insert_inode_locked(struct inode *inode)
if (old->i_sb != sb)
continue;
spin_lock(&old->i_lock);
if (old->i_state & (I_FREEING|I_WILL_FREE)) {
if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
spin_unlock(&old->i_lock);
continue;
}
@ -1812,21 +1843,23 @@ int insert_inode_locked(struct inode *inode)
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW | I_CREATING;
inode_state_set(inode, I_NEW | I_CREATING);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
if (unlikely(old->i_state & I_CREATING)) {
if (unlikely(inode_state_read(old) & I_CREATING)) {
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
return -EBUSY;
}
__iget(old);
isnew = !!(inode_state_read(old) & I_NEW);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
wait_on_inode(old);
if (isnew)
wait_on_new_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
return -EBUSY;
@ -1843,7 +1876,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
might_sleep();
inode->i_state |= I_CREATING;
inode_state_set_raw(inode, I_CREATING);
old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
@ -1875,10 +1908,10 @@ static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const struct super_operations *op = inode->i_sb->s_op;
unsigned long state;
int drop;
WARN_ON(inode->i_state & I_NEW);
WARN_ON(inode_state_read(inode) & I_NEW);
VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode);
if (op->drop_inode)
drop = op->drop_inode(inode);
@ -1886,28 +1919,32 @@ static void iput_final(struct inode *inode)
drop = inode_generic_drop(inode);
if (!drop &&
!(inode->i_state & I_DONTCACHE) &&
!(inode_state_read(inode) & I_DONTCACHE) &&
(sb->s_flags & SB_ACTIVE)) {
__inode_add_lru(inode, true);
__inode_lru_list_add(inode, true);
spin_unlock(&inode->i_lock);
return;
}
state = inode->i_state;
if (!drop) {
WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
/*
* Re-check ->i_count in case the ->drop_inode() hooks played games.
* Note we only execute this if the verdict was to drop the inode.
*/
VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode);
if (drop) {
inode_state_set(inode, I_FREEING);
} else {
inode_state_set(inode, I_WILL_FREE);
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
spin_lock(&inode->i_lock);
state = inode->i_state;
WARN_ON(state & I_NEW);
state &= ~I_WILL_FREE;
WARN_ON(inode_state_read(inode) & I_NEW);
inode_state_replace(inode, I_WILL_FREE, I_FREEING);
}
WRITE_ONCE(inode->i_state, state | I_FREEING);
if (!list_empty(&inode->i_lru))
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
@ -1931,7 +1968,7 @@ void iput(struct inode *inode)
retry:
lockdep_assert_not_held(&inode->i_lock);
VFS_BUG_ON_INODE(inode->i_state & I_CLEAR, inode);
VFS_BUG_ON_INODE(inode_state_read_once(inode) & I_CLEAR, inode);
/*
* Note this assert is technically racy as if the count is bogusly
* equal to one, then two CPUs racing to further drop it can both
@ -1942,14 +1979,14 @@ void iput(struct inode *inode)
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
if ((inode->i_state & I_DIRTY_TIME) && inode->i_nlink) {
if ((inode_state_read_once(inode) & I_DIRTY_TIME) && inode->i_nlink) {
trace_writeback_lazytime_iput(inode);
mark_inode_dirty_sync(inode);
goto retry;
}
spin_lock(&inode->i_lock);
if (unlikely((inode->i_state & I_DIRTY_TIME) && inode->i_nlink)) {
if (unlikely((inode_state_read(inode) & I_DIRTY_TIME) && inode->i_nlink)) {
spin_unlock(&inode->i_lock);
goto retry;
}
@ -2962,7 +2999,7 @@ void dump_inode(struct inode *inode, const char *reason)
pr_warn("%s encountered for inode %px\n"
"fs %s mode %ho opflags 0x%hx flags 0x%x state 0x%x count %d\n",
reason, inode, sb->s_type->name, inode->i_mode, inode->i_opflags,
inode->i_flags, inode->i_state, atomic_read(&inode->i_count));
inode->i_flags, inode_state_read_once(inode), atomic_read(&inode->i_count));
}
EXPORT_SYMBOL(dump_inode);
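
Taken together, the fs/inode.c hunks above exercise the whole accessor family this series introduces. A rough usage summary, inferred from the conversions in this diff (the exact lock assertions live in the helpers themselves):

    /* Locked accessors: read and modify under ->i_lock. */
    spin_lock(&inode->i_lock);
    if (!(inode_state_read(inode) & I_DONTCACHE))
            inode_state_set(inode, I_DONTCACHE);
    spin_unlock(&inode->i_lock);

    /*
     * Lockless snapshot, replacing plain and READ_ONCE() loads of
     * ->i_state, for checks that tolerate races (cf. iput() above):
     */
    if ((inode_state_read_once(inode) & I_DIRTY_TIME) && inode->i_nlink)
            mark_inode_dirty_sync(inode);

inode_state_replace() swaps one flag set for another in a single locked step (see iput_final() above), inode_state_assign() overwrites the whole word under the lock, and the _raw variants do the same without the assertions, for inodes no other context can observe.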


@ -1520,7 +1520,7 @@ struct inode *__isofs_iget(struct super_block *sb,
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
ret = isofs_read_inode(inode, relocated);
if (ret < 0) {
iget_failed(inode);


@ -265,7 +265,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
f = JFFS2_INODE_INFO(inode);
@ -373,7 +373,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
{
struct iattr iattr;
if (!(inode->i_state & I_DIRTY_DATASYNC)) {
if (!(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) {
jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
__func__, inode->i_ino);
return;


@ -26,8 +26,8 @@ int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return rc;
inode_lock(inode);
if (!(inode->i_state & I_DIRTY_ALL) ||
(datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
if (!(inode_state_read_once(inode) & I_DIRTY_ALL) ||
(datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
inode_unlock(inode);


@ -29,7 +29,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ret = diRead(inode);


@ -1287,7 +1287,7 @@ int txCommit(tid_t tid, /* transaction identifier */
* to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
* Joern
*/
if (tblk->u.ip->i_state & I_SYNC)
if (inode_state_read_once(tblk->u.ip) & I_SYNC)
tblk->xflag &= ~COMMIT_LAZY;
}


@ -251,7 +251,7 @@ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
struct inode *inode;
inode = iget_locked(sb, kernfs_ino(kn));
if (inode && (inode->i_state & I_NEW))
if (inode && (inode_state_read_once(inode) & I_NEW))
kernfs_init_inode(kn, inode);
return inode;


@ -1542,9 +1542,9 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
inode_lock(inode);
ret = sync_mapping_buffers(inode->i_mapping);
if (!(inode->i_state & I_DIRTY_ALL))
if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
goto out;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
goto out;
err = sync_inode_metadata(inode, 1);
@ -1664,7 +1664,7 @@ struct inode *alloc_anon_inode(struct super_block *s)
* list because mark_inode_dirty() will think
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
inode_state_assign_raw(inode, I_DIRTY);
/*
* Historically anonymous inodes don't have a type at all and
* userspace has come to rely on this.
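
Here the plain assignment of I_DIRTY becomes inode_state_assign_raw(). A short sketch of the pattern, on the assumption (consistent with this hunk and the pipe one below) that the _raw variant exists for inodes no other context can observe yet, where neither ->i_lock nor the accessor assertions apply:

    static struct inode *foofs_alloc_private_inode(struct super_block *sb)
    {
            struct inode *inode = new_inode(sb);    /* foofs_* is hypothetical */

            if (!inode)
                    return NULL;
            /*
             * Not hashed, not on any list, still private to this thread:
             * the unasserted raw store is safe, and pre-marking the inode
             * dirty keeps mark_inode_dirty() from queueing it for writeback.
             */
            inode_state_assign_raw(inode, I_DIRTY);
            return inode;
    }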


@ -589,7 +589,7 @@ struct inode *minix_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
if (INODE_VERSION(inode) == MINIX_V1)


@ -4106,7 +4106,7 @@ int vfs_tmpfile(struct mnt_idmap *idmap,
inode = file_inode(file);
if (!(open_flag & O_EXCL)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
inode_state_set(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
}
security_inode_post_create_tmpfile(idmap, inode);
@ -5001,7 +5001,7 @@ int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
inode_lock(inode);
/* Make sure we don't allow creating hardlink to an unlinked file */
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE))
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
@ -5011,9 +5011,9 @@ int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
error = dir->i_op->link(old_dentry, dir, new_dentry);
}
if (!error && (inode->i_state & I_LINKABLE)) {
if (!error && (inode_state_read_once(inode) & I_LINKABLE)) {
spin_lock(&inode->i_lock);
inode->i_state &= ~I_LINKABLE;
inode_state_clear(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
}
inode_unlock(inode);
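
The namei.c hunks above form one lifecycle: vfs_tmpfile() marks an O_TMPFILE inode I_LINKABLE (unless O_EXCL forbids linking it in later), and vfs_link() first refuses to resurrect an unlinked inode lacking the flag, then clears it once a link exists. Condensed into a sketch:

    /* vfs_tmpfile(): the tmpfile may be linked into the tree later. */
    spin_lock(&inode->i_lock);
    inode_state_set(inode, I_LINKABLE);
    spin_unlock(&inode->i_lock);

    /* vfs_link(): an inode with no links may only gain one if it is
     * still marked linkable... */
    if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE))
            error = -ENOENT;

    /* ...and a successful link consumes the flag. */
    spin_lock(&inode->i_lock);
    inode_state_clear(inode, I_LINKABLE);
    spin_unlock(&inode->i_lock);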


@ -147,10 +147,10 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
if (!fscache_cookie_valid(cookie))
return true;
if (!(inode->i_state & I_PINNING_NETFS_WB)) {
if (!(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_PINNING_NETFS_WB)) {
inode->i_state |= I_PINNING_NETFS_WB;
if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
inode_state_set(inode, I_PINNING_NETFS_WB);
need_use = true;
}
spin_unlock(&inode->i_lock);
@ -192,7 +192,7 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
if (inode->i_state & I_PINNING_NETFS_WB) {
if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) {
loff_t i_size = i_size_read(inode);
fscache_unuse_cookie(cookie, aux, &i_size);
}


@ -36,12 +36,12 @@ void netfs_single_mark_inode_dirty(struct inode *inode)
mark_inode_dirty(inode);
if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) {
if (caching && !(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
bool need_use = false;
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_PINNING_NETFS_WB)) {
inode->i_state |= I_PINNING_NETFS_WB;
if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
inode_state_set(inode, I_PINNING_NETFS_WB);
need_use = true;
}
spin_unlock(&inode->i_lock);
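
Both netfs hunks use the same double-checked pattern: a lockless inode_state_read_once() fast path, so that once the pin bit is set dirtying never touches ->i_lock again, re-validated under the lock before the bit is actually claimed. As a standalone sketch:

    bool need_use = false;

    /* Cheap lockless check first... */
    if (!(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
            spin_lock(&inode->i_lock);
            /* ...then recheck under ->i_lock, since another task may
             * have won the race to claim the bit. */
            if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
                    inode_state_set(inode, I_PINNING_NETFS_WB);
                    need_use = true;        /* we take the cache reference */
            }
            spin_unlock(&inode->i_lock);
    }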


@ -475,7 +475,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
goto out_no_inode;
}
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long now = jiffies;


@ -317,7 +317,7 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
/* Notify pnfs_destroy_layout_final() that we're done */
if (inode->i_state & (I_FREEING | I_CLEAR))
if (inode_state_read(inode) & (I_FREEING | I_CLEAR))
wake_up_var_locked(lo, &inode->i_lock);
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);


@ -1159,7 +1159,7 @@ static int wait_for_concurrent_writes(struct file *file)
dprintk("nfsd: write resume %d\n", task_pid_nr(current));
}
if (inode->i_state & I_DIRTY) {
if (inode_state_read_once(inode) & I_DIRTY) {
dprintk("nfsd: write sync %d\n", task_pid_nr(current));
err = vfs_fsync(file, 0);
}


@ -1148,7 +1148,7 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
if (unlikely(!cpfile))
return -ENOMEM;
if (!(cpfile->i_state & I_NEW))
if (!(inode_state_read_once(cpfile) & I_NEW))
goto out;
err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);


@ -506,7 +506,7 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
if (unlikely(!dat))
return -ENOMEM;
if (!(dat->i_state & I_NEW))
if (!(inode_state_read_once(dat) & I_NEW))
goto out;
err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));


@ -188,7 +188,7 @@ int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
ifile = nilfs_iget_locked(sb, root, NILFS_IFILE_INO);
if (unlikely(!ifile))
return -ENOMEM;
if (!(ifile->i_state & I_NEW))
if (!(inode_state_read_once(ifile) & I_NEW))
goto out;
err = nilfs_mdt_init(ifile, NILFS_MDT_GFP,


@ -365,7 +365,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
failed_after_creation:
clear_nlink(inode);
if (inode->i_state & I_NEW)
if (inode_state_read_once(inode) & I_NEW)
unlock_new_inode(inode);
iput(inode); /*
* raw_inode will be deleted through
@ -562,7 +562,7 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
if (!(inode_state_read_once(inode) & I_NEW)) {
if (!inode->i_nlink) {
iput(inode);
return ERR_PTR(-ESTALE);
@ -591,7 +591,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
err = nilfs_init_gcinode(inode);
@ -631,7 +631,7 @@ int nilfs_attach_btree_node_cache(struct inode *inode)
nilfs_iget_set, &args);
if (unlikely(!btnc_inode))
return -ENOMEM;
if (btnc_inode->i_state & I_NEW) {
if (inode_state_read_once(btnc_inode) & I_NEW) {
nilfs_init_btnc_inode(btnc_inode);
unlock_new_inode(btnc_inode);
}
@ -686,7 +686,7 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
nilfs_iget_set, &args);
if (unlikely(!s_inode))
return ERR_PTR(-ENOMEM);
if (!(s_inode->i_state & I_NEW))
if (!(inode_state_read_once(s_inode) & I_NEW))
return inode;
NILFS_I(s_inode)->i_flags = 0;


@ -1226,7 +1226,7 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
if (unlikely(!sufile))
return -ENOMEM;
if (!(sufile->i_state & I_NEW))
if (!(inode_state_read_once(sufile) & I_NEW))
goto out;
err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));


@ -52,7 +52,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
* the inode cannot have any associated watches.
*/
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}


@ -537,7 +537,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
return ERR_PTR(-ENOMEM);
/* If this is a freshly allocated inode, need to read it now. */
if (inode->i_state & I_NEW)
if (inode_state_read_once(inode) & I_NEW)
inode = ntfs_read_mft(inode, name, ref);
else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
/*


@ -2487,7 +2487,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
* which hasn't been populated yet, so clear the refresh flag
* and let the caller handle it.
*/
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
status = 0;
if (lockres)
ocfs2_complete_lock_res_refresh(lockres, 0);


@ -152,8 +152,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
mlog_errno(PTR_ERR(inode));
goto bail;
}
trace_ocfs2_iget5_locked(inode->i_state);
if (inode->i_state & I_NEW) {
trace_ocfs2_iget5_locked(inode_state_read_once(inode));
if (inode_state_read_once(inode) & I_NEW) {
rc = ocfs2_read_locked_inode(inode, &args);
unlock_new_inode(inode);
}
@ -1290,6 +1290,8 @@ static void ocfs2_clear_inode(struct inode *inode)
void ocfs2_evict_inode(struct inode *inode)
{
write_inode_now(inode, 1);
if (!inode->i_nlink ||
(OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) {
ocfs2_delete_inode(inode);
@ -1299,27 +1301,6 @@ void ocfs2_evict_inode(struct inode *inode)
ocfs2_clear_inode(inode);
}
/* Called under inode_lock, with no more references on the
* struct inode, so it's safe here to check the flags field
* and to manipulate i_nlink without any other locks. */
int ocfs2_drop_inode(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
inode->i_nlink, oi->ip_flags);
assert_spin_locked(&inode->i_lock);
inode->i_state |= I_WILL_FREE;
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
spin_lock(&inode->i_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_WILL_FREE;
return 1;
}
/*
* This is called from our getattr.
*/


@ -116,7 +116,6 @@ static inline struct ocfs2_caching_info *INODE_CACHE(struct inode *inode)
}
void ocfs2_evict_inode(struct inode *inode);
int ocfs2_drop_inode(struct inode *inode);
/* Flags for ocfs2_iget() */
#define OCFS2_FI_FLAG_SYSFILE 0x1


@ -1569,8 +1569,6 @@ DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_delete_inode);
DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode);
DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode);
TRACE_EVENT(ocfs2_inode_revalidate,
TP_PROTO(void *inode, unsigned long long ino,
unsigned int flags),


@ -129,7 +129,7 @@ static const struct super_operations ocfs2_sops = {
.statfs = ocfs2_statfs,
.alloc_inode = ocfs2_alloc_inode,
.free_inode = ocfs2_free_inode,
.drop_inode = ocfs2_drop_inode,
.drop_inode = inode_just_drop,
.evict_inode = ocfs2_evict_inode,
.sync_fs = ocfs2_sync_fs,
.put_super = ocfs2_put_super,
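
With ocfs2_drop_inode() retired, ocfs2 points ->drop_inode at the generic inode_just_drop() and does the synchronous writeback in ->evict_inode instead (see the write_inode_now() added above). Any filesystem wanting the same "always evict on last iput" behaviour wires it up the same way; a sketch with hypothetical foofs_* ops:

    static const struct super_operations foofs_sops = {
            .alloc_inode    = foofs_alloc_inode,
            .free_inode     = foofs_free_inode,
            .drop_inode     = inode_just_drop,      /* never keep cached */
            .evict_inode    = foofs_evict_inode,
    };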


@ -212,7 +212,7 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
bh = omfs_bread(inode->i_sb, ino);


@ -236,7 +236,7 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry
mutex_unlock(&op_mutex);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
simple_inode_init_ts(inode);
ent_oi = OP_I(inode);
ent_oi->type = ent_type;


@ -1043,7 +1043,7 @@ struct inode *orangefs_iget(struct super_block *sb,
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);


@ -247,7 +247,7 @@ int orangefs_inode_getattr(struct inode *inode, int flags)
spin_lock(&inode->i_lock);
/* Must have all the attributes in the mask and be within cache time. */
if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) ||
orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) {
orangefs_inode->attr_valid || inode_state_read(inode) & I_DIRTY_PAGES) {
if (orangefs_inode->attr_valid) {
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
@ -281,13 +281,13 @@ int orangefs_inode_getattr(struct inode *inode, int flags)
spin_lock(&inode->i_lock);
/* Must have all the attributes in the mask and be within cache time. */
if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) ||
orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) {
orangefs_inode->attr_valid || inode_state_read(inode) & I_DIRTY_PAGES) {
if (orangefs_inode->attr_valid) {
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
goto again2;
}
if (inode->i_state & I_DIRTY_PAGES) {
if (inode_state_read(inode) & I_DIRTY_PAGES) {
ret = 0;
goto out_unlock;
}


@ -686,7 +686,7 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
goto out_drop_write;
spin_lock(&inode->i_lock);
inode->i_state |= I_CREATING;
inode_state_set(inode, I_CREATING);
spin_unlock(&inode->i_lock);
inode_init_owner(&nop_mnt_idmap, inode, dentry->d_parent->d_inode, mode);


@ -1152,7 +1152,7 @@ struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
if (!trap)
return ERR_PTR(-ENOMEM);
if (!(trap->i_state & I_NEW)) {
if (!(inode_state_read_once(trap) & I_NEW)) {
/* Conflicting layer roots? */
iput(trap);
return ERR_PTR(-ELOOP);
@ -1243,7 +1243,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
inode = ovl_iget5(sb, oip->newinode, key);
if (!inode)
goto out_err;
if (!(inode->i_state & I_NEW)) {
if (!(inode_state_read_once(inode) & I_NEW)) {
/*
* Verify that the underlying files stored in the inode
* match those in the dentry.
@ -1303,7 +1303,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
if (upperdentry)
ovl_check_protattr(inode, upperdentry);
if (inode->i_state & I_NEW)
if (inode_state_read_once(inode) & I_NEW)
unlock_new_inode(inode);
out:
return inode;


@ -1019,8 +1019,8 @@ bool ovl_inuse_trylock(struct dentry *dentry)
bool locked = false;
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_OVL_INUSE)) {
inode->i_state |= I_OVL_INUSE;
if (!(inode_state_read(inode) & I_OVL_INUSE)) {
inode_state_set(inode, I_OVL_INUSE);
locked = true;
}
spin_unlock(&inode->i_lock);
@ -1034,8 +1034,8 @@ void ovl_inuse_unlock(struct dentry *dentry)
struct inode *inode = d_inode(dentry);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_OVL_INUSE));
inode->i_state &= ~I_OVL_INUSE;
WARN_ON(!(inode_state_read(inode) & I_OVL_INUSE));
inode_state_clear(inode, I_OVL_INUSE);
spin_unlock(&inode->i_lock);
}
}
@ -1046,7 +1046,7 @@ bool ovl_is_inuse(struct dentry *dentry)
bool inuse;
spin_lock(&inode->i_lock);
inuse = (inode->i_state & I_OVL_INUSE);
inuse = (inode_state_read(inode) & I_OVL_INUSE);
spin_unlock(&inode->i_lock);
return inuse;

View File

@ -908,7 +908,7 @@ static struct inode * get_pipe_inode(void)
* list because "mark_inode_dirty()" will think
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
inode_state_assign_raw(inode, I_DIRTY);
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();


@ -290,7 +290,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
qnx4_inode = qnx4_raw_inode(inode);


@ -521,7 +521,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ei = QNX6_I(inode);


@ -1033,7 +1033,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
!atomic_read(&inode->i_writecount) ||
!dqinit_needed(inode, type)) {
spin_unlock(&inode->i_lock);
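
add_dquot_ref() is the canonical superblock inode walk, and every such walk in this diff gates on the same trio under ->i_lock: skip inodes still being created (I_NEW) or already being torn down (I_FREEING, I_WILL_FREE) before pinning one. Condensed (the real loop also drops s_inode_list_lock while it works on a pinned inode):

    struct inode *inode;

    spin_lock(&sb->s_inode_list_lock);
    list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
            spin_lock(&inode->i_lock);
            /* Skip inodes that are being born or torn down. */
            if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
                    spin_unlock(&inode->i_lock);
                    continue;
            }
            __iget(inode);                  /* pin it */
            spin_unlock(&inode->i_lock);
            /* ...drop s_inode_list_lock, operate, iput(), retake it... */
    }
    spin_unlock(&sb->s_inode_list_lock);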


@ -302,7 +302,7 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
if (!i)
return ERR_PTR(-ENOMEM);
if (!(i->i_state & I_NEW))
if (!(inode_state_read_once(i) & I_NEW))
return i;
/* precalculate the data offset */


@ -500,7 +500,7 @@ cifs_evict_inode(struct inode *inode)
{
netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
if (inode->i_state & I_PINNING_NETFS_WB)
if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
cifs_fscache_unuse_inode_cookie(inode, true);
cifs_fscache_release_inode_cookie(inode);
clear_inode(inode);


@ -101,7 +101,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
__func__, cifs_i->uniqueid);
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
cifs_dbg(FYI, "%s: inode %llu is new\n",
__func__, cifs_i->uniqueid);
return;
@ -146,7 +146,7 @@ cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
*/
if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
/* only provide fake values on a new inode */
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
set_nlink(inode, 2);
else
@ -167,12 +167,12 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
if (!(inode->i_state & I_NEW) &&
if (!(inode_state_read_once(inode) & I_NEW) &&
unlikely(inode_wrong_type(inode, fattr->cf_mode))) {
CIFS_I(inode)->time = 0; /* force reval */
return -ESTALE;
}
if (inode->i_state & I_NEW)
if (inode_state_read_once(inode) & I_NEW)
CIFS_I(inode)->netfs.zero_point = fattr->cf_eof;
cifs_revalidate_cache(inode, fattr);
@ -194,7 +194,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
inode->i_gid = fattr->cf_gid;
/* if dynperm is set, don't clobber existing mode */
if (inode->i_state & I_NEW ||
if (inode_state_read(inode) & I_NEW ||
!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
inode->i_mode = fattr->cf_mode;
@ -236,7 +236,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
if (fattr->cf_flags & CIFS_FATTR_JUNCTION)
inode->i_flags |= S_AUTOMOUNT;
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
cifs_set_netfs_context(inode);
cifs_set_ops(inode);
}
@ -1638,7 +1638,7 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
cifs_fattr_to_inode(inode, fattr, false);
if (sb->s_flags & SB_NOATIME)
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
if (inode_state_read_once(inode) & I_NEW) {
inode->i_ino = hash;
cifs_fscache_get_inode_cookie(inode);
unlock_new_inode(inode);


@ -86,7 +86,7 @@ struct inode *squashfs_iget(struct super_block *sb, long long ino,
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
err = squashfs_read_inode(inode, ino);


@ -183,7 +183,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
if (!file->f_op->fsync)
return -EINVAL;
if (!datasync && (inode->i_state & I_DIRTY_TIME))
if (!datasync && (inode_state_read_once(inode) & I_DIRTY_TIME))
mark_inode_dirty_sync(inode);
return file->f_op->fsync(file, start, end, datasync);
}


@ -1323,7 +1323,7 @@ int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
inode_lock(inode);
/* Synchronize the inode unless this is a 'datasync()' call. */
if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
if (!datasync || (inode_state_read_once(inode) & I_DIRTY_DATASYNC)) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out;


@ -114,7 +114,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
inode = iget_locked(sb, inum);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ui = ubifs_inode(inode);


@ -1962,7 +1962,7 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
if (!(inode_state_read_once(inode) & I_NEW)) {
if (UDF_I(inode)->i_hidden != hidden_inode) {
iput(inode);
return ERR_PTR(-EFSCORRUPTED);


@ -655,7 +655,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ufsi = UFS_I(inode);


@ -1249,7 +1249,7 @@ xchk_irele(
* hits do not clear DONTCACHE, so we must do it here.
*/
spin_lock(&VFS_I(ip)->i_lock);
VFS_I(ip)->i_state &= ~I_DONTCACHE;
inode_state_clear(VFS_I(ip), I_DONTCACHE);
spin_unlock(&VFS_I(ip)->i_lock);
}


@ -1933,7 +1933,7 @@ xrep_inode_pptr(
* Unlinked inodes that cannot be added to the directory tree will not
* have a parent pointer.
*/
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE))
return 0;
/* Children of the superblock do not have parent pointers. */


@ -915,7 +915,7 @@ xchk_pptr_looks_zapped(
* Temporary files that cannot be linked into the directory tree do not
* have attr forks because they cannot ever have parents.
*/
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE))
return false;
/*


@ -514,7 +514,7 @@ xfs_can_free_eofblocks(
* Caller must either hold the exclusive io lock; or be inactivating
* the inode, which guarantees there are no other users of the inode.
*/
if (!(VFS_I(ip)->i_state & I_FREEING))
if (!(inode_state_read_once(VFS_I(ip)) & I_FREEING))
xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
/* prealloc/delalloc exists only on regular files */


@ -285,7 +285,7 @@ xfs_inode_mark_sick(
* is not the case here.
*/
spin_lock(&VFS_I(ip)->i_lock);
VFS_I(ip)->i_state &= ~I_DONTCACHE;
inode_state_clear(VFS_I(ip), I_DONTCACHE);
spin_unlock(&VFS_I(ip)->i_lock);
}
@ -309,7 +309,7 @@ xfs_inode_mark_corrupt(
* is not the case here.
*/
spin_lock(&VFS_I(ip)->i_lock);
VFS_I(ip)->i_state &= ~I_DONTCACHE;
inode_state_clear(VFS_I(ip), I_DONTCACHE);
spin_unlock(&VFS_I(ip)->i_lock);
}


@ -334,7 +334,7 @@ xfs_reinit_inode(
dev_t dev = inode->i_rdev;
kuid_t uid = inode->i_uid;
kgid_t gid = inode->i_gid;
unsigned long state = inode->i_state;
unsigned long state = inode_state_read_once(inode);
error = inode_init_always(mp->m_super, inode);
@ -345,7 +345,7 @@ xfs_reinit_inode(
inode->i_rdev = dev;
inode->i_uid = uid;
inode->i_gid = gid;
inode->i_state = state;
inode_state_assign_raw(inode, state);
mapping_set_folio_min_order(inode->i_mapping,
M_IGEO(mp)->min_folio_order);
return error;
@ -411,7 +411,7 @@ xfs_iget_recycle(
ip->i_flags |= XFS_INEW;
xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
inode->i_state = I_NEW;
inode_state_assign_raw(inode, I_NEW);
spin_unlock(&ip->i_flags_lock);
spin_unlock(&pag->pag_ici_lock);
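
XFS recycles reclaimable inodes rather than freeing them, so xfs_reinit_inode() must carry the VFS state word across inode_init_always(), and xfs_iget_recycle() then re-arms I_NEW so new lookups wait for the recycle to finish. Both use the raw accessors, presumably because the inode is not visible through the VFS hash here and XFS serializes lookups with its own icache locks; schematically:

    /* xfs_reinit_inode(): preserve ->i_state across reinitialization. */
    unsigned long state = inode_state_read_once(inode);

    error = inode_init_always(mp->m_super, inode);
    if (!error)
            inode_state_assign_raw(inode, state);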

Some files were not shown because too many files have changed in this diff.