mount-related stuff for this cycle

* saner handling of guards in fs/namespace.c, getting
 rid of needlessly strong locking in some of the users.
 
 	* lock_mount() calling conventions change - have it set
 the environment for attaching to given location, storing the
 results in caller-supplied object, without altering the passed
 struct path.  Make unlock_mount() called as __cleanup for those
 objects.  It's not exactly guard(), but similar to it.
 
 	* MNT_WRITE_HOLD done right - mnt_hold_writers() does *not*
 mess with ->mnt_flags anymore, so insertion of a new mount into
 ->s_mounts of underlying superblock does not, in itself, expose
 ->mnt_flags of that mount to concurrent modifications.
 
 	* getting rid of pathological cases when umount() spends
 quadratic time removing the victims from propagation graph -
 part of that had been dealt with last cycle, this should finish
 it.
 
 	* a bunch of stuff constified.
 
 	* assorted cleanups.
 
 Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQQqUNBr3gm4hGXdBJlZ7Krx/gZQ6wUCaNhzLAAKCRBZ7Krx/gZQ
 63/IAP4yxJ6e3Pt66Uw0MeuSNmeLsQwb7mYo72lsYHpxjYANZAEAspMaLDU9NHxM
 Dy6WDVoJnf7+aDlD6E443YMfPX8XRQM=
 =5T+t
 -----END PGP SIGNATURE-----

Merge tag 'pull-mount' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs mount updates from Al Viro:
 "Several piles this cycle, this mount-related one being the largest and
  trickiest:

   - saner handling of guards in fs/namespace.c, getting rid of
     needlessly strong locking in some of the users

   - lock_mount() calling conventions change - have it set the
     environment for attaching to given location, storing the results in
     caller-supplied object, without altering the passed struct path.

     Make unlock_mount() called as __cleanup for those objects. It's not
     exactly guard(), but similar to it

   - MNT_WRITE_HOLD done right.

     mnt_hold_writers() does *not* mess with ->mnt_flags anymore, so
     insertion of a new mount into ->s_mounts of underlying superblock
     does not, in itself, expose ->mnt_flags of that mount to concurrent
     modifications

   - getting rid of pathological cases when umount() spends quadratic
     time removing the victims from propagation graph - part of that had
     been dealt with last cycle, this should finish it

   - a bunch of stuff constified

   - assorted cleanups"
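
For readers unfamiliar with the cleanup-based primitives the first two bullets describe, here is a minimal sketch of the pattern, built on the <linux/cleanup.h> machinery the series relies on. The lock, guard name and function below are illustrative stand-ins rather than the actual fs/namespace.c code; the real mount_locked_reader guard is defined in the fs/mount.h hunk further down, and unlock_mount() is hooked up to caller-supplied objects via __cleanup in the same spirit.

#include <linux/cleanup.h>
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);		/* stand-in for mount_lock */

/* zero-argument lock guard, same shape as mount_locked_reader below */
DEFINE_LOCK_GUARD_0(example_reader,
		    read_seqlock_excl(&example_lock),
		    read_sequnlock_excl(&example_lock))

static int walk_under_lock(void)
{
	guard(example_reader)();	/* dropped automatically on every return path */
	/* ... traverse a structure protected by example_lock ... */
	return 0;
}

A block-scoped variant, scoped_guard(example_reader) { ... }, works the same way; the pnode.c hunk below uses it around mnt_set_mountpoint().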

* tag 'pull-mount' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (64 commits)
  constify {__,}mnt_is_readonly()
  WRITE_HOLD machinery: no need to bump mount_lock seqcount
  struct mount: relocate MNT_WRITE_HOLD bit
  preparations to taking MNT_WRITE_HOLD out of ->mnt_flags
  setup_mnt(): primitive for connecting a mount to filesystem
  simplify the callers of mnt_unhold_writers()
  copy_mnt_ns(): use guards
  copy_mnt_ns(): use the regular mechanism for freeing empty mnt_ns on failure
  open_detached_copy(): separate creation of namespace into helper
  open_detached_copy(): don't bother with mount_lock_hash()
  path_has_submounts(): use guard(mount_locked_reader)
  fs/namespace.c: sanitize descriptions for {__,}lookup_mnt()
  ecryptfs: get rid of pointless mount references in ecryptfs dentries
  umount_tree(): take all victims out of propagation graph at once
  do_mount(): use __free(path_put)
  do_move_mount_old(): use __free(path_put)
  constify can_move_mount_beneath() arguments
  path_umount(): constify struct path argument
  may_copy_tree(), __do_loopback(): constify struct path argument
  path_mount(): constify struct path argument
  ...
Linus Torvalds 2025-10-03 10:19:44 -07:00
commit e64aeecbbb
15 changed files with 610 additions and 652 deletions


@ -1390,6 +1390,7 @@ struct check_mount {
unsigned int mounted;
};
/* locks: mount_locked_reader && dentry->d_lock */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
struct check_mount *info = data;
@ -1416,9 +1417,8 @@ int path_has_submounts(const struct path *parent)
{
struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
read_seqlock_excl(&mount_lock);
guard(mount_locked_reader)();
d_walk(parent->dentry, &data, path_check_mount);
read_sequnlock_excl(&mount_lock);
return data.mounted;
}


@ -59,14 +59,6 @@ static int ecryptfs_d_revalidate(struct inode *dir, const struct qstr *name,
return rc;
}
struct kmem_cache *ecryptfs_dentry_info_cache;
static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
{
kmem_cache_free(ecryptfs_dentry_info_cache,
container_of(head, struct ecryptfs_dentry_info, rcu));
}
/**
* ecryptfs_d_release
* @dentry: The ecryptfs dentry
@ -75,11 +67,7 @@ static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
*/
static void ecryptfs_d_release(struct dentry *dentry)
{
struct ecryptfs_dentry_info *p = dentry->d_fsdata;
if (p) {
path_put(&p->lower_path);
call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
}
dput(dentry->d_fsdata);
}
const struct dentry_operations ecryptfs_dops = {


@ -258,13 +258,6 @@ struct ecryptfs_inode_info {
struct ecryptfs_crypt_stat crypt_stat;
};
/* dentry private data. Each dentry must keep track of a lower
* vfsmount too. */
struct ecryptfs_dentry_info {
struct path lower_path;
struct rcu_head rcu;
};
/**
* ecryptfs_global_auth_tok - A key used to encrypt all new files under the mountpoint
* @flags: Status flags
@ -348,6 +341,7 @@ struct ecryptfs_mount_crypt_stat {
/* superblock private data. */
struct ecryptfs_sb_info {
struct super_block *wsi_sb;
struct vfsmount *lower_mnt;
struct ecryptfs_mount_crypt_stat mount_crypt_stat;
};
@ -494,22 +488,25 @@ ecryptfs_set_superblock_lower(struct super_block *sb,
}
static inline void
ecryptfs_set_dentry_private(struct dentry *dentry,
struct ecryptfs_dentry_info *dentry_info)
ecryptfs_set_dentry_lower(struct dentry *dentry,
struct dentry *lower_dentry)
{
dentry->d_fsdata = dentry_info;
dentry->d_fsdata = lower_dentry;
}
static inline struct dentry *
ecryptfs_dentry_to_lower(struct dentry *dentry)
{
return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
return dentry->d_fsdata;
}
static inline const struct path *
ecryptfs_dentry_to_lower_path(struct dentry *dentry)
static inline struct path
ecryptfs_lower_path(struct dentry *dentry)
{
return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
return (struct path){
.mnt = ecryptfs_superblock_to_private(dentry->d_sb)->lower_mnt,
.dentry = ecryptfs_dentry_to_lower(dentry)
};
}
#define ecryptfs_printk(type, fmt, arg...) \
@ -532,7 +529,6 @@ extern unsigned int ecryptfs_number_of_users;
extern struct kmem_cache *ecryptfs_auth_tok_list_item_cache;
extern struct kmem_cache *ecryptfs_file_info_cache;
extern struct kmem_cache *ecryptfs_dentry_info_cache;
extern struct kmem_cache *ecryptfs_inode_info_cache;
extern struct kmem_cache *ecryptfs_sb_info_cache;
extern struct kmem_cache *ecryptfs_header_cache;
@ -557,7 +553,6 @@ int ecryptfs_encrypt_and_encode_filename(
size_t *encoded_name_size,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
const char *name, size_t name_size);
struct dentry *ecryptfs_lower_dentry(struct dentry *this_dentry);
void ecryptfs_dump_hex(char *data, int bytes);
int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
int sg_size);
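
Taken together, the ecryptfs_kernel.h changes above mean a dentry's ->d_fsdata now holds only the lower dentry, the single counted reference to the lower vfsmount lives in ecryptfs_sb_info, and ecryptfs_lower_path() assembles a struct path on the stack whenever one is needed. A minimal usage sketch (the function name here is hypothetical; the real callers are in the file.c and inode.c hunks further down), assuming lower_mnt stays pinned by the superblock for its whole lifetime:

static void example_touch_lower_atime(struct dentry *dentry)
{
	struct path lower = ecryptfs_lower_path(dentry);

	/* no mntget()/mntput() needed - lower.mnt is pinned by the superblock */
	touch_atime(&lower);
}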


@ -33,13 +33,12 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
struct iov_iter *to)
{
ssize_t rc;
const struct path *path;
struct file *file = iocb->ki_filp;
rc = generic_file_read_iter(iocb, to);
if (rc >= 0) {
path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
touch_atime(path);
struct path path = ecryptfs_lower_path(file->f_path.dentry);
touch_atime(&path);
}
return rc;
}
@ -59,12 +58,11 @@ static ssize_t ecryptfs_splice_read_update_atime(struct file *in, loff_t *ppos,
size_t len, unsigned int flags)
{
ssize_t rc;
const struct path *path;
rc = filemap_splice_read(in, ppos, pipe, len, flags);
if (rc >= 0) {
path = ecryptfs_dentry_to_lower_path(in->f_path.dentry);
touch_atime(path);
struct path path = ecryptfs_lower_path(in->f_path.dentry);
touch_atime(&path);
}
return rc;
}
@ -283,6 +281,7 @@ static int ecryptfs_dir_open(struct inode *inode, struct file *file)
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
struct file *lower_file;
struct path path;
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
@ -292,8 +291,8 @@ static int ecryptfs_dir_open(struct inode *inode, struct file *file)
"Error attempting to allocate memory\n");
return -ENOMEM;
}
lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
file->f_flags, current_cred());
path = ecryptfs_lower_path(ecryptfs_dentry);
lower_file = dentry_open(&path, file->f_flags, current_cred());
if (IS_ERR(lower_file)) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "


@ -327,24 +327,15 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
const struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
struct dentry *lower_parent = ecryptfs_dentry_to_lower(dentry->d_parent);
struct inode *inode, *lower_inode;
struct ecryptfs_dentry_info *dentry_info;
int rc = 0;
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
if (!dentry_info) {
dput(lower_dentry);
return ERR_PTR(-ENOMEM);
}
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
d_inode(path->dentry));
d_inode(lower_parent));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
dentry_info->lower_path.mnt = mntget(path->mnt);
dentry_info->lower_path.dentry = lower_dentry;
ecryptfs_set_dentry_lower(dentry, lower_dentry);
/*
* negative dentry can go positive under us here - its parent is not
@ -1021,10 +1012,10 @@ static int ecryptfs_getattr(struct mnt_idmap *idmap,
{
struct dentry *dentry = path->dentry;
struct kstat lower_stat;
struct path lower_path = ecryptfs_lower_path(dentry);
int rc;
rc = vfs_getattr_nosec(ecryptfs_dentry_to_lower_path(dentry),
&lower_stat, request_mask, flags);
rc = vfs_getattr_nosec(&lower_path, &lower_stat, request_mask, flags);
if (!rc) {
fsstack_copy_attr_all(d_inode(dentry),
ecryptfs_inode_to_lower(d_inode(dentry)));


@ -106,15 +106,14 @@ static int ecryptfs_init_lower_file(struct dentry *dentry,
struct file **lower_file)
{
const struct cred *cred = current_cred();
const struct path *path = ecryptfs_dentry_to_lower_path(dentry);
struct path path = ecryptfs_lower_path(dentry);
int rc;
rc = ecryptfs_privileged_open(lower_file, path->dentry, path->mnt,
cred);
rc = ecryptfs_privileged_open(lower_file, path.dentry, path.mnt, cred);
if (rc) {
printk(KERN_ERR "Error opening lower file "
"for lower_dentry [0x%p] and lower_mnt [0x%p]; "
"rc = [%d]\n", path->dentry, path->mnt, rc);
"rc = [%d]\n", path.dentry, path.mnt, rc);
(*lower_file) = NULL;
}
return rc;
@ -437,7 +436,6 @@ static int ecryptfs_get_tree(struct fs_context *fc)
struct ecryptfs_fs_context *ctx = fc->fs_private;
struct ecryptfs_sb_info *sbi = fc->s_fs_info;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
struct path path;
@ -543,14 +541,8 @@ static int ecryptfs_get_tree(struct fs_context *fc)
goto out_free;
}
rc = -ENOMEM;
root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
if (!root_info)
goto out_free;
/* ->kill_sb() will take care of root_info */
ecryptfs_set_dentry_private(s->s_root, root_info);
root_info->lower_path = path;
ecryptfs_set_dentry_lower(s->s_root, path.dentry);
ecryptfs_superblock_to_private(s)->lower_mnt = path.mnt;
s->s_flags |= SB_ACTIVE;
fc->root = dget(s->s_root);
@ -580,6 +572,7 @@ static void ecryptfs_kill_block_super(struct super_block *sb)
kill_anon_super(sb);
if (!sb_info)
return;
mntput(sb_info->lower_mnt);
ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
}
@ -667,11 +660,6 @@ static struct ecryptfs_cache_info {
.name = "ecryptfs_file_cache",
.size = sizeof(struct ecryptfs_file_info),
},
{
.cache = &ecryptfs_dentry_info_cache,
.name = "ecryptfs_dentry_info_cache",
.size = sizeof(struct ecryptfs_dentry_info),
},
{
.cache = &ecryptfs_inode_info_cache,
.name = "ecryptfs_inode_cache",


@ -84,9 +84,9 @@ void mnt_put_write_access_file(struct file *file);
extern void dissolve_on_fput(struct vfsmount *);
extern bool may_mount(void);
int path_mount(const char *dev_name, struct path *path,
int path_mount(const char *dev_name, const struct path *path,
const char *type_page, unsigned long flags, void *data_page);
int path_umount(struct path *path, int flags);
int path_umount(const struct path *path, int flags);
int show_path(struct seq_file *m, struct dentry *root);


@ -58,7 +58,10 @@ struct mount {
#endif
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
struct list_head mnt_instance; /* mount instance on sb->s_mounts */
struct mount *mnt_next_for_sb; /* the next two fields are hlist_node, */
struct mount * __aligned(1) *mnt_pprev_for_sb;
/* except that LSB of pprev is stolen */
#define WRITE_HOLD 1 /* ... for use by mnt_hold_writers() */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
@ -148,6 +151,11 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
extern seqlock_t mount_lock;
DEFINE_LOCK_GUARD_0(mount_writer, write_seqlock(&mount_lock),
write_sequnlock(&mount_lock))
DEFINE_LOCK_GUARD_0(mount_locked_reader, read_seqlock_excl(&mount_lock),
read_sequnlock_excl(&mount_lock))
struct proc_mounts {
struct mnt_namespace *ns;
struct path root;
@ -224,4 +232,33 @@ static inline void mnt_notify_add(struct mount *m)
}
#endif
static inline struct mount *topmost_overmount(struct mount *m)
{
while (m->overmount)
m = m->overmount;
return m;
}
static inline bool __test_write_hold(struct mount * __aligned(1) *val)
{
return (unsigned long)val & WRITE_HOLD;
}
static inline bool test_write_hold(const struct mount *m)
{
return __test_write_hold(m->mnt_pprev_for_sb);
}
static inline void set_write_hold(struct mount *m)
{
m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
| WRITE_HOLD);
}
static inline void clear_write_hold(struct mount *m)
{
m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
& ~WRITE_HOLD);
}
struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);
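
The struct mount hunk above replaces the mnt_instance list_head with an open-coded hlist (mnt_next_for_sb / mnt_pprev_for_sb) and steals the least significant bit of the pprev pointer for WRITE_HOLD, which is how mnt_hold_writers() can stop touching ->mnt_flags. A generic, self-contained illustration of that bit-stealing technique follows; the names are made up, only the shape matches the test_write_hold()/set_write_hold()/clear_write_hold() helpers above.

#include <stdbool.h>

#define FLAG_BIT 1UL	/* low pointer bit is free: these nodes are at least 2-byte aligned */

struct node {
	struct node *next;
	struct node **pprev;	/* low bit doubles as a per-node flag */
};

static bool node_flag_test(const struct node *n)
{
	return (unsigned long)n->pprev & FLAG_BIT;
}

static void node_flag_set(struct node *n)
{
	n->pprev = (struct node **)((unsigned long)n->pprev | FLAG_BIT);
}

static void node_flag_clear(struct node *n)
{
	n->pprev = (struct node **)((unsigned long)n->pprev & ~FLAG_BIT);
}

/* anything that follows pprev must mask the flag bit off first */
static struct node **node_real_pprev(const struct node *n)
{
	return (struct node **)((unsigned long)n->pprev & ~FLAG_BIT);
}

The knock-on effect shows up in the fs/super.c and fs.h hunks below: super_block->s_mounts becomes a plain struct mount pointer rather than a list_head.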

File diff suppressed because it is too large.


@ -29,6 +29,7 @@ static inline struct mount *next_slave(struct mount *p)
return hlist_entry(p->mnt_slave.next, struct mount, mnt_slave);
}
/* locks: namespace_shared && is_mounted(mnt) */
static struct mount *get_peer_under_root(struct mount *mnt,
struct mnt_namespace *ns,
const struct path *root)
@ -50,7 +51,7 @@ static struct mount *get_peer_under_root(struct mount *mnt,
* Get ID of closest dominating peer group having a representative
* under the given root.
*
* Caller must hold namespace_sem
* locks: namespace_shared
*/
int get_dominating_id(struct mount *mnt, const struct path *root)
{
@ -70,19 +71,6 @@ static inline bool will_be_unmounted(struct mount *m)
return m->mnt.mnt_flags & MNT_UMOUNT;
}
static struct mount *propagation_source(struct mount *mnt)
{
do {
struct mount *m;
for (m = next_peer(mnt); m != mnt; m = next_peer(m)) {
if (!will_be_unmounted(m))
return m;
}
mnt = mnt->mnt_master;
} while (mnt && will_be_unmounted(mnt));
return mnt;
}
static void transfer_propagation(struct mount *mnt, struct mount *to)
{
struct hlist_node *p = NULL, *n;
@ -111,11 +99,10 @@ void change_mnt_propagation(struct mount *mnt, int type)
return;
}
if (IS_MNT_SHARED(mnt)) {
if (type == MS_SLAVE || !hlist_empty(&mnt->mnt_slave_list))
m = propagation_source(mnt);
if (list_empty(&mnt->mnt_share)) {
mnt_release_group_id(mnt);
} else {
m = next_peer(mnt);
list_del_init(&mnt->mnt_share);
mnt->mnt_group_id = 0;
}
@ -136,6 +123,57 @@ void change_mnt_propagation(struct mount *mnt, int type)
}
}
static struct mount *trace_transfers(struct mount *m)
{
while (1) {
struct mount *next = next_peer(m);
if (next != m) {
list_del_init(&m->mnt_share);
m->mnt_group_id = 0;
m->mnt_master = next;
} else {
if (IS_MNT_SHARED(m))
mnt_release_group_id(m);
next = m->mnt_master;
}
hlist_del_init(&m->mnt_slave);
CLEAR_MNT_SHARED(m);
SET_MNT_MARK(m);
if (!next || !will_be_unmounted(next))
return next;
if (IS_MNT_MARKED(next))
return next->mnt_master;
m = next;
}
}
static void set_destinations(struct mount *m, struct mount *master)
{
struct mount *next;
while ((next = m->mnt_master) != master) {
m->mnt_master = master;
m = next;
}
}
void bulk_make_private(struct list_head *set)
{
struct mount *m;
list_for_each_entry(m, set, mnt_list)
if (!IS_MNT_MARKED(m))
set_destinations(m, trace_transfers(m));
list_for_each_entry(m, set, mnt_list) {
transfer_propagation(m, m->mnt_master);
m->mnt_master = NULL;
CLEAR_MNT_MARK(m);
}
}
static struct mount *__propagation_next(struct mount *m,
struct mount *origin)
{
@ -304,9 +342,8 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
err = PTR_ERR(this);
break;
}
read_seqlock_excl(&mount_lock);
scoped_guard(mount_locked_reader)
mnt_set_mountpoint(n, dest_mp, this);
read_sequnlock_excl(&mount_lock);
if (n->mnt_master)
SET_MNT_MARK(n->mnt_master);
copy = this;
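
The new trace_transfers()/set_destinations()/bulk_make_private() trio above is what removes the quadratic umount() behaviour mentioned in the pull message: trace_transfers() walks up from a victim through its peer group and ->mnt_master chain until it finds a surviving mount, marking every victim it crosses, and set_destinations() then short-circuits each of those masters straight to the surviving destination, so a later victim whose chain hits an already-marked mount stops after a single step. A sketch of just the chain-compression step, with made-up names (only the shape matches set_destinations() above):

struct node {
	struct node *parent;
};

static void compress_to(struct node *n, struct node *dest)
{
	struct node *next;

	while ((next = n->parent) != dest) {
		n->parent = dest;	/* point every hop straight at dest */
		n = next;
	}
}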


@ -42,6 +42,7 @@ static inline bool peers(const struct mount *m1, const struct mount *m2)
}
void change_mnt_propagation(struct mount *, int);
void bulk_make_private(struct list_head *);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
struct hlist_head *);
void propagate_umount(struct list_head *);


@ -323,7 +323,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
if (!s)
return NULL;
INIT_LIST_HEAD(&s->s_mounts);
s->s_user_ns = get_user_ns(user_ns);
init_rwsem(&s->s_umount);
lockdep_set_class(&s->s_umount, &type->s_umount_key);
@ -408,7 +407,7 @@ static void __put_super(struct super_block *s)
list_del_init(&s->s_list);
WARN_ON(s->s_dentry_lru.node);
WARN_ON(s->s_inode_lru.node);
WARN_ON(!list_empty(&s->s_mounts));
WARN_ON(s->s_mounts);
call_rcu(&s->rcu, destroy_super_rcu);
}
}


@ -1434,6 +1434,8 @@ struct sb_writers {
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
struct mount;
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
@ -1468,7 +1470,7 @@ struct super_block {
__u16 s_encoding_flags;
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct mount *s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
struct file *s_bdev_file;
struct backing_dev_info *s_bdi;


@ -33,7 +33,6 @@ enum mount_flags {
MNT_NOSYMFOLLOW = 0x80,
MNT_SHRINKABLE = 0x100,
MNT_WRITE_HOLD = 0x200,
MNT_INTERNAL = 0x4000,
@ -52,7 +51,7 @@ enum mount_flags {
| MNT_READONLY | MNT_NOSYMFOLLOW,
MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
MNT_INTERNAL_FLAGS = MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED |
MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED |
MNT_SYNC_UMOUNT | MNT_LOCKED
};
@ -77,7 +76,7 @@ extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool __mnt_is_readonly(const struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
@ -104,8 +103,8 @@ extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
int do_mount(const char *, const char __user *,
const char *, unsigned long, void *);
extern struct path *collect_paths(const struct path *, struct path *, unsigned);
extern void drop_collected_paths(struct path *, struct path *);
extern const struct path *collect_paths(const struct path *, struct path *, unsigned);
extern void drop_collected_paths(const struct path *, const struct path *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
extern int cifs_root_data(char **dev, char **opts);


@ -680,7 +680,7 @@ void audit_trim_trees(void)
struct audit_tree *tree;
struct path path;
struct audit_node *node;
struct path *paths;
const struct path *paths;
struct path array[16];
int err;
@ -703,7 +703,7 @@ void audit_trim_trees(void)
struct audit_chunk *chunk = find_chunk(node);
/* this could be NULL if the watch is dying else where... */
node->index |= 1U<<31;
for (struct path *p = paths; p->dentry; p++) {
for (const struct path *p = paths; p->dentry; p++) {
struct inode *inode = p->dentry->d_inode;
if (inode_to_key(inode) == chunk->key) {
node->index &= ~(1U<<31);
@ -742,9 +742,9 @@ void audit_put_tree(struct audit_tree *tree)
put_tree(tree);
}
static int tag_mounts(struct path *paths, struct audit_tree *tree)
static int tag_mounts(const struct path *paths, struct audit_tree *tree)
{
for (struct path *p = paths; p->dentry; p++) {
for (const struct path *p = paths; p->dentry; p++) {
int err = tag_chunk(p->dentry->d_inode, tree);
if (err)
return err;
@ -807,7 +807,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
struct audit_tree *seed = rule->tree, *tree;
struct path path;
struct path array[16];
struct path *paths;
const struct path *paths;
int err;
rule->tree = NULL;
@ -879,7 +879,7 @@ int audit_tag_tree(char *old, char *new)
int failed = 0;
struct path path1, path2;
struct path array[16];
struct path *paths;
const struct path *paths;
int err;
err = kern_path(new, 0, &path2);