fs/proc/task_mmu: factor out proc_maps_private fields used by PROCMAP_QUERY

Refactor struct proc_maps_private so that the fields used by PROCMAP_QUERY
ioctl are moved into a separate structure. In the next patch this allows
ioctl to reuse some of the functions used for reading /proc/pid/maps
without using file->private_data. This prevents concurrent modification
of file->private_data members by ioctl and /proc/pid/maps readers.

The change is pure code refactoring and has no functional changes.

Link: https://lkml.kernel.org/r/20250808152850.2580887-3-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Weißschuh <linux@weissschuh.net>
Cc: T.J. Mercier <tjmercier@google.com>
Cc: Ye Bin <yebin10@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Suren Baghdasaryan 2025-08-08 08:28:48 -07:00 committed by Andrew Morton
parent 41f1055816
commit ee737a5a10
3 changed files with 63 additions and 53 deletions

View File

@ -378,16 +378,21 @@ extern void proc_self_init(void);
* task_[no]mmu.c * task_[no]mmu.c
*/ */
struct mem_size_stats; struct mem_size_stats;
struct proc_maps_private {
struct inode *inode; struct proc_maps_locking_ctx {
struct task_struct *task;
struct mm_struct *mm; struct mm_struct *mm;
struct vma_iterator iter;
loff_t last_pos;
#ifdef CONFIG_PER_VMA_LOCK #ifdef CONFIG_PER_VMA_LOCK
bool mmap_locked; bool mmap_locked;
struct vm_area_struct *locked_vma; struct vm_area_struct *locked_vma;
#endif #endif
};
struct proc_maps_private {
struct inode *inode;
struct task_struct *task;
struct vma_iterator iter;
loff_t last_pos;
struct proc_maps_locking_ctx lock_ctx;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
struct mempolicy *task_mempolicy; struct mempolicy *task_mempolicy;
#endif #endif

View File

@ -132,18 +132,18 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
#ifdef CONFIG_PER_VMA_LOCK #ifdef CONFIG_PER_VMA_LOCK
static void unlock_vma(struct proc_maps_private *priv) static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
{ {
if (priv->locked_vma) { if (lock_ctx->locked_vma) {
vma_end_read(priv->locked_vma); vma_end_read(lock_ctx->locked_vma);
priv->locked_vma = NULL; lock_ctx->locked_vma = NULL;
} }
} }
static const struct seq_operations proc_pid_maps_op; static const struct seq_operations proc_pid_maps_op;
static inline bool lock_vma_range(struct seq_file *m, static inline bool lock_vma_range(struct seq_file *m,
struct proc_maps_private *priv) struct proc_maps_locking_ctx *lock_ctx)
{ {
/* /*
* smaps and numa_maps perform page table walk, therefore require * smaps and numa_maps perform page table walk, therefore require
@ -151,25 +151,25 @@ static inline bool lock_vma_range(struct seq_file *m,
* walking the vma tree under rcu read protection. * walking the vma tree under rcu read protection.
*/ */
if (m->op != &proc_pid_maps_op) { if (m->op != &proc_pid_maps_op) {
if (mmap_read_lock_killable(priv->mm)) if (mmap_read_lock_killable(lock_ctx->mm))
return false; return false;
priv->mmap_locked = true; lock_ctx->mmap_locked = true;
} else { } else {
rcu_read_lock(); rcu_read_lock();
priv->locked_vma = NULL; lock_ctx->locked_vma = NULL;
priv->mmap_locked = false; lock_ctx->mmap_locked = false;
} }
return true; return true;
} }
static inline void unlock_vma_range(struct proc_maps_private *priv) static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{ {
if (priv->mmap_locked) { if (lock_ctx->mmap_locked) {
mmap_read_unlock(priv->mm); mmap_read_unlock(lock_ctx->mm);
} else { } else {
unlock_vma(priv); unlock_ctx_vma(lock_ctx);
rcu_read_unlock(); rcu_read_unlock();
} }
} }
@ -177,15 +177,16 @@ static inline void unlock_vma_range(struct proc_maps_private *priv)
static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv, static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
loff_t last_pos) loff_t last_pos)
{ {
struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
struct vm_area_struct *vma; struct vm_area_struct *vma;
if (priv->mmap_locked) if (lock_ctx->mmap_locked)
return vma_next(&priv->iter); return vma_next(&priv->iter);
unlock_vma(priv); unlock_ctx_vma(lock_ctx);
vma = lock_next_vma(priv->mm, &priv->iter, last_pos); vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
if (!IS_ERR_OR_NULL(vma)) if (!IS_ERR_OR_NULL(vma))
priv->locked_vma = vma; lock_ctx->locked_vma = vma;
return vma; return vma;
} }
@ -193,14 +194,16 @@ static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv, static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
loff_t pos) loff_t pos)
{ {
if (priv->mmap_locked) struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
if (lock_ctx->mmap_locked)
return false; return false;
rcu_read_unlock(); rcu_read_unlock();
mmap_read_lock(priv->mm); mmap_read_lock(lock_ctx->mm);
/* Reinitialize the iterator after taking mmap_lock */ /* Reinitialize the iterator after taking mmap_lock */
vma_iter_set(&priv->iter, pos); vma_iter_set(&priv->iter, pos);
priv->mmap_locked = true; lock_ctx->mmap_locked = true;
return true; return true;
} }
@ -208,14 +211,14 @@ static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
#else /* CONFIG_PER_VMA_LOCK */ #else /* CONFIG_PER_VMA_LOCK */
static inline bool lock_vma_range(struct seq_file *m, static inline bool lock_vma_range(struct seq_file *m,
struct proc_maps_private *priv) struct proc_maps_locking_ctx *lock_ctx)
{ {
return mmap_read_lock_killable(priv->mm) == 0; return mmap_read_lock_killable(lock_ctx->mm) == 0;
} }
static inline void unlock_vma_range(struct proc_maps_private *priv) static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{ {
mmap_read_unlock(priv->mm); mmap_read_unlock(lock_ctx->mm);
} }
static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv, static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
@ -258,7 +261,7 @@ static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
*ppos = vma->vm_end; *ppos = vma->vm_end;
} else { } else {
*ppos = SENTINEL_VMA_GATE; *ppos = SENTINEL_VMA_GATE;
vma = get_gate_vma(priv->mm); vma = get_gate_vma(priv->lock_ctx.mm);
} }
return vma; return vma;
@ -267,6 +270,7 @@ static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
static void *m_start(struct seq_file *m, loff_t *ppos) static void *m_start(struct seq_file *m, loff_t *ppos)
{ {
struct proc_maps_private *priv = m->private; struct proc_maps_private *priv = m->private;
struct proc_maps_locking_ctx *lock_ctx;
loff_t last_addr = *ppos; loff_t last_addr = *ppos;
struct mm_struct *mm; struct mm_struct *mm;
@ -278,14 +282,15 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
if (!priv->task) if (!priv->task)
return ERR_PTR(-ESRCH); return ERR_PTR(-ESRCH);
mm = priv->mm; lock_ctx = &priv->lock_ctx;
mm = lock_ctx->mm;
if (!mm || !mmget_not_zero(mm)) { if (!mm || !mmget_not_zero(mm)) {
put_task_struct(priv->task); put_task_struct(priv->task);
priv->task = NULL; priv->task = NULL;
return NULL; return NULL;
} }
if (!lock_vma_range(m, priv)) { if (!lock_vma_range(m, lock_ctx)) {
mmput(mm); mmput(mm);
put_task_struct(priv->task); put_task_struct(priv->task);
priv->task = NULL; priv->task = NULL;
@ -318,13 +323,13 @@ static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
static void m_stop(struct seq_file *m, void *v) static void m_stop(struct seq_file *m, void *v)
{ {
struct proc_maps_private *priv = m->private; struct proc_maps_private *priv = m->private;
struct mm_struct *mm = priv->mm; struct mm_struct *mm = priv->lock_ctx.mm;
if (!priv->task) if (!priv->task)
return; return;
release_task_mempolicy(priv); release_task_mempolicy(priv);
unlock_vma_range(priv); unlock_vma_range(&priv->lock_ctx);
mmput(mm); mmput(mm);
put_task_struct(priv->task); put_task_struct(priv->task);
priv->task = NULL; priv->task = NULL;
@ -339,9 +344,9 @@ static int proc_maps_open(struct inode *inode, struct file *file,
return -ENOMEM; return -ENOMEM;
priv->inode = inode; priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR(priv->mm)) { if (IS_ERR(priv->lock_ctx.mm)) {
int err = PTR_ERR(priv->mm); int err = PTR_ERR(priv->lock_ctx.mm);
seq_release_private(inode, file); seq_release_private(inode, file);
return err; return err;
@ -355,8 +360,8 @@ static int proc_map_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data; struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private; struct proc_maps_private *priv = seq->private;
if (priv->mm) if (priv->lock_ctx.mm)
mmdrop(priv->mm); mmdrop(priv->lock_ctx.mm);
return seq_release_private(inode, file); return seq_release_private(inode, file);
} }
@ -610,7 +615,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
if (!!karg.build_id_size != !!karg.build_id_addr) if (!!karg.build_id_size != !!karg.build_id_addr)
return -EINVAL; return -EINVAL;
mm = priv->mm; mm = priv->lock_ctx.mm;
if (!mm || !mmget_not_zero(mm)) if (!mm || !mmget_not_zero(mm))
return -ESRCH; return -ESRCH;
@ -1311,7 +1316,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
{ {
struct proc_maps_private *priv = m->private; struct proc_maps_private *priv = m->private;
struct mem_size_stats mss = {}; struct mem_size_stats mss = {};
struct mm_struct *mm = priv->mm; struct mm_struct *mm = priv->lock_ctx.mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long vma_start = 0, last_vma_end = 0; unsigned long vma_start = 0, last_vma_end = 0;
int ret = 0; int ret = 0;
@ -1456,9 +1461,9 @@ static int smaps_rollup_open(struct inode *inode, struct file *file)
goto out_free; goto out_free;
priv->inode = inode; priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR_OR_NULL(priv->mm)) { if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
ret = priv->mm ? PTR_ERR(priv->mm) : -ESRCH; ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
single_release(inode, file); single_release(inode, file);
goto out_free; goto out_free;
@ -1476,8 +1481,8 @@ static int smaps_rollup_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data; struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private; struct proc_maps_private *priv = seq->private;
if (priv->mm) if (priv->lock_ctx.mm)
mmdrop(priv->mm); mmdrop(priv->lock_ctx.mm);
kfree(priv); kfree(priv);
return single_release(inode, file); return single_release(inode, file);

View File

@ -204,7 +204,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
if (!priv->task) if (!priv->task)
return ERR_PTR(-ESRCH); return ERR_PTR(-ESRCH);
mm = priv->mm; mm = priv->lock_ctx.mm;
if (!mm || !mmget_not_zero(mm)) { if (!mm || !mmget_not_zero(mm)) {
put_task_struct(priv->task); put_task_struct(priv->task);
priv->task = NULL; priv->task = NULL;
@ -226,7 +226,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
static void m_stop(struct seq_file *m, void *v) static void m_stop(struct seq_file *m, void *v)
{ {
struct proc_maps_private *priv = m->private; struct proc_maps_private *priv = m->private;
struct mm_struct *mm = priv->mm; struct mm_struct *mm = priv->lock_ctx.mm;
if (!priv->task) if (!priv->task)
return; return;
@ -259,9 +259,9 @@ static int maps_open(struct inode *inode, struct file *file,
return -ENOMEM; return -ENOMEM;
priv->inode = inode; priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR_OR_NULL(priv->mm)) { if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH; int err = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
seq_release_private(inode, file); seq_release_private(inode, file);
return err; return err;
@ -276,8 +276,8 @@ static int map_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data; struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private; struct proc_maps_private *priv = seq->private;
if (priv->mm) if (priv->lock_ctx.mm)
mmdrop(priv->mm); mmdrop(priv->lock_ctx.mm);
return seq_release_private(inode, file); return seq_release_private(inode, file);
} }