Locking updates for v6.19:
Merge tag 'locking-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Mutexes:
- Redo __mutex_init() to reduce generated code size (Sebastian
Andrzej Siewior)
Seqlocks:
- Introduce scoped_seqlock_read() (Peter Zijlstra)
- Change thread_group_cputime() to use scoped_seqlock_read() (Oleg
Nesterov)
- Change do_task_stat() to use scoped_seqlock_read() (Oleg Nesterov)
- Change do_io_accounting() to use scoped_seqlock_read() (Oleg
Nesterov)
- Fix the incorrect documentation of read_seqbegin_or_lock() /
need_seqretry() (Oleg Nesterov)
- Allow KASAN to fail optimizing (Peter Zijlstra)
Local lock updates:
- Fix all kernel-doc warnings (Randy Dunlap)
- Add the <linux/local_lock*.h> headers to MAINTAINERS (Sebastian
Andrzej Siewior)
- Reduce the risk of shadowing via s/l/__l/ and s/tl/__tl/ (Vincent
Mailhol)
Lock debugging:
- spinlock/debug: Fix data-race in do_raw_write_lock (Alexander
Sverdlin)
Atomic primitives infrastructure:
- atomic: Skip alignment check for try_cmpxchg() old arg (Arnd
Bergmann)
Rust runtime integration:
- sync: atomic: Enable generated Atomic<T> usage (Boqun Feng)
- sync: atomic: Implement Debug for Atomic<Debug> (Boqun Feng)
- debugfs: Remove Rust native atomics and replace them with Linux
versions (Boqun Feng)
- debugfs: Implement Reader for Mutex<T> only when T is Unpin (Boqun
Feng)
- lock: guard: Add T: Unpin bound to DerefMut (Daniel Almeida)
- lock: Pin the inner data (Daniel Almeida)
- lock: Add a Pin<&mut T> accessor (Daniel Almeida)"
* tag 'locking-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/local_lock: Fix all kernel-doc warnings
locking/local_lock: s/l/__l/ and s/tl/__tl/ to reduce the risk of shadowing
locking/local_lock: Add the <linux/local_lock*.h> headers to MAINTAINERS
locking/mutex: Redo __mutex_init() to reduce generated code size
rust: debugfs: Replace the usage of Rust native atomics
rust: sync: atomic: Implement Debug for Atomic<Debug>
rust: sync: atomic: Make Atomic*Ops pub(crate)
seqlock: Allow KASAN to fail optimizing
rust: debugfs: Implement Reader for Mutex<T> only when T is Unpin
seqlock: Change do_io_accounting() to use scoped_seqlock_read()
seqlock: Change do_task_stat() to use scoped_seqlock_read()
seqlock: Change thread_group_cputime() to use scoped_seqlock_read()
seqlock: Introduce scoped_seqlock_read()
documentation: seqlock: fix the wrong documentation of read_seqbegin_or_lock/need_seqretry
atomic: Skip alignment check for try_cmpxchg() old arg
rust: lock: Add a Pin<&mut T> accessor
rust: lock: Pin the inner data
rust: lock: guard: Add T: Unpin bound to DerefMut
locking/spinlock/debug: Fix data-race in do_raw_write_lock
commit b53440f8e5
@@ -220,13 +220,14 @@ Read path, three categories:
 according to a passed marker. This is used to avoid lockless readers
 starvation (too much retry loops) in case of a sharp spike in write
 activity. First, a lockless read is tried (even marker passed). If
-that trial fails (odd sequence counter is returned, which is used as
-the next iteration marker), the lockless read is transformed to a
-full locking read and no retry loop is necessary::
+that trial fails (sequence counter doesn't match), make the marker
+odd for the next iteration, the lockless read is transformed to a
+full locking read and no retry loop is necessary, for example::
 
     /* marker; even initialization */
-    int seq = 0;
+    int seq = 1;
     do {
+            seq++; /* 2 on the 1st/lockless path, otherwise odd */
             read_seqbegin_or_lock(&foo_seqlock, &seq);
 
             /* ... [[read-side critical section]] ... */
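Wrapped into a complete reader, the corrected convention looks roughly like the sketch below. This is a minimal illustration rather than kernel code: foo_seqlock, foo_data and foo_read() are hypothetical names, only the marker handling follows the documented pattern.

	/* Sketch of the documented pattern; all "foo" names are made up. */
	struct foo {
		u64 a, b;
	};

	static DEFINE_SEQLOCK(foo_seqlock);
	static struct foo foo_data;

	static void foo_read(struct foo *ret)
	{
		int seq = 1;		/* odd init: first seq++ gives an even (lockless) marker */

		do {
			seq++;		/* 2 on the 1st/lockless path, otherwise odd */
			read_seqbegin_or_lock(&foo_seqlock, &seq);

			*ret = foo_data;	/* read-side critical section */
		} while (need_seqretry(&foo_seqlock, seq));

		done_seqretry(&foo_seqlock, seq);
	}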
@@ -14536,6 +14536,7 @@ S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 F:	Documentation/locking/
 F:	arch/*/include/asm/spinlock*.h
+F:	include/linux/local_lock*.h
 F:	include/linux/lockdep*.h
 F:	include/linux/mutex*.h
 F:	include/linux/rwlock*.h
@@ -481,7 +481,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
@@ -538,10 +537,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
@@ -563,8 +559,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
do_task_stat() drops its "unsigned int seq = 1;" local and replaces the open-coded
"do { seq++; flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq); ... }
while (need_seqretry(&sig->stats_lock, seq)); done_seqretry_irqrestore(&sig->stats_lock, seq, flags);"
retry loop around the cmin_flt/cmaj_flt/cutime/... accumulation with a
"scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) { ... }" block; the rcu_read_unlock()
and the thread_group_cputime_adjusted() call that follow are unchanged.
@@ -3043,21 +3043,14 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
In the whole-thread-group case, do_io_accounting() drops the "unsigned int seq = 1;" and
"unsigned long flags;" locals, replaces the rcu_read_lock()/rcu_read_unlock() pair with
guard(rcu)(), and converts the
"do { seq++; flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
acct = sig->ioac; __for_each_thread(sig, t) task_io_accounting_add(&acct, &t->ioac);
} while (need_seqretry(...)); done_seqretry_irqrestore(...);" loop into a
"scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) { ... }" block;
the "} else { acct = task->ioac; }" branch is unchanged.
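The guard(rcu)() used above comes from the scope-based cleanup helpers: it takes rcu_read_lock() and releases it automatically on every exit path of the enclosing scope. A minimal sketch of the idiom, with made-up names (my_dev, my_dev_id_by_name), not taken from this series:

	#include <linux/cleanup.h>
	#include <linux/rcupdate.h>
	#include <linux/list.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct my_dev {
		int			id;
		const char		*name;
		struct list_head	node;	/* on an RCU-protected list */
	};

	static int my_dev_id_by_name(struct list_head *devs, const char *name)
	{
		struct my_dev *d;

		guard(rcu)();		/* rcu_read_lock(); auto-unlock on every return below */
		list_for_each_entry_rcu(d, devs, node) {
			if (!strcmp(d->name, name))
				return d->id;	/* copies the value out before the unlock */
		}
		return -ENODEV;
	}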
@@ -1276,7 +1276,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -1298,7 +1298,7 @@ static __always_inline bool
@@ -1321,7 +1321,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -1343,7 +1343,7 @@ static __always_inline bool
@@ -2854,7 +2854,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -2876,7 +2876,7 @@ static __always_inline bool
@@ -2899,7 +2899,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -2921,7 +2921,7 @@ static __always_inline bool
@@ -4432,7 +4432,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -4454,7 +4454,7 @@ static __always_inline bool
@@ -4477,7 +4477,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -4499,7 +4499,7 @@ static __always_inline bool
In every generated try_cmpxchg wrapper (atomic, atomic64 and atomic_long; relaxed, acquire,
release and fully ordered), the instrumentation of the "old" argument changes from
instrument_atomic_read_write(old, sizeof(*old)) to instrument_read_write(old, sizeof(*old));
the kcsan_mb()/kcsan_release() calls, the instrument_atomic_read_write(v, sizeof(*v)) check on
the atomic variable itself and the raw_*_try_cmpxchg*() calls are unchanged.
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
The generated-file checksum before the closing "#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */" is
updated from 8829b337928e9508259079d32581775ececd415b to f618ac667f868941a84ce0ab2242f1786e049ed4.
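The reason the "old" argument is instrumented as a plain access is that callers almost always pass the address of an ordinary local variable, which try_cmpxchg() reads and rewrites non-atomically on failure. A small hedged sketch of typical usage, with a made-up helper name (my_cap_inc):

	#include <linux/atomic.h>

	/* Hypothetical lock-free counter that never exceeds "max". */
	static bool my_cap_inc(atomic_t *v, int max)
	{
		int old = atomic_read(v);	/* "old" is a plain stack variable */

		do {
			if (old >= max)
				return false;
			/*
			 * atomic_try_cmpxchg() reads *old and, on failure, rewrites it
			 * with the current value of *v. Those accesses to the local are
			 * ordinary loads/stores, which is what the relaxed
			 * instrument_read_write() check above now reflects.
			 */
		} while (!atomic_try_cmpxchg(v, &old, old + 1));

		return true;
	}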
@@ -6,6 +6,7 @@
 
 /**
  * local_lock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
  */
 #define local_lock_init(lock)		__local_lock_init(lock)
 
@@ -52,7 +53,8 @@
 	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
 
 /**
- * local_lock_init - Runtime initialize a lock instance
+ * local_trylock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
  */
 #define local_trylock_init(lock)	__local_trylock_init(lock)
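For reference, the documented local_lock_init() is the runtime counterpart of INIT_LOCAL_LOCK(). A minimal usage sketch with hypothetical names (my_pcpu_buf, my_bufs), assuming a per-CPU scratch buffer:

	#include <linux/local_lock.h>
	#include <linux/percpu.h>
	#include <linux/string.h>

	struct my_pcpu_buf {
		local_lock_t	lock;
		char		data[64];
	};

	static DEFINE_PER_CPU(struct my_pcpu_buf, my_bufs);

	static int __init my_buf_setup(void)
	{
		int cpu;

		/* Runtime initialization, as documented for local_lock_init(). */
		for_each_possible_cpu(cpu)
			local_lock_init(&per_cpu(my_bufs, cpu).lock);
		return 0;
	}

	static void my_buf_store(const char *src)
	{
		struct my_pcpu_buf *buf;

		local_lock(&my_bufs.lock);	/* disables preemption; a per-CPU spinlock on PREEMPT_RT */
		buf = this_cpu_ptr(&my_bufs);
		strscpy(buf->data, src, sizeof(buf->data));
		local_unlock(&my_bufs.lock);
	}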
@@ -99,18 +99,18 @@ do { \
@@ -133,36 +133,36 @@ do { \
@@ -170,16 +170,16 @@ do { \
@@ -223,12 +223,12 @@ typedef spinlock_t local_trylock_t;
In __local_lock_acquire(), __local_trylock(), __local_trylock_irqsave() and
__local_lock_release(), the macro-local variables are renamed from l/tl to __l/__tl:
"l = (local_lock_t *)(lock)" becomes "__l = (local_lock_t *)(lock)", and tl becomes __tl in the
lockdep_assert(tl->acquired == 0/1) and WRITE_ONCE(tl->acquired, 1/0) pairs, the
READ_ONCE(tl->acquired) trylock checks, the local_trylock_acquire((local_lock_t *)tl) casts and
the "!!tl" result expressions; local_lock_acquire(__l)/local_lock_release(__l) and the
preempt_disable()/preempt_enable()/local_irq_save()/local_irq_restore() handling are otherwise
unchanged. The PREEMPT_RT __local_lock_init()/__local_trylock_init() wrappers likewise rename
their argument from l to __l around local_spin_lock_init().
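The point of the __l/__tl prefixes is to keep the macro's own locals from shadowing a caller's variables of the same name. A contrived sketch of the hazard, not taken from the tree (my_llock, my_lock_acquire and my_caller are made up):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(local_lock_t, my_llock);

	#define my_lock_acquire(lock)				\
	do {							\
		local_lock_t *l = (local_lock_t *)(lock);	\
		local_lock_acquire(l);				\
	} while (0)

	static void my_caller(void)
	{
		local_lock_t *l = this_cpu_ptr(&my_llock);	/* the caller's own "l" */

		/*
		 * The expansion declares its own "l" first, so the initializer
		 * "(local_lock_t *)(lock)" becomes "(local_lock_t *)(l)" and names
		 * the macro's fresh, still-uninitialized "l" instead of the caller's
		 * variable. Prefixing macro locals as "__l"/"__tl" sidesteps this.
		 */
		my_lock_acquire(l);
	}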
@@ -86,8 +86,23 @@ do { \
@@ -111,17 +126,27 @@ extern bool mutex_is_locked(struct mutex *lock);
The single "extern void __mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);" declaration is replaced by a CONFIG_DEBUG_LOCK_ALLOC split: with
lockdep enabled, a "void mutex_init_lockep(struct mutex *lock, const char *name,
struct lock_class_key *key);" prototype plus a static inline __mutex_init() that calls it;
without lockdep, an "extern void mutex_init_generic(struct mutex *lock);" prototype plus a
static inline __mutex_init() that calls mutex_init_generic(lock). The PREEMPT_RT variant gets
the same treatment: the old __mutex_rt_init() prototype and the "#define __mutex_init(mutex,
name, key)" that did rt_mutex_base_init(&(mutex)->rtmutex) followed by __mutex_rt_init() are
replaced by mutex_rt_init_lockdep() (CONFIG_DEBUG_LOCK_ALLOC) and mutex_rt_init_generic()
(otherwise), each wrapped by a static inline __mutex_init(); DEFINE_MUTEX() and
mutex_is_locked() are unchanged.
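From the user's point of view nothing changes: drivers keep calling mutex_init(), which supplies a static lock_class_key and forwards to __mutex_init(); only what that call expands to becomes smaller. A hedged usage sketch with a hypothetical driver structure (my_dev, my_dev_setup, my_dev_send):

	#include <linux/mutex.h>
	#include <linux/types.h>

	struct my_dev {
		struct mutex	io_lock;
		u32		last_cmd;
	};

	static void my_dev_setup(struct my_dev *d)
	{
		/*
		 * mutex_init() expands to a static struct lock_class_key plus a call
		 * to __mutex_init(); after this change that call is a thin inline
		 * wrapper around the out-of-line mutex_init_lockep() (lockdep builds)
		 * or mutex_init_generic() (non-lockdep builds), so less code is
		 * generated at every init site.
		 */
		mutex_init(&d->io_lock);
	}

	static void my_dev_send(struct my_dev *d, u32 cmd)
	{
		mutex_lock(&d->io_lock);
		d->last_cmd = cmd;
		mutex_unlock(&d->io_lock);
	}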
@@ -1209,4 +1209,118 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
 }
 
+enum ss_state {
+	ss_done = 0,
+	ss_lock,
+	ss_lock_irqsave,
+	ss_lockless,
+};
+
+struct ss_tmp {
+	enum ss_state	state;
+	unsigned long	data;
+	spinlock_t	*lock;
+	spinlock_t	*lock_irqsave;
+};
+
+static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+	if (sst->lock)
+		spin_unlock(sst->lock);
+	if (sst->lock_irqsave)
+		spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000) || defined(CONFIG_KASAN)
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ *
+ * Similarly KASAN is known to confuse compilers enough to break this. But we
+ * don't care about code quality for KASAN builds anyway.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
+static inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+	switch (sst->state) {
+	case ss_done:
+		__scoped_seqlock_bug();
+		return;
+
+	case ss_lock:
+	case ss_lock_irqsave:
+		sst->state = ss_done;
+		return;
+
+	case ss_lockless:
+		if (!read_seqretry(lock, sst->data)) {
+			sst->state = ss_done;
+			return;
+		}
+		break;
+	}
+
+	switch (target) {
+	case ss_done:
+		__scoped_seqlock_invalid_target();
+		return;
+
+	case ss_lock:
+		sst->lock = &lock->lock;
+		spin_lock(sst->lock);
+		sst->state = ss_lock;
+		return;
+
+	case ss_lock_irqsave:
+		sst->lock_irqsave = &lock->lock;
+		spin_lock_irqsave(sst->lock_irqsave, sst->data);
+		sst->state = ss_lock_irqsave;
+		return;
+
+	case ss_lockless:
+		sst->data = read_seqbegin(lock);
+		return;
+	}
+}
+
+#define __scoped_seqlock_read(_seqlock, _target, _s)				\
+	for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =		\
+	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) };		\
+	     _s.state != ss_done;						\
+	     __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read side critical
+ *					  section without manual sequence
+ *					  counter handling or calls to other
+ *					  helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ *	      the type of critical read section
+ *
+ * Example:
+ *
+ *	scoped_seqlock_read (&lock, ss_lock) {
+ *		// read-side critical section
+ *	}
+ *
+ * Starts with a lockess pass first. If it fails, restarts the critical
+ * section with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target)	\
+	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+
 #endif /* __LINUX_SEQLOCK_H */
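A usage sketch against a hypothetical statistics block (my_stats_lock, my_bytes, my_pkts are made-up names): the first pass is always lockless, and the second argument only chooses what the fallback does if that pass raced with a writer (retake as a plain spinlock, an irqsave spinlock, or, with ss_lockless, just retry without the lock).

	static DEFINE_SEQLOCK(my_stats_lock);
	static u64 my_bytes, my_pkts;

	static void my_stats_add(u64 bytes)
	{
		write_seqlock(&my_stats_lock);
		my_bytes += bytes;
		my_pkts++;
		write_sequnlock(&my_stats_lock);
	}

	static void my_stats_read(u64 *bytes, u64 *pkts)
	{
		/* Lockless first pass; retries with the spinlock held if it raced. */
		scoped_seqlock_read (&my_stats_lock, ss_lock) {
			*bytes = my_bytes;
			*pkts  = my_pkts;
		}
	}

	static void my_stats_read_irqsafe(u64 *bytes, u64 *pkts)
	{
		/* Same, but the locked fallback disables interrupts. */
		scoped_seqlock_read (&my_stats_lock, ss_lock_irqsave) {
			*bytes = my_bytes;
			*pkts  = my_pkts;
		}
	}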
@@ -78,16 +78,8 @@ void debug_mutex_unlock(struct mutex *lock)
 	}
 }
 
-void debug_mutex_init(struct mutex *lock, const char *name,
-		      struct lock_class_key *key)
+void debug_mutex_init(struct mutex *lock)
 {
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
-#endif
 	lock->magic = lock;
 }
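What remains of debug_mutex_init() is the "magic" self-pointer, which the CONFIG_DEBUG_MUTEXES checks later compare against the lock's own address. A generic sketch of that idea, with made-up names (my_obj, my_obj_init, my_obj_check), not the kernel's exact checks:

	struct my_obj {
		struct my_obj	*magic;		/* set to the object's own address when live */
		int		val;
	};

	static void my_obj_init(struct my_obj *o)
	{
		o->magic = o;			/* same idea as "lock->magic = lock" above */
	}

	static void my_obj_check(struct my_obj *o)
	{
		/* Catches never-initialized, freed-and-reused or corrupted objects. */
		WARN_ON_ONCE(o->magic != o);
	}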
@@ -43,8 +43,7 @@
@@ -52,10 +51,8 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
@@ -142,6 +139,11 @@ static inline bool __mutex_trylock(struct mutex *lock)
@@ -166,7 +168,21 @@ static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
The exported "__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)"
becomes a static __mutex_init_generic(struct mutex *lock): it still does
atomic_long_set(&lock->owner, 0), raw_spin_lock_init(&lock->wait_lock) and osq_lock_init(), but
ends in debug_mutex_init(lock) instead of debug_mutex_init(lock, name, key), and
EXPORT_SYMBOL(__mutex_init) is dropped. Two exported entry points are added on top of it:
mutex_init_generic(lock), and, under CONFIG_DEBUG_LOCK_ALLOC, mutex_init_lockep(lock, name, key),
which additionally does debug_check_no_locks_freed((void *)lock, sizeof(*lock)) and
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP) so that a held lock is not
silently reinitialized.
@@ -59,8 +59,7 @@ extern void debug_mutex_add_waiter(struct mutex *lock,
 extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 				      struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name,
-			     struct lock_class_key *key);
+extern void debug_mutex_init(struct mutex *lock);
 #else /* CONFIG_DEBUG_MUTEXES */
 # define debug_mutex_lock_common(lock, waiter)		do { } while (0)
 # define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
@@ -68,6 +67,6 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 # define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
 # define debug_mutex_remove_waiter(lock, waiter, ti)	do { } while (0)
 # define debug_mutex_unlock(lock)			do { } while (0)
-# define debug_mutex_init(lock, name, key)		do { } while (0)
+# define debug_mutex_init(lock)				do { } while (0)
 #endif /* !CONFIG_DEBUG_MUTEXES */
 #endif /* CONFIG_PREEMPT_RT */
@@ -515,13 +515,11 @@ void rt_mutex_debug_task_free(struct task_struct *task)
@@ -542,6 +540,13 @@ static __always_inline int __mutex_lock_common(struct mutex *lock,
@@ -598,6 +603,12 @@ int __sched _mutex_trylock_nest_lock(struct mutex *lock,
The PREEMPT_RT mutexes get the same split: the exported
"__mutex_rt_init(struct mutex *mutex, const char *name, struct lock_class_key *key)" becomes a
static __mutex_rt_init_generic(struct mutex *mutex) that does
rt_mutex_base_init(&mutex->rtmutex) and debug_check_no_locks_freed(), with
EXPORT_SYMBOL(__mutex_rt_init) dropped. Under CONFIG_DEBUG_LOCK_ALLOC an exported
mutex_rt_init_lockdep() wraps it and adds
lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP), next to
mutex_lock_nested(); otherwise an exported mutex_rt_init_generic() wraps it, next to the
plain mutex_lock() implementation.
@@ -184,8 +184,8 @@ void do_raw_read_unlock(rwlock_t *lock)
 static inline void debug_write_lock_before(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
-	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+	RWLOCK_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
+	RWLOCK_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
 			lock, "cpu recursion");
 }
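The pattern behind this fix: fields that another CPU may rewrite while the check runs are loaded with READ_ONCE(), so the compiler cannot tear or re-load the value and KCSAN treats the race as intentional. A small sketch with hypothetical names (my_lock_dbg, my_debug_check):

	#include <linux/compiler.h>
	#include <linux/sched.h>
	#include <linux/smp.h>

	struct my_lock_dbg {
		struct task_struct	*owner;
		int			owner_cpu;
	};

	static void my_debug_check(struct my_lock_dbg *d)
	{
		/*
		 * d->owner and d->owner_cpu can be updated by the current lock holder
		 * on another CPU while this sanity check runs; READ_ONCE() marks the
		 * racy reads as deliberate.
		 */
		WARN_ON_ONCE(READ_ONCE(d->owner) == current);
		WARN_ON_ONCE(READ_ONCE(d->owner_cpu) == raw_smp_processor_id());
	}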
@@ -313,10 +313,8 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
 	struct signal_struct *sig = tsk->signal;
+	u64 utime, stime;
 	struct task_struct *t;
-	unsigned int seq, nextseq;
-	unsigned long flags;
-	u64 utime, stime;
 
 	/*
 	 * Update current task runtime to account pending time since last
@@ -329,27 +327,19 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	if (same_thread_group(current, tsk))
 		(void) task_sched_runtime(current);
 
-	rcu_read_lock();
-	/* Attempt a lockless read on the first round. */
-	nextseq = 0;
-	do {
-		seq = nextseq;
-		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+	guard(rcu)();
+	scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) {
 		times->utime = sig->utime;
 		times->stime = sig->stime;
 		times->sum_exec_runtime = sig->sum_sched_runtime;
 
-		for_each_thread(tsk, t) {
+		__for_each_thread(sig, t) {
 			task_cputime(t, &utime, &stime);
 			times->utime += utime;
 			times->stime += stime;
 			times->sum_exec_runtime += read_sum_exec_runtime(t);
 		}
-		/* If lockless access failed, take the lock. */
-		nextseq = 1;
-	} while (need_seqretry(&sig->stats_lock, seq));
-	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
-	rcu_read_unlock();
+	}
 }
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -4,14 +4,11 @@
@@ -50,7 +47,7 @@ pub trait Reader {
@@ -66,37 +63,21 @@ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
The DebugFS Reader implementations stop using Rust's core::sync::atomic types: the imports of
AtomicI8..AtomicUsize and Ordering are replaced by
"use crate::sync::atomic::{Atomic, AtomicBasicOps, AtomicType, Relaxed};", the Mutex impl is
narrowed from "impl<T: FromStr> Reader for Mutex<T>" to
"impl<T: FromStr + Unpin> Reader for Mutex<T>", and the impl_reader_for_atomic! macro with its
per-type list (AtomicI16/i16 ... AtomicUsize/usize) is replaced by a single generic
"impl<T: AtomicType + FromStr> Reader for Atomic<T> where T::Repr: AtomicBasicOps" whose
read_from_slice() parses the user buffer with s.trim().parse::<T>() and stores the result with
self.store(val, Relaxed) instead of Ordering::Relaxed.
@@ -22,9 +22,10 @@
 
 pub use internal::AtomicImpl;
 pub use ordering::{Acquire, Full, Relaxed, Release};
+pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps};
 
 use crate::build_error;
-use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps, AtomicRepr};
+use internal::AtomicRepr;
 use ordering::OrderingType;
 
 /// A memory location which can be safely modified from multiple execution contexts.
@@ -306,6 +307,15 @@ pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
     }
 }
 
+impl<T: AtomicType + core::fmt::Debug> core::fmt::Debug for Atomic<T>
+where
+    T::Repr: AtomicBasicOps,
+{
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        core::fmt::Debug::fmt(&self.load(Relaxed), f)
+    }
+}
+
 impl<T: AtomicType> Atomic<T>
 where
     T::Repr: AtomicExchangeOps,
@@ -11,7 +11,7 @@
@@ -115,6 +115,7 @@ pub struct Lock<T: ?Sized, B: Backend> {
@@ -127,9 +128,13 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
@@ -240,6 +245,31 @@ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
@@ -251,7 +281,10 @@ fn deref(&self) -> &Self::Target {
Lock<T, B> now pins its inner data: the pin_init import gains Wrapper, the
"pub(crate) data: UnsafeCell<T>" field gains #[pin], and Lock::new() takes
"t: impl PinInit<T>" and initializes it with "data <- UnsafeCell::pin_init(t)" instead of
"data: UnsafeCell::new(t)". Guard gains a "pub fn as_mut(&mut self) -> Pin<&mut T>" accessor
(documented with a Mutex<Data>/PhantomPinned example and implemented via
Pin::new_unchecked on the structurally pinned self.lock.data), and Guard's DerefMut
implementation is restricted to "where T: Unpin", with the doc comment pointing Unpin users at
DerefMut rather than as_mut().
@@ -106,7 +106,10 @@ fn deref(&self) -> &Self::Target {
     }
 }
 
-impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> {
+impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B>
+where
+    B::Item: Unpin,
+{
     fn deref_mut(&mut self) -> &mut Self::Target {
         &mut self.inner
     }
@@ -32,14 +32,12 @@
@@ -59,7 +57,7 @@ struct RustDebugFs {
@@ -109,7 +107,7 @@ fn probe(
@@ -120,8 +118,8 @@ fn probe(
The rust_debugfs sample switches from core::sync::atomic::{AtomicUsize, Ordering} to
kernel::sync::atomic::{Atomic, Relaxed}: the counter field becomes File<Atomic<usize>>,
build_counter() creates it with Atomic::<usize>::new(0), and probe() updates it with
result.counter.store(91, Relaxed) instead of Ordering::Relaxed; the Mutex<Inner> file and the
rest of the driver are unchanged.
@@ -6,9 +6,9 @@
@@ -62,7 +62,7 @@ fn create_file_write(
@@ -109,7 +109,7 @@ fn init(device_dir: Dir) -> impl PinInit<Self> {
The scoped-debugfs sample makes the same switch: "use core::sync::atomic::AtomicUsize" becomes
"use kernel::sync::atomic::Atomic", DeviceData's nums field becomes KVec<Atomic<usize>>, and
create_file_write() pushes entries created with Atomic::<usize>::new(sub.parse().map_err(|_| EINVAL)?).
@@ -12,7 +12,7 @@ gen_param_check()
 	local arg="$1"; shift
 	local type="${arg%%:*}"
 	local name="$(gen_param_name "${arg}")"
-	local rw="write"
+	local rw="atomic_write"
 
 	case "${type#c}" in
 	i) return;;
@@ -20,14 +20,17 @@ gen_param_check()
 
 	if [ ${type#c} != ${type} ]; then
 		# We don't write to constant parameters.
-		rw="read"
+		rw="atomic_read"
+	elif [ "${type}" = "p" ] ; then
+		# The "old" argument in try_cmpxchg() gets accessed non-atomically
+		rw="read_write"
 	elif [ "${meta}" != "s" ]; then
 		# An atomic RMW: if this parameter is not a constant, and this atomic is
 		# not just a 's'tore, this parameter is both read from and written to.
-		rw="read_write"
+		rw="atomic_read_write"
 	fi
 
-	printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n"
+	printf "\tinstrument_${rw}(${name}, sizeof(*${name}));\n"
 }
 
 #gen_params_checks(meta, arg...)