seqlock: Introduce scoped_seqlock_read()
The read_seqbegin/need_seqretry/done_seqretry API is cumbersome and
error-prone. With the new helper, the "typical" code like

	int seq, nextseq;
	unsigned long flags;

	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&seqlock, &seq);
		// read-side critical section
		nextseq = 1;
	} while (need_seqretry(&seqlock, seq));
	done_seqretry_irqrestore(&seqlock, seq, flags);

can be rewritten as

	scoped_seqlock_read (&seqlock, ss_lock_irqsave) {
		// read-side critical section
	}
Original idea by Oleg Nesterov; with contributions from Linus.
Originally-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 28a0ee3119
commit cc39f3872c
@@ -1209,4 +1209,115 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
 }
+
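+/*
+ * Targets/states for the scoped read loop: ss_lockless retries without
+ * the lock, ss_lock and ss_lock_irqsave take the lock on a retry, and
+ * ss_done terminates the loop (it is not a valid target).
+ */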
+enum ss_state {
+	ss_done = 0,
+	ss_lock,
+	ss_lock_irqsave,
+	ss_lockless,
+};
+
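+/*
+ * Per-scope state: @state tracks which pass the loop is in; @data holds
+ * the sequence count during a lockless pass, or the saved IRQ flags for
+ * ss_lock_irqsave; the lock pointers record what to unlock on exit.
+ */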
+struct ss_tmp {
+	enum ss_state state;
+	unsigned long data;
+	spinlock_t *lock;
+	spinlock_t *lock_irqsave;
+};
+
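+/* Invoked via __cleanup() on scope exit: drop whichever lock was taken. */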
+static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+	if (sst->lock)
+		spin_unlock(sst->lock);
+	if (sst->lock_irqsave)
+		spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
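+/*
+ * Advance the scope's state machine. Any locked pass, or a lockless pass
+ * that read a consistent snapshot, terminates the loop; a failed lockless
+ * pass re-arms it per @target: retry lockless, or retry with the lock held.
+ */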
+static inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+	switch (sst->state) {
+	case ss_done:
+		__scoped_seqlock_bug();
+		return;
+
+	case ss_lock:
+	case ss_lock_irqsave:
+		sst->state = ss_done;
+		return;
+
+	case ss_lockless:
+		if (!read_seqretry(lock, sst->data)) {
+			sst->state = ss_done;
+			return;
+		}
+		break;
+	}
+
+	switch (target) {
+	case ss_done:
+		__scoped_seqlock_invalid_target();
+		return;
+
+	case ss_lock:
+		sst->lock = &lock->lock;
+		spin_lock(sst->lock);
+		sst->state = ss_lock;
+		return;
+
+	case ss_lock_irqsave:
+		sst->lock_irqsave = &lock->lock;
+		spin_lock_irqsave(sst->lock_irqsave, sst->data);
+		sst->state = ss_lock_irqsave;
+		return;
+
+	case ss_lockless:
+		sst->data = read_seqbegin(lock);
+		return;
+	}
+}
+
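+/*
+ * Written as a for-loop so the body runs at least once, and so that
+ * __cleanup() drops any taken lock on every scope exit, including
+ * break and return.
+ */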
+#define __scoped_seqlock_read(_seqlock, _target, _s)			\
+	for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =	\
+	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) };	\
+	     _s.state != ss_done;					\
+	     __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read-side critical
+ *					  section without manual sequence
+ *					  counter handling or calls to other
+ *					  helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ *	      the type of critical read section
+ *
+ * Example:
+ *
+ *	scoped_seqlock_read (&lock, ss_lock) {
+ *		// read-side critical section
+ *	}
+ *
+ * Starts with a lockless pass first. If it fails, restarts the critical
+ * section with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target) \
+	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+
 #endif /* __LINUX_SEQLOCK_H */
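For illustration, a minimal usage sketch (hypothetical types and names,
not part of this commit): a reader takes a consistent snapshot of two
counters with a first lockless pass, falling back to the lock if a
writer raced with it:

	struct stats {
		seqlock_t lock;
		u64 bytes, packets;
	};

	static void stats_read(struct stats *s, u64 *bytes, u64 *packets)
	{
		scoped_seqlock_read (&s->lock, ss_lock) {
			*bytes = s->bytes;
			*packets = s->packets;
		}
	}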