powerpc/qspinlock: Add spinlock contention tracepoint

Add a lock contention tracepoint in the queued spinlock slowpath.
Also add the __lockfunc annotation so that in_lock_functions()
works as expected.

Signed-off-by: Nysal Jan K.A. <nysal@linux.ibm.com>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250731061856.1858898-1-nysal@linux.ibm.com
This commit is contained in:
Nysal Jan K.A. 2025-07-31 11:48:53 +05:30 committed by Madhavan Srinivasan
parent 74db6cc331
commit 4f61d54d22
1 changed file with 10 additions and 9 deletions

View File

@@ -9,6 +9,7 @@
#include <linux/sched/clock.h>
#include <asm/qspinlock.h>
#include <asm/paravirt.h>
#include <trace/events/lock.h>
#define MAX_NODES 4
@@ -708,26 +709,26 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
qnodesp->count--;
}
/*
 * Slowpath entry for the queued spinlock: taken when the fastpath
 * compare-and-swap on the lock word fails.
 *
 * @lock: the contended qspinlock.
 *
 * Emits lock-contention tracepoints around the acquisition attempt so
 * tools (e.g. perf lock) can observe contention.  The __lockfunc
 * annotation places the function in the .spinlock.text section so that
 * in_lock_functions() recognizes it.
 */
void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock)
{
	trace_contention_begin(lock, LCB_F_SPIN);
	/*
	 * This looks funny, but it induces the compiler to inline both
	 * sides of the branch rather than share code as when the condition
	 * is passed as the paravirt argument to the functions.
	 */
	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
		/* Shared-processor (paravirt) path: allow lock stealing. */
		if (try_to_steal_lock(lock, true))
			spec_barrier();
		else
			queued_spin_lock_mcs_queue(lock, true);
	} else {
		/* Dedicated-processor path. */
		if (try_to_steal_lock(lock, false))
			spec_barrier();
		else
			queued_spin_lock_mcs_queue(lock, false);
	}
	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);