locking: Make owner_on_cpu() into <linux/sched.h>
Move owner_on_cpu() from kernel/locking/rwsem.c into include/linux/sched.h under CONFIG_SMP, then use it in mutex/rwsem/rtmutex to simplify the code.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211203075935.136808-2-wangkefeng.wang@huawei.com
commit c0bed69daf
parent 9a75bd0c52
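For context, here is the pattern being consolidated, as a minimal standalone C sketch. The struct task_struct fields and the task_cpu()/vcpu_is_preempted() stubs below are simplified stand-ins for the kernel's definitions, not the real ones; only the owner_on_cpu() body matches what the patch adds to <linux/sched.h>.

/* Standalone sketch -- NOT kernel code. task_struct, task_cpu() and
 * vcpu_is_preempted() are simplified stubs for illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct task_struct {
	int on_cpu;	/* nonzero while the task is running on a CPU */
	int cpu;	/* the CPU the task runs on */
};

static int task_cpu(const struct task_struct *p)
{
	return p->cpu;
}

/* Stub: the real kernel asks the hypervisor whether this vCPU has been
 * preempted; on bare metal the answer is always false. */
static bool vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;
}

/* The helper as the patch adds it to <linux/sched.h>: spinning on a
 * lock owner only pays off while the owner is actually running. */
static inline bool owner_on_cpu(struct task_struct *owner)
{
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

int main(void)
{
	struct task_struct owner = { .on_cpu = 1, .cpu = 2 };

	/* Owner running and its CPU not preempted: worth spinning. */
	printf("spin? %s\n", owner_on_cpu(&owner) ? "yes" : "no");

	owner.on_cpu = 0;	/* owner scheduled out: stop spinning */
	printf("spin? %s\n", owner_on_cpu(&owner) ? "yes" : "no");
	return 0;
}

With the helper in a shared header, mutex, rtmutex and rwsem can all express their "keep spinning?" condition as !owner_on_cpu(owner) || ... instead of open-coding the on_cpu/vcpu_is_preempted pair, as the hunks below show.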
include/linux/sched.h
@@ -2171,6 +2171,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+	/*
+	 * As lock holder preemption issue, we both skip spinning if
+	 * task is not on cpu or its cpu is preempted
+	 */
+	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+}
+
 /* Returns effective CPU energy utilization, as seen by the scheduler */
 unsigned long sched_cpu_util(int cpu, unsigned long max);
 #endif /* CONFIG_SMP */
kernel/locking/mutex.c
@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		/*
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched()) {
 			ret = false;
 			break;
 		}
@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	 * structure won't go away during the spinning period.
 	 */
 	owner = __mutex_owner(lock);
-
-	/*
-	 * As lock holder preemption issue, we both skip spinning if task is not
-	 * on cpu or its cpu is preempted
-	 */
-
 	if (owner)
-		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+		retval = owner_on_cpu(owner);
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
kernel/locking/rtmutex.c
@@ -1382,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
 		 *    for CONFIG_PREEMPT_RCU=y)
 		 *  - the VCPU on which owner runs is preempted
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    rt_mutex_waiter_is_top_waiter(lock, waiter) ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched() ||
+		    rt_mutex_waiter_is_top_waiter(lock, waiter)) {
 			res = false;
 			break;
 		}
kernel/locking/rwsem.c
@@ -613,15 +613,6 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	return false;
 }
 
-static inline bool owner_on_cpu(struct task_struct *owner)
-{
-	/*
-	 * As lock holder preemption issue, we both skip spinning if
-	 * task is not on cpu or its cpu is preempted
-	 */
-	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-}
-
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;