sched/psi: Use task->psi_flags to clear in CPU migration

[ Upstream commit 52b33d87b9197c51e8ffdc61873739d90dd0a16f ]

The commit d583d360a6 ("psi: Fix psi state corruption when schedule()
races with cgroup move") fixed a race problem by making cgroup_move_task()
use task->psi_flags instead of looking at the scheduler state.

We can extend task->psi_flags usage to CPU migration, which should be
a minor optimization for performance and code simplicity.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lore.kernel.org/r/20220926081931.45420-1-zhouchengming@bytedance.com
Stable-dep-of: a430d99e3490 ("sched/fair: Fix value reported by hot tasks pulled in /proc/schedstat")
Signed-off-by: Sasha Levin <sashal@kernel.org>
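
The idea behind the change can be shown with a minimal, userspace-only C sketch. This is not kernel code: the toy_task struct, the flag values and the helper names below are simplified stand-ins invented for illustration. The point is that once a task's sleep-persistent PSI states are already accumulated in task->psi_flags, the migration path can clear exactly those flags instead of rebuilding an equivalent mask from p->in_iowait / p->in_memstall.

#include <stdio.h>

/* Simplified stand-ins for the kernel's PSI task state bits (values are illustrative). */
#define TSK_IOWAIT   (1 << 0)
#define TSK_MEMSTALL (1 << 1)

/* Toy task: the individual state bits plus an aggregate psi_flags word. */
struct toy_task {
	unsigned in_iowait:1;
	unsigned in_memstall:1;
	unsigned int psi_flags;		/* kept in sync when the states above are entered */
};

/* Old approach: rebuild the clear mask from the individual state bits. */
static unsigned int clear_mask_old(const struct toy_task *p)
{
	unsigned int clear = 0;

	if (p->in_iowait)
		clear |= TSK_IOWAIT;
	if (p->in_memstall)
		clear |= TSK_MEMSTALL;
	return clear;
}

/* New approach: the aggregate flags already are the mask to clear. */
static unsigned int clear_mask_new(const struct toy_task *p)
{
	return p->psi_flags;
}

int main(void)
{
	struct toy_task p = {
		.in_iowait   = 1,
		.in_memstall = 1,
		.psi_flags   = TSK_IOWAIT | TSK_MEMSTALL,
	};

	/* Both approaches yield the same mask; the second needs no per-bit checks. */
	printf("old mask: %#x, new mask: %#x\n",
	       clear_mask_old(&p), clear_mask_new(&p));
	return 0;
}

In the kernel, the same observation lets psi_dequeue() and psi_ttwu_dequeue() pass p->psi_flags straight to psi_task_change(), as the hunks below show.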

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -888,9 +888,6 @@ struct task_struct {
 	unsigned			sched_reset_on_fork:1;
 	unsigned			sched_contributes_to_load:1;
 	unsigned			sched_migrated:1;
-#ifdef CONFIG_PSI
-	unsigned			sched_psi_wake_requeue:1;
-#endif
 
 	/* Force alignment to the next boundary: */
 	unsigned			:0;

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c

@@ -2049,7 +2049,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!(flags & ENQUEUE_RESTORE)) {
 		sched_info_enqueue(rq, p);
-		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
 	}
 
 	uclamp_rq_inc(rq, p);

diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h

@@ -132,11 +132,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 	if (p->in_memstall)
 		set |= TSK_MEMSTALL_RUNNING;
 
-	if (!wakeup || p->sched_psi_wake_requeue) {
+	if (!wakeup) {
 		if (p->in_memstall)
 			set |= TSK_MEMSTALL;
-		if (p->sched_psi_wake_requeue)
-			p->sched_psi_wake_requeue = 0;
 	} else {
 		if (p->in_iowait)
 			clear |= TSK_IOWAIT;
@@ -147,8 +145,6 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 
 static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
-	int clear = TSK_RUNNING;
-
 	if (static_branch_likely(&psi_disabled))
 		return;
 
@@ -161,10 +157,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 	if (sleep)
 		return;
 
-	if (p->in_memstall)
-		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
-
-	psi_task_change(p, clear, 0);
+	psi_task_change(p, p->psi_flags, 0);
 }
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)
@@ -176,19 +169,12 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 	 * deregister its sleep-persistent psi states from the old
 	 * queue, and let psi_enqueue() know it has to requeue.
 	 */
-	if (unlikely(p->in_iowait || p->in_memstall)) {
+	if (unlikely(p->psi_flags)) {
 		struct rq_flags rf;
 		struct rq *rq;
-		int clear = 0;
-
-		if (p->in_iowait)
-			clear |= TSK_IOWAIT;
-		if (p->in_memstall)
-			clear |= TSK_MEMSTALL;
 
 		rq = __task_rq_lock(p, &rf);
-		psi_task_change(p, clear, 0);
-		p->sched_psi_wake_requeue = 1;
+		psi_task_change(p, p->psi_flags, 0);
 		__task_rq_unlock(rq, &rf);
 	}
 }