author     Vineeth Pillai <viremana@linux.microsoft.com>  2020-11-17 18:19:38 -0500
committer  Peter Zijlstra <peterz@infradead.org>  2021-05-12 11:43:29 +0200
commit     8039e96fcc1de30d5bcaf05da9ca2de46a800826 (patch)
tree       bb88e1ba8c783a34c54be34b5c33ccc405fac444 /kernel/sched/core.c
parent     sched: Add core wide task selection and scheduling (diff)
sched/fair: Fix forced idle sibling starvation corner case
If there is only one long-running local task and the sibling is forced
idle, the sibling might not get a chance to run until a schedule event
happens on any CPU in the core. So we check for this condition during a
tick to see if a sibling is starved and then give it a chance to
schedule.

Signed-off-by: Vineeth Pillai <viremana@linux.microsoft.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.617407840@infradead.org
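The tick-time check itself lands in kernel/sched/fair.c, which this view
(limited to kernel/sched/core.c) does not show. Below is a minimal sketch
of the idea, consistent with the core-wide core_forceidle flag introduced
in the hunks that follow; the helper names and exact hook placement are
illustrative assumptions, not copied from the patch:

static inline bool __entity_slice_used(struct sched_entity *se)
{
	/* True once the entity has run longer than the slice it was granted. */
	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) >
	       sched_slice(cfs_rq_of(se), se);
}

/* Hypothetical tick hook, e.g. called from task_tick_fair(). */
static void resched_forceidle_sketch(struct rq *rq, struct task_struct *curr)
{
	if (!sched_core_enabled(rq))
		return;

	/*
	 * A single long-running task never passes through schedule(), so a
	 * forced-idle sibling could starve indefinitely.  Forcing a resched
	 * here makes pick_next_task() run again; it sees core_forceidle,
	 * sets need_sync and redoes the core-wide selection, giving the
	 * starved sibling a chance.
	 */
	if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
	    __entity_slice_used(&curr->se))
		resched_curr(rq);
}

This also motivates the core.c hunks below: moving core_forceidle from the
per-CPU rq to the shared rq->core lets any sibling's tick observe forced
idle cheaply, and pick_next_task() treats a set flag as need_sync so a
fresh core-wide pick happens even after the core cookie has dropped back
to zero.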
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index db763f42a4b0..f5e1e6f96411 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5459,16 +5459,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	/* reset state */
 	rq->core->core_cookie = 0UL;
+	if (rq->core->core_forceidle) {
+		need_sync = true;
+		rq->core->core_forceidle = false;
+	}
 	for_each_cpu(i, smt_mask) {
 		struct rq *rq_i = cpu_rq(i);
 
 		rq_i->core_pick = NULL;
 
-		if (rq_i->core_forceidle) {
-			need_sync = true;
-			rq_i->core_forceidle = false;
-		}
-
 		if (i != cpu)
 			update_rq_clock(rq_i);
 	}
@@ -5588,8 +5587,10 @@ next_class:;
 		if (!rq_i->core_pick)
 			continue;
 
-		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
-			rq_i->core_forceidle = true;
+		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
+		    !rq_i->core->core_forceidle) {
+			rq_i->core->core_forceidle = true;
+		}
 
 		if (i == cpu) {
 			rq_i->core_pick = NULL;