diff --git a/kernel/process.c b/kernel/process.c
index bd51d2bf..6b6fd492 100644
--- a/kernel/process.c
+++ b/kernel/process.c
@@ -3381,6 +3381,7 @@ void spin_sleep_or_schedule(void)
 	v = get_this_cpu_local_var();
 
 	if (v->flags & CPU_FLAG_NEED_RESCHED || v->runq_len > 1) {
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		do_schedule = 1;
 	}
 
@@ -3406,6 +3407,11 @@ void spin_sleep_or_schedule(void)
 		}
 
 		if (woken) {
+			if (do_schedule) {
+				irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
+				v->flags |= CPU_FLAG_NEED_RESCHED;
+				ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+			}
 			return;
 		}
 
@@ -3432,6 +3438,16 @@ void schedule(void)
 	if (cpu_local_var(no_preempt)) {
 		kprintf("%s: WARNING can't schedule() while no preemption, cnt: %d\n",
 			__FUNCTION__, cpu_local_var(no_preempt));
+
+		irqstate = cpu_disable_interrupt_save();
+		ihk_mc_spinlock_lock_noirq(
+			&(get_this_cpu_local_var()->runq_lock));
+		v = get_this_cpu_local_var();
+
+		v->flags |= CPU_FLAG_NEED_RESCHED;
+
+		ihk_mc_spinlock_unlock_noirq(&v->runq_lock);
+		cpu_restore_interrupt(irqstate);
 		return;
 	}
 
@@ -3444,8 +3460,6 @@ void schedule(void)
 	prev = v->current;
 	prevpid = v->prevpid;
 
-	v->flags &= ~CPU_FLAG_NEED_RESCHED;
-
 	/* All runnable processes are on the runqueue */
 	if (prev && prev != &cpu_local_var(idle)) {
 		list_del(&prev->sched_list);
@@ -3576,7 +3590,6 @@ void schedule(void)
 		/* Have we migrated to another core meanwhile? */
 		if (v != get_this_cpu_local_var()) {
 			v = get_this_cpu_local_var();
-			v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		}
 	}
 	else {
@@ -3610,6 +3623,7 @@ void check_need_resched(void)
 			ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
 			return;
 		}
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
 		schedule();
 	}
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 6167ccdc..fa341445 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -259,6 +259,7 @@ long do_syscall(struct syscall_request *req, int cpu)
 	if (v->flags & CPU_FLAG_NEED_RESCHED ||
 	    v->runq_len > 1 ||
 	    req->number == __NR_sched_setaffinity) {
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		do_schedule = 1;
 	}
 
@@ -285,6 +286,16 @@ long do_syscall(struct syscall_request *req, int cpu)
 			schedule();
 			waitq_finish_wait(&thread->scd_wq, &scd_wq_entry);
 		}
+		else {
+			if (do_schedule) {
+				runq_irqstate =
+					ihk_mc_spinlock_lock(
+						&v->runq_lock);
+				v->flags |= CPU_FLAG_NEED_RESCHED;
+				ihk_mc_spinlock_unlock(
+					&v->runq_lock, runq_irqstate);
+			}
+		}
 		cpu_restore_interrupt(flags);
 	}
 
@@ -4969,6 +4980,7 @@ do_sigsuspend(struct thread *thread, const sigset_t *set)
 		v = get_this_cpu_local_var();
 
 		if (v->flags & CPU_FLAG_NEED_RESCHED) {
+			v->flags &= ~CPU_FLAG_NEED_RESCHED;
 			do_schedule = 1;
 		}
 
@@ -7934,6 +7946,7 @@ SYSCALL_DECLARE(sched_yield)
 	runq_irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
 
 	if (v->flags & CPU_FLAG_NEED_RESCHED || v->runq_len > 1) {
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		do_schedule = 1;
 	}
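
The common pattern across these hunks is that the caller that decides to reschedule now clears CPU_FLAG_NEED_RESCHED under runq_lock (instead of schedule() clearing it), and re-arms the flag if it ends up not calling schedule() after all. The following is a minimal, standalone sketch of that pattern only; it is not McKernel code. The struct, function names, and the pthread mutex standing in for the ihk_mc_spinlock/runq_lock primitives are all illustrative assumptions.

/*
 * Sketch (assumed names, userland stand-ins): claim a pending
 * reschedule by clearing the flag under the lock, and re-arm it if
 * the caller defers the actual schedule().
 */
#include <pthread.h>
#include <stdio.h>

#define CPU_FLAG_NEED_RESCHED 0x1UL

struct fake_cpu_local_var {
	unsigned long flags;
	int runq_len;
	pthread_mutex_t runq_lock;	/* stand-in for the runq_lock spinlock */
};

/* Caller decides to reschedule: test and clear the flag while holding the lock. */
static int claim_resched(struct fake_cpu_local_var *v)
{
	int do_schedule = 0;

	pthread_mutex_lock(&v->runq_lock);
	if ((v->flags & CPU_FLAG_NEED_RESCHED) || v->runq_len > 1) {
		v->flags &= ~CPU_FLAG_NEED_RESCHED;
		do_schedule = 1;
	}
	pthread_mutex_unlock(&v->runq_lock);
	return do_schedule;
}

/*
 * If the caller claimed the reschedule but does not call schedule()
 * (e.g. the wait was satisfied early), it re-sets the flag so the
 * pending request is not lost.
 */
static void rearm_resched(struct fake_cpu_local_var *v)
{
	pthread_mutex_lock(&v->runq_lock);
	v->flags |= CPU_FLAG_NEED_RESCHED;
	pthread_mutex_unlock(&v->runq_lock);
}

int main(void)
{
	struct fake_cpu_local_var v = {
		.flags = CPU_FLAG_NEED_RESCHED,
		.runq_len = 1,
		.runq_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	int do_schedule = claim_resched(&v);
	int woken_early = 1;	/* pretend the wait completed before scheduling */

	if (woken_early && do_schedule)
		rearm_resched(&v);	/* mirrors the spin_sleep_or_schedule() hunk */

	printf("do_schedule=%d flags=%#lx\n", do_schedule, v.flags);
	return 0;
}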