schedule(): disable auto thread migration

Toggle timesharing on every oversubscribed CPU core, not only on the last
one, and drop the temporary #if 1 block in schedule() that pushed threads
from the last core to idle cores. As the removed FIXME notes, that
migration workaround is superseded by proper timesharing.

 kernel/process.c | 106

@@ -2267,20 +2267,17 @@ redo:
 		++v->runq_len;
 	}
 
-	/* Toggle timesharing if CPU core is oversubscribed
-	 * (on last CPU core only for now) */
-	if (ihk_mc_get_processor_id() == num_processors - 1) {
-		if (v->runq_len > 1) {
-			if (!cpu_local_var(timer_enabled)) {
-				lapic_timer_enable(10000000);
-				cpu_local_var(timer_enabled) = 1;
-			}
-		}
-		else {
-			if (cpu_local_var(timer_enabled)) {
-				lapic_timer_disable();
-				cpu_local_var(timer_enabled) = 0;
-			}
-		}
-	}
+	/* Toggle timesharing if CPU core is oversubscribed */
+	if (v->runq_len > 1) {
+		if (!cpu_local_var(timer_enabled)) {
+			lapic_timer_enable(10000000);
+			cpu_local_var(timer_enabled) = 1;
+		}
+	}
+	else {
+		if (cpu_local_var(timer_enabled)) {
+			lapic_timer_disable();
+			cpu_local_var(timer_enabled) = 0;
+		}
+	}
 
@@ -2363,89 +2360,6 @@ redo:
 	else {
 		ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
 	}
-
-#if 1
-	/* FIXME: temporary solution.
-	 * move threads from the last CPU core to other available cores
-	 * if it's oversubscribed
-	 * Will be solved by proper timesharing in the future */
-	if (ihk_mc_get_processor_id() == (num_processors - 1)) {
-		int old_cpu_id;
-		int cpu_id;
-		struct cpu_local_var *v;
-		struct cpu_local_var *cur_v;
-		struct process *proc_to_move = NULL;
-		unsigned long irqstate2;
-
-		irqstate = cpu_disable_interrupt_save();
-
-		ihk_mc_spinlock_lock_noirq(&(get_this_cpu_local_var()->runq_lock));
-		v = get_this_cpu_local_var();
-
-		if (v->runq_len > 1) {
-			/* Pick another process */
-			list_for_each_entry_safe(proc, tmp, &(v->runq), sched_list) {
-				if (proc != cpu_local_var(current)) {
-					list_del(&proc->sched_list);
-					--v->runq_len;
-					proc_to_move = proc;
-					break;
-				}
-			}
-		}
-		ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
-
-		if (proc_to_move) {
-			ihk_mc_spinlock_lock_noirq(&cpuid_head_lock);
-
-			for (cpu_id = num_processors - 2; cpu_id > -1; --cpu_id) {
-
-				if (get_cpu_local_var(cpu_id)->status !=
-						CPU_STATUS_IDLE) {
-					continue;
-				}
-
-				get_cpu_local_var(cpu_id)->status = CPU_STATUS_RESERVED;
-				break;
-			}
-
-			ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-
-			if (cpu_id == -1) {
-				kprintf("error: no more CPUs left to balance oversubscribed tail core\n");
-				terminate_host(proc_to_move->ftn->pid);
-				cpu_restore_interrupt(irqstate);
-				return;
-			}
-
-			v = get_cpu_local_var(cpu_id);
-			cur_v = get_this_cpu_local_var();
-
-			double_rq_lock(cur_v, v, &irqstate2);
-
-			old_cpu_id = proc_to_move->cpu_id;
-			proc_to_move->cpu_id = cpu_id;
-			CPU_CLR(old_cpu_id, &proc_to_move->cpu_set);
-			CPU_SET(cpu_id, &proc_to_move->cpu_set);
-			settid(proc_to_move, 2, cpu_id, old_cpu_id);
-			__runq_add_proc(proc_to_move, cpu_id);
-			cpu_clear_and_set(old_cpu_id, cpu_id, &proc_to_move->vm->cpu_set,
-					&proc_to_move->vm->cpu_set_lock);
-
-			double_rq_unlock(cur_v, v, irqstate2);
-
-			/* Kick scheduler */
-			if (cpu_id != ihk_mc_get_processor_id())
-				ihk_mc_interrupt_cpu(
-					get_x86_cpu_local_variable(cpu_id)->apic_id,
-					0xd1);
-
-			dkprintf("moved TID %d to CPU: %d\n",
-					proc_to_move->ftn->tid, cpu_id);
-		}
-		cpu_restore_interrupt(irqstate);
-	}
-#endif
 }
 
 void
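
Note: with this change, each core toggles its own preemption timer based
purely on its run-queue length. Below is a minimal, standalone sketch of
that toggle pattern; the struct and the timer stubs are hypothetical
stand-ins for illustration, not McKernel's cpu_local_var() /
lapic_timer_enable() API.

#include <stdio.h>

struct runq {
	int len;            /* number of runnable threads on this CPU */
	int timer_enabled;  /* nonzero while the preemption timer is armed */
};

/* Stubs standing in for lapic_timer_enable()/lapic_timer_disable(). */
static void timer_arm(unsigned long period)
{
	printf("timer armed, period %lu\n", period);
}

static void timer_disarm(void)
{
	printf("timer disarmed\n");
}

/* Arm the timer only while this CPU is oversubscribed (runq len > 1),
 * disarm it once the core is back to a single runnable thread. */
static void toggle_timesharing(struct runq *q)
{
	if (q->len > 1) {
		if (!q->timer_enabled) {
			timer_arm(10000000UL);
			q->timer_enabled = 1;
		}
	}
	else if (q->timer_enabled) {
		timer_disarm();
		q->timer_enabled = 0;
	}
}

int main(void)
{
	struct runq q = { .len = 2, .timer_enabled = 0 };

	toggle_timesharing(&q);	/* oversubscribed: arms the timer */
	q.len = 1;
	toggle_timesharing(&q);	/* single thread again: disarms it */
	return 0;
}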
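
For reference, the deleted #if 1 block implemented a push-migration
pattern: if the last core held more than one runnable thread, it dequeued
one, reserved an idle core scanned from num_processors - 2 downward,
re-homed the thread under both run-queue locks, and sent an IPI to kick
the target core's scheduler. A minimal sketch of that pattern under
simplified assumptions (hypothetical rq struct, no locking or IPIs) might
look like:

#include <stdio.h>

#define NCPUS 4

struct rq {
	int len;	/* runnable threads on this CPU */
	int idle;	/* nonzero if the CPU has nothing to run */
};

static struct rq rqs[NCPUS];

/* Scan for an idle core from the second-to-last core downward, mirroring
 * the removed loop; returns -1 if every other core is busy. */
static int pick_idle_cpu(void)
{
	int cpu;

	for (cpu = NCPUS - 2; cpu > -1; --cpu)
		if (rqs[cpu].idle)
			return cpu;
	return -1;
}

/* Push one thread off the oversubscribed last core onto an idle core.
 * The real code did this under both run-queue locks (double_rq_lock)
 * and then interrupted the target CPU to run its scheduler. */
static int balance_last_core(void)
{
	int src = NCPUS - 1;
	int dst;

	if (rqs[src].len <= 1)
		return -1;	/* last core is not oversubscribed */

	dst = pick_idle_cpu();
	if (dst < 0)
		return -1;	/* no idle core left to take the thread */

	--rqs[src].len;
	++rqs[dst].len;
	rqs[dst].idle = 0;
	printf("moved one thread from CPU %d to CPU %d\n", src, dst);
	return dst;
}

int main(void)
{
	int i;

	for (i = 0; i < NCPUS - 1; ++i)
		rqs[i].idle = 1;
	rqs[NCPUS - 1].len = 2;	/* oversubscribe the last core */
	balance_last_core();
	return 0;
}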