process table traversal must be done under interrupt inhibition
add process-table lookup helpers (findthread_and_lock/process_unlock)
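The change below moves do_kill() from taking each per-CPU runq_lock with the interrupt-saving lock call to inhibiting interrupts once around the whole table walk and using the _noirq lock variants, which lets the runq_lock covering the matched thread stay held after the scan. A minimal sketch of the two idioms, assuming only the IHK primitives that appear in this diff (cpu_disable_interrupt_save/cpu_restore_interrupt and the ihk_mc_spinlock_* family; v stands for a struct cpu_local_var * as in the hunks below) — an illustration, not code from the commit:

    /* Before: every acquisition saves and restores the IRQ flag itself. */
    unsigned long irqstate;

    irqstate = ihk_mc_spinlock_lock(&v->runq_lock);   /* IRQs go off here */
    /* ... walk v->runq ... */
    ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);  /* IRQs back on */

    /* After: IRQs are inhibited once for the whole traversal, so a
     * runq_lock taken with the _noirq variant can remain held while
     * the loop advances to the next CPU. */
    irqstate = cpu_disable_interrupt_save();
    ihk_mc_spinlock_lock_noirq(&v->runq_lock);
    /* ... walk v->runq; keep the lock if the target thread is here ... */
    ihk_mc_spinlock_unlock_noirq(&v->runq_lock);
    cpu_restore_interrupt(irqstate);
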
@@ -338,9 +338,11 @@ do_kill(int pid, int tid, int sig)
 	struct sig_pending *pending;
 	struct list_head *head;
 	int rc;
-	unsigned long irqstate;
+	unsigned long irqstate = 0;
 	struct k_sigaction *k;
 	int doint;
+	ihk_spinlock_t *savelock = NULL;
+	int found = 0;
 
 	if(proc == NULL || proc->pid == 0){
 		return -ESRCH;
@@ -395,75 +397,106 @@ do_kill(int pid, int tid, int sig)
 		return rc;
 	}
 
+	irqstate = cpu_disable_interrupt_save();
 	mask = __sigmask(sig);
 	if(tid == -1){
 		struct process *tproc0 = NULL;
+		ihk_spinlock_t *savelock0 = NULL;
 
 		if(pid == proc->pid || pid == 0){
 			tproc0 = proc;
 		}
 		for(i = 0; i < num_processors; i++){
 			v = get_cpu_local_var(i);
-			irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+			found = 0;
+			ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
 			list_for_each_entry(p, &(v->runq), sched_list){
 				if(p->pid == pid){
-					if(p->tid == pid || tproc0 == NULL)
-						tproc0 = p;
-					if(mask & p->sigmask.__val[0]){
-						if(p->tid == pid || tproc == NULL)
-							tproc = p;
-					}
-					if(!(mask & p->sigmask.__val[0])){
-						if(p->tid == pid || tproc == NULL){
-							tproc = p;
-						}
-					}
+					if(p->tid == pid || tproc == NULL){
+						if(!(mask & p->sigmask.__val[0])){
+							tproc = p;
+							if(!found && savelock)
+								ihk_mc_spinlock_unlock_noirq(savelock);
+							found = 1;
+							savelock = &(v->runq_lock);
+							if(savelock0 && savelock0 != savelock){
+								ihk_mc_spinlock_unlock_noirq(savelock0);
+								savelock0 = NULL;
+							}
+						}
+						else if(tproc == NULL && tproc0 == NULL){
+							tproc0 = p;
+							found = 1;
+							savelock0 = &(v->runq_lock);
+						}
+					}
 				}
 			}
-			ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
+			if(!found)
+				ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
 		}
-		if(tproc == NULL)
+		if(tproc == NULL){
 			tproc = tproc0;
+			savelock = savelock0;
+		}
 	}
 	else if(pid == -1){
 		for(i = 0; i < num_processors; i++){
 			v = get_cpu_local_var(i);
-			irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+			found = 0;
+			ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
 			list_for_each_entry(p, &(v->runq), sched_list){
 				if(p->pid > 0 &&
 				   p->tid == tid){
+					savelock = &(v->runq_lock);
+					found = 1;
 					tproc = p;
 					break;
 				}
 			}
-			ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
+			if(!found)
+				ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
 		}
 	}
 	else{
 		if(pid == 0)
 			return -ESRCH;
 		for(i = 0; i < num_processors; i++){
 			v = get_cpu_local_var(i);
-			irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+			found = 0;
+			ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
 			list_for_each_entry(p, &(v->runq), sched_list){
 				if(p->pid == pid &&
 				   p->tid == tid){
+					savelock = &(v->runq_lock);
+					found = 1;
 					tproc = p;
 					break;
 				}
 			}
-			ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
+			if(found)
+				break;
+			ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
 		}
 	}
 
 	if(!tproc){
+		cpu_restore_interrupt(irqstate);
 		return -ESRCH;
 	}
-	if(sig == 0)
+	if(sig == 0){
+		ihk_mc_spinlock_unlock_noirq(savelock);
+		cpu_restore_interrupt(irqstate);
 		return 0;
+	}
 
 	doint = 0;
 	if(tid == -1){
-		irqstate = ihk_mc_spinlock_lock(&tproc->sigshared->lock);
+		ihk_mc_spinlock_lock_noirq(&tproc->sigshared->lock);
 		head = &tproc->sigshared->sigpending;
 	}
 	else{
-		irqstate = ihk_mc_spinlock_lock(&tproc->sigpendinglock);
+		ihk_mc_spinlock_lock_noirq(&tproc->sigpendinglock);
 		head = &tproc->sigpending;
 	}
 
@@ -496,16 +529,25 @@ do_kill(int pid, int tid, int sig)
 	}
 
 	if(tid == -1){
-		ihk_mc_spinlock_unlock(&tproc->sigshared->lock, irqstate);
+		ihk_mc_spinlock_unlock_noirq(&tproc->sigshared->lock);
 	}
 	else{
-		ihk_mc_spinlock_unlock(&tproc->sigpendinglock, irqstate);
+		ihk_mc_spinlock_unlock_noirq(&tproc->sigpendinglock);
 	}
 
 	if(doint && !(mask & tproc->sigmask.__val[0])){
+		int cpuid = tproc->cpu_id;
 		if(proc != tproc){
-			ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(tproc->cpu_id)->apic_id, 0xd0);
+			ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpuid)->apic_id, 0xd0);
 		}
-		interrupt_syscall(tproc->pid, tproc->cpu_id);
+		pid = tproc->pid;
+		ihk_mc_spinlock_unlock_noirq(savelock);
+		cpu_restore_interrupt(irqstate);
+		interrupt_syscall(pid, cpuid);
 	}
+	else{
+		ihk_mc_spinlock_unlock_noirq(savelock);
+		cpu_restore_interrupt(irqstate);
+	}
 	return rc;
 }

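Note the ordering in the rewritten interrupt path above: tproc->pid and tproc->cpu_id are copied into locals while savelock — the runq_lock still pinning tproc — is held, the lock is dropped and interrupts are restored, and only then is interrupt_syscall() issued. Once the lock is released the target thread may exit, so nothing may dereference tproc past that point. The shape of the pattern as a hedged sketch, with a hypothetical wrapper around the calls used in the hunk above (not code from the commit):

    /* Cache what you need while the lock pins the object, drop the
     * lock, then do the potentially slow cross-CPU notification. */
    static void notify_and_release(struct process *tproc,
                                   ihk_spinlock_t *savelock,
                                   unsigned long irqstate)
    {
        int pid = tproc->pid;        /* copied under the lock */
        int cpuid = tproc->cpu_id;

        ihk_mc_spinlock_unlock_noirq(savelock);
        cpu_restore_interrupt(irqstate);
        /* tproc must not be touched from here on */
        interrupt_syscall(pid, cpuid);
    }
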
@@ -277,4 +277,7 @@ void check_need_resched(void);
 void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
 void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
 
+struct process *findthread_and_lock(int pid, int tid, void *savelock, unsigned long *irqstate);
+void process_unlock(void *savelock, unsigned long irqstate);
+
 #endif

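Judging from the implementation added at the end of this commit, findthread_and_lock() returns with the matched thread's runq_lock held and interrupts disabled, writing the lock pointer through savelock and the saved flags through irqstate; process_unlock() undoes both. A sketch of the intended call pattern (hypothetical caller, not part of the commit):

    ihk_spinlock_t *savelock;
    unsigned long irqstate;
    struct process *p;

    p = findthread_and_lock(pid, tid, &savelock, &irqstate);
    if (p == NULL)
        return -ESRCH;          /* no runqueue holds (pid, tid) */
    /* p is pinned here: its runq_lock is held, IRQs saved and off */
    /* ... operate on p ... */
    process_unlock(savelock, irqstate);
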
@@ -1754,21 +1754,21 @@ void sched_init(void)
 #endif
 }
 
-static void double_rq_lock(struct cpu_local_var *v1, struct cpu_local_var *v2)
+static void double_rq_lock(struct cpu_local_var *v1, struct cpu_local_var *v2, unsigned long *irqstate)
 {
 	if (v1 < v2) {
-		ihk_mc_spinlock_lock_noirq(&v1->runq_lock);
+		*irqstate = ihk_mc_spinlock_lock(&v1->runq_lock);
 		ihk_mc_spinlock_lock_noirq(&v2->runq_lock);
 	} else {
-		ihk_mc_spinlock_lock_noirq(&v2->runq_lock);
+		*irqstate = ihk_mc_spinlock_lock(&v2->runq_lock);
 		ihk_mc_spinlock_lock_noirq(&v1->runq_lock);
 	}
 }
 
-static void double_rq_unlock(struct cpu_local_var *v1, struct cpu_local_var *v2)
+static void double_rq_unlock(struct cpu_local_var *v1, struct cpu_local_var *v2, unsigned long irqstate)
 {
 	ihk_mc_spinlock_unlock_noirq(&v1->runq_lock);
-	ihk_mc_spinlock_unlock_noirq(&v2->runq_lock);
+	ihk_mc_spinlock_unlock(&v2->runq_lock, irqstate);
 }
 
 struct migrate_request {
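double_rq_lock() keeps its existing deadlock-avoidance rule — the two runqueue locks are always taken in ascending address order, whatever order the arguments arrive in — and now also saves the interrupt state on the first acquisition, which double_rq_unlock() restores on the last release. The ordering rule in isolation (a generic sketch, not the kernel code):

    /* Two CPUs migrating threads in opposite directions call this with
     * (a, b) and (b, a); normalizing to address order means both take
     * the locks in the same global order, so ABBA deadlock is impossible. */
    static void lock_pair(ihk_spinlock_t *a, ihk_spinlock_t *b,
                          unsigned long *irqstate)
    {
        if (a > b) {
            ihk_spinlock_t *t = a;   /* normalize to address order */
            a = b;
            b = t;
        }
        *irqstate = ihk_mc_spinlock_lock(a);  /* first lock saves IRQ state */
        ihk_mc_spinlock_lock_noirq(b);        /* IRQs already off for the second */
    }
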
@@ -1782,6 +1782,7 @@ static void do_migrate(void)
 	int cur_cpu_id = ihk_mc_get_processor_id();
 	struct cpu_local_var *cur_v = get_cpu_local_var(cur_cpu_id);
 	struct migrate_request *req, *tmp;
+	unsigned long irqstate = 0;
 
 	ihk_mc_spinlock_lock_noirq(&cur_v->migq_lock);
 	list_for_each_entry_safe(req, tmp, &cur_v->migq, list) {
@@ -1805,7 +1806,7 @@ static void do_migrate(void)
 
 		/* 2. migrate thread */
 		v = get_cpu_local_var(cpu_id);
-		double_rq_lock(cur_v, v);
+		double_rq_lock(cur_v, v, &irqstate);
 		list_del(&req->proc->sched_list);
 		cur_v->runq_len -= 1;
 		old_cpu_id = req->proc->cpu_id;
@@ -1821,7 +1822,7 @@ static void do_migrate(void)
 
 		if (v->runq_len == 1)
 			ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
-		double_rq_unlock(cur_v, v);
+		double_rq_unlock(cur_v, v, irqstate);
 
 ack:
 		waitq_wakeup(&req->wq);
@@ -2057,3 +2058,31 @@ void runq_del_proc(struct process *proc, int cpu_id)
 	ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
 }
 
+struct process *
+findthread_and_lock(int pid, int tid, void *savelock, unsigned long *irqstate)
+{
+	struct cpu_local_var *v;
+	struct process *p;
+	int i;
+	extern int num_processors;
+
+	for(i = 0; i < num_processors; i++){
+		v = get_cpu_local_var(i);
+		*(ihk_spinlock_t **)savelock = &(v->runq_lock);
+		*irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+		list_for_each_entry(p, &(v->runq), sched_list){
+			if(p->pid == pid &&
+			   p->tid == tid){
+				return p;
+			}
+		}
+		ihk_mc_spinlock_unlock(&(v->runq_lock), *irqstate);
+	}
+	return NULL;
+}
+
+void
+process_unlock(void *savelock, unsigned long irqstate)
+{
+	ihk_mc_spinlock_unlock((ihk_spinlock_t *)savelock, irqstate);
+}

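Note the out-parameter shape in findthread_and_lock(): savelock is declared void * but receives an ihk_spinlock_t * through the *(ihk_spinlock_t **) cast, presumably to keep the lock type out of the process.h signature, so callers pass the address of a spinlock pointer. A minimal illustration with hypothetical names (not part of the commit):

    /* The callee stores a pointer-to-lock through an opaque void *;
     * the caller hands in &lockp.  Same cast as in the hunk above. */
    static void remember_lock(void *savelock, ihk_spinlock_t *lk)
    {
        *(ihk_spinlock_t **)savelock = lk;
    }

    ihk_spinlock_t *lockp = NULL;
    remember_lock(&lockp, &v->runq_lock);   /* lockp now names the runq lock */
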
@@ -1992,6 +1992,7 @@ SYSCALL_DECLARE(sched_setaffinity)
 	cpu_set_t k_cpu_set, cpu_set;
 	struct process *thread;
 	int cpu_id;
+	unsigned long irqstate;
 
 	if (sizeof(k_cpu_set) > len) {
 		kprintf("%s:%d\n Too small buffer.", __FILE__, __LINE__);
@@ -2012,11 +2013,11 @@ SYSCALL_DECLARE(sched_setaffinity)
 	CPU_SET(cpu_id, &cpu_set);
 
 	for (cpu_id = 0; cpu_id < num_processors; cpu_id++) {
-		ihk_mc_spinlock_lock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
+		irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(cpu_id)->runq_lock);
 		list_for_each_entry(thread, &get_cpu_local_var(cpu_id)->runq, sched_list)
 			if (thread->pid && thread->tid == tid)
 				goto found; /* without unlocking runq_lock */
-		ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
+		ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
 	}
 	kprintf("%s:%d Thread not found.\n", __FILE__, __LINE__);
 	return -ESRCH;
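The search loop above deliberately exits via goto with the matching CPU's runq_lock still held (hence the /* without unlocking runq_lock */ comment), and the hunk that follows shows every path below the found: label releasing it with the irqstate saved at acquisition. Reduced to its control shape, as a sketch assuming the same helpers:

    for (cpu_id = 0; cpu_id < num_processors; cpu_id++) {
        irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(cpu_id)->runq_lock);
        list_for_each_entry(thread, &get_cpu_local_var(cpu_id)->runq, sched_list)
            if (thread->pid && thread->tid == tid)
                goto found;        /* exit holding this runq_lock */
        ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
    }
    return -ESRCH;                 /* nothing held on this path */
    found:
    /* thread is pinned by cpu_id's runq_lock; every return below must
     * first call ihk_mc_spinlock_unlock(..., irqstate) */
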
@@ -2026,12 +2027,12 @@ found:
 
 	if (!CPU_ISSET(cpu_id, &thread->cpu_set)) {
 		hold_process(thread);
-		ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
+		ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
 		sched_request_migrate(cpu_id, thread);
 		release_process(thread);
 		return 0;
 	} else {
-		ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
+		ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
 		return 0;
 	}
 }
@@ -2046,6 +2047,7 @@ SYSCALL_DECLARE(sched_getaffinity)
 	int ret;
 	int found = 0;
 	int i;
+	unsigned long irqstate;
 
 	if (sizeof(k_cpu_set) > len) {
 		kprintf("%s:%d Too small buffer.\n", __FILE__, __LINE__);
@@ -2056,7 +2058,7 @@ SYSCALL_DECLARE(sched_getaffinity)
 	extern int num_processors;
 	for (i = 0; i < num_processors && !found; i++) {
 		struct process *thread;
-		ihk_mc_spinlock_lock_noirq(&get_cpu_local_var(i)->runq_lock);
+		irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(i)->runq_lock);
 		list_for_each_entry(thread, &get_cpu_local_var(i)->runq, sched_list) {
 			if (thread->pid && thread->tid == tid) {
 				found = 1;
@@ -2064,7 +2066,7 @@ SYSCALL_DECLARE(sched_getaffinity)
 				break;
 			}
 		}
-		ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(i)->runq_lock);
+		ihk_mc_spinlock_unlock(&get_cpu_local_var(i)->runq_lock, irqstate);
 	}
 	if (!found) {
 		kprintf("%s:%d Thread not found.\n", __FILE__, __LINE__);