Kill child threads when receiving terminating signals (redmine#63)

Create process table (a child is missing when a signal is sent to it just after it is forked) (redmine#61)
Tomoki Shirasawa
2014-07-13 12:51:28 +09:00
parent 292b34fe21
commit 5e6ed852cb
3 changed files with 51 additions and 20 deletions

View File

@@ -288,14 +288,16 @@ check_signal(unsigned long rc, void *regs0)
 unsigned long
 do_kill(int pid, int tid, int sig)
 {
+	struct cpu_local_var *v;
+	struct process *p;
 	struct process *proc = cpu_local_var(current);
 	struct process *tproc = NULL;
 	int i;
 	__sigset_t mask;
 	struct sig_pending *pending;
 	struct list_head *head;
-	int irqstate;
 	int rc;
+	unsigned long irqstate;
 
 	if(proc == NULL || proc->pid == 0){
 		return -ESRCH;
@@ -314,37 +316,52 @@ do_kill(int pid, int tid, int sig)
 		}
 		else{
 			for(i = 0; i < num_processors; i++){
-				if(get_cpu_local_var(i)->current &&
-				   get_cpu_local_var(i)->current->pid == pid){
-					tproc = get_cpu_local_var(i)->current;
-					break;
+				v = get_cpu_local_var(i);
+				irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+				list_for_each_entry(p, &(v->runq), sched_list){
+					if(p->pid == pid){
+						tproc = p;
+						break;
+					}
 				}
+				ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
 			}
 		}
 	}
 	else if(pid == -1){
-		for(i = 0; i < num_processors; i++)
-			if(get_cpu_local_var(i)->current &&
-			   get_cpu_local_var(i)->current->pid > 0 &&
-			   get_cpu_local_var(i)->current->tid == tid){
-				tproc = get_cpu_local_var(i)->current;
-				break;
+		for(i = 0; i < num_processors; i++){
+			v = get_cpu_local_var(i);
+			irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+			list_for_each_entry(p, &(v->runq), sched_list){
+				if(p->pid > 0 &&
+				   p->tid == tid){
+					tproc = p;
+					break;
+				}
 			}
+			ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
+		}
 	}
 	else{
 		if(pid == 0)
 			return -ESRCH;
-		for(i = 0; i < num_processors; i++)
-			if(get_cpu_local_var(i)->current &&
-			   get_cpu_local_var(i)->current->pid == pid &&
-			   get_cpu_local_var(i)->current->tid == tid){
-				tproc = get_cpu_local_var(i)->current;
-				break;
+		for(i = 0; i < num_processors; i++){
+			v = get_cpu_local_var(i);
+			irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+			list_for_each_entry(p, &(v->runq), sched_list){
+				if(p->pid == pid &&
+				   p->tid == tid){
+					tproc = p;
+					break;
+				}
 			}
+			ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
+		}
 	}
 
-	if(!tproc)
+	if(!tproc){
 		return -ESRCH;
+	}
 
 	if(sig == 0)
 		return 0;
@@ -375,7 +392,7 @@ do_kill(int pid, int tid, int sig)
 		}
 		else{
 			list_add_tail(&pending->list, head);
-			proc->sigevent = 1;
+			tproc->sigevent = 1;
 		}
 	}
 	if(tid == -1){
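
Note on the do_kill() hunks above: the old lookup only tested each CPU's ->current, so a child that had been forked but not yet scheduled could not be found and the signal was lost (the redmine#61 case). The new code walks every CPU's run queue while holding that CPU's runq_lock. Below is a minimal user-space sketch of the same lookup pattern; the names struct cpu, struct task, find_task_by_pid and NCPU are hypothetical stand-ins for cpu_local_var, runq, runq_lock and list_for_each_entry, and pthread mutexes stand in for the IHK spinlocks.

/*
 * Minimal user-space model of the lookup above: every CPU keeps a run
 * queue protected by its own lock, and a pid lookup walks that queue
 * under the lock instead of only checking the currently running task.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define NCPU 4

struct task {
	int pid;
	struct task *next;          /* next task on the same run queue */
};

struct cpu {
	pthread_mutex_t runq_lock;  /* protects the runq list */
	struct task *runq;          /* singly linked run queue */
};

static struct cpu cpus[NCPU];

/* Walk every CPU's run queue under its lock and return the first match. */
static struct task *find_task_by_pid(int pid)
{
	struct task *found = NULL;

	for (int i = 0; i < NCPU && !found; i++) {
		pthread_mutex_lock(&cpus[i].runq_lock);
		for (struct task *t = cpus[i].runq; t; t = t->next) {
			if (t->pid == pid) {
				found = t;
				break;
			}
		}
		pthread_mutex_unlock(&cpus[i].runq_lock);
	}
	return found;
}

int main(void)
{
	static struct task t1 = { .pid = 42 };

	for (int i = 0; i < NCPU; i++)
		pthread_mutex_init(&cpus[i].runq_lock, NULL);
	cpus[2].runq = &t1;         /* pid 42 is queued on CPU 2, not running */

	printf("pid 42 %s\n", find_task_by_pid(42) ? "found" : "missing");
	return 0;
}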

View File

@@ -211,8 +211,11 @@ static void post_init(void)
 	}
 	if (find_command_line("hidos")) {
+		extern ihk_spinlock_t syscall_lock;
+
 		init_host_syscall_channel();
 		init_host_syscall_channel2();
+		ihk_mc_spinlock_init(&syscall_lock);
 	}
 	ap_start();
 }
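
Note: this hunk only initializes the lock at boot when "hidos" is given; the lock object itself is defined next to do_syscall() in the file below. A rough stand-alone sketch of that wiring, with pthread_spinlock_t in place of ihk_spinlock_t and a hypothetical post_init_sketch() in place of post_init():

#include <pthread.h>
#include <string.h>

/* "syscall.c" side: the lock is defined next to the code that takes it. */
pthread_spinlock_t syscall_lock;

/* "init.c" side: initialize it once during boot, only when hidos is set. */
static void post_init_sketch(const char *cmdline)
{
	extern pthread_spinlock_t syscall_lock;   /* defined in "syscall.c" */

	if (strstr(cmdline, "hidos")) {
		/* the host syscall channels would be set up here ... */
		pthread_spin_init(&syscall_lock, PTHREAD_PROCESS_PRIVATE);
	}
}

int main(void)
{
	post_init_sketch("console=off hidos");
	return 0;
}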

View File

@@ -168,6 +168,7 @@ static void send_syscall(struct syscall_request *req, int cpu, int pid)
 #endif
 }
 
+ihk_spinlock_t syscall_lock;
 
 long do_syscall(struct syscall_request *req, ihk_mc_user_context_t *ctx,
 		int cpu, int pid)
@@ -176,6 +177,9 @@ long do_syscall(struct syscall_request *req, ihk_mc_user_context_t *ctx,
 	struct syscall_request req2 IHK_DMA_ALIGN;
 	struct syscall_params *scp;
 	int error;
+	long rc;
+	int islock = 0;
+	unsigned long irqstate;
 
 	dkprintf("SC(%d)[%3d] sending syscall\n",
 		 ihk_mc_get_processor_id(),
@@ -184,6 +188,8 @@ long do_syscall(struct syscall_request *req, ihk_mc_user_context_t *ctx,
 	if(req->number == __NR_exit_group ||
 	   req->number == __NR_kill){ // interrupt syscall
 		scp = &get_cpu_local_var(0)->scp2;
+		islock = 1;
+		irqstate = ihk_mc_spinlock_lock(&syscall_lock);
 	}
 	else{
 		scp = &get_cpu_local_var(cpu)->scp;
@@ -225,7 +231,12 @@ long do_syscall(struct syscall_request *req, ihk_mc_user_context_t *ctx,
 		ihk_mc_get_processor_id(),
 		req->number, res->ret);
 
-	return res->ret;
+	rc = res->ret;
+	if(islock){
+		ihk_mc_spinlock_unlock(&syscall_lock, irqstate);
+	}
+	return rc;
 }
 
 long syscall_generic_forwarding(int n, ihk_mc_user_context_t *ctx)
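
Note on the do_syscall() hunks above: exit_group and kill are forwarded through the shared scp2 channel on CPU 0, so concurrent senders are now serialized on syscall_lock, and the reply is copied into rc so the lock can be released before the function returns. A compilable user-space model of that flow follows; channel, exchange, do_syscall_sketch and NCPU are invented for the example, and pthread spinlocks stand in for ihk_mc_spinlock_lock/unlock.

#include <pthread.h>
#include <stdio.h>

#define NCPU 4

struct channel { long last_ret; };

static pthread_spinlock_t syscall_lock;
static struct channel per_cpu_chan[NCPU];   /* scp  : one channel per CPU */
static struct channel shared_chan;          /* scp2 : shared channel on CPU 0 */

/* Pretend to send the request to the host and read back a result. */
static long exchange(struct channel *ch, int number)
{
	ch->last_ret = -number;     /* stand-in for the host's reply */
	return ch->last_ret;
}

static long do_syscall_sketch(int number, int cpu, int is_interrupt_syscall)
{
	struct channel *ch;
	long rc;
	int islock = 0;

	if (is_interrupt_syscall) {             /* e.g. exit_group, kill */
		ch = &shared_chan;
		islock = 1;
		pthread_spin_lock(&syscall_lock);
	} else {
		ch = &per_cpu_chan[cpu];
	}

	rc = exchange(ch, number);              /* capture the result first ... */
	if (islock)
		pthread_spin_unlock(&syscall_lock);  /* ... then drop the lock */
	return rc;
}

int main(void)
{
	pthread_spin_init(&syscall_lock, PTHREAD_PROCESS_PRIVATE);
	printf("kill forwarded, rc=%ld\n", do_syscall_sketch(62, 1, 1));
	return 0;
}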