refactoring process structures

Tomoki Shirasawa
2015-10-13 23:04:08 +09:00
parent 2ca46fabfd
commit 04e193de13
23 changed files with 2586 additions and 2265 deletions


@@ -1262,33 +1262,33 @@ int ihk_mc_interrupt_cpu(int cpu, int vector)
@ ensures proc->fp_regs == NULL; @ ensures proc->fp_regs == NULL;
@*/ @*/
void void
release_fp_regs(struct process *proc) release_fp_regs(struct thread *thread)
{ {
int pages; int pages;
if (proc && !proc->fp_regs) if (thread && !thread->fp_regs)
return; return;
pages = (sizeof(fp_regs_struct) + 4095) >> 12; pages = (sizeof(fp_regs_struct) + 4095) >> 12;
ihk_mc_free_pages(proc->fp_regs, pages); ihk_mc_free_pages(thread->fp_regs, pages);
proc->fp_regs = NULL; thread->fp_regs = NULL;
} }
void void
save_fp_regs(struct process *proc) save_fp_regs(struct thread *thread)
{ {
int pages; int pages;
if (!proc->fp_regs) { if (!thread->fp_regs) {
pages = (sizeof(fp_regs_struct) + 4095) >> 12; pages = (sizeof(fp_regs_struct) + 4095) >> 12;
proc->fp_regs = ihk_mc_alloc_pages(pages, IHK_MC_AP_NOWAIT); thread->fp_regs = ihk_mc_alloc_pages(pages, IHK_MC_AP_NOWAIT);
if (!proc->fp_regs) { if (!thread->fp_regs) {
kprintf("error: allocating fp_regs pages\n"); kprintf("error: allocating fp_regs pages\n");
return; return;
} }
memset(proc->fp_regs, 0, sizeof(fp_regs_struct)); memset(thread->fp_regs, 0, sizeof(fp_regs_struct));
} }
if (xsave_available) { if (xsave_available) {
@@ -1298,17 +1298,17 @@ save_fp_regs(struct process *proc)
low = 0x7; low = 0x7;
high = 0; high = 0;
asm volatile("xsave %0" : : "m" (*proc->fp_regs), "a" (low), "d" (high) asm volatile("xsave %0" : : "m" (*thread->fp_regs), "a" (low), "d" (high)
: "memory"); : "memory");
dkprintf("fp_regs for TID %d saved\n", proc->ftn->tid); dkprintf("fp_regs for TID %d saved\n", thread->tid);
} }
} }
void void
restore_fp_regs(struct process *proc) restore_fp_regs(struct thread *thread)
{ {
if (!proc->fp_regs) if (!thread->fp_regs)
return; return;
if (xsave_available) { if (xsave_available) {
@@ -1318,29 +1318,29 @@ restore_fp_regs(struct process *proc)
low = 0x7; low = 0x7;
high = 0; high = 0;
asm volatile("xrstor %0" : : "m" (*proc->fp_regs), asm volatile("xrstor %0" : : "m" (*thread->fp_regs),
"a" (low), "d" (high)); "a" (low), "d" (high));
dkprintf("fp_regs for TID %d restored\n", proc->ftn->tid); dkprintf("fp_regs for TID %d restored\n", thread->tid);
} }
// XXX: why release?? // XXX: why release??
//release_fp_regs(proc); //release_fp_regs(thread);
} }
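
The pair above lazily allocates a per-thread xsave area and wraps xsave/xrstor around it. A minimal sketch of how a context-switch path could drive it with the new struct thread (the helper and its caller are assumptions, not code from this commit):

    /* Hypothetical helper: park the outgoing thread's FP/SSE state and
     * bring in the incoming thread's. save_fp_regs() allocates
     * prev->fp_regs on first use; restore_fp_regs() is a no-op while
     * next->fp_regs is still NULL. */
    static void switch_fp_state(struct thread *prev, struct thread *next)
    {
        save_fp_regs(prev);
        restore_fp_regs(next);
    }
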
ihk_mc_user_context_t *lookup_user_context(struct process *proc) ihk_mc_user_context_t *lookup_user_context(struct thread *thread)
{ {
ihk_mc_user_context_t *uctx = proc->uctx; ihk_mc_user_context_t *uctx = thread->uctx;
if ((!(proc->ftn->status & (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE if ((!(thread->tstatus & (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE
| PS_STOPPED | PS_TRACED)) | PS_STOPPED | PS_TRACED))
&& (proc != cpu_local_var(current))) && (thread != cpu_local_var(current)))
|| !uctx->is_gpr_valid) { || !uctx->is_gpr_valid) {
return NULL; return NULL;
} }
if (!uctx->is_sr_valid) { if (!uctx->is_sr_valid) {
uctx->sr.fs_base = proc->thread.tlsblock_base; uctx->sr.fs_base = thread->thread.tlsblock_base;
uctx->sr.gs_base = 0; uctx->sr.gs_base = 0;
uctx->sr.ds = 0; uctx->sr.ds = 0;
uctx->sr.es = 0; uctx->sr.es = 0;


@@ -78,11 +78,11 @@ int get_prstatus_size(void)
* \brief Fill a prstatus structure. * \brief Fill a prstatus structure.
* *
* \param head A pointer to a note structure. * \param head A pointer to a note structure.
* \param proc A pointer to the current process structure. * \param thread A pointer to the current thread structure.
* \param regs0 A pointer to a x86_regs structure. * \param regs0 A pointer to a x86_regs structure.
*/ */
void fill_prstatus(struct note *head, struct process *proc, void *regs0) void fill_prstatus(struct note *head, struct thread *thread, void *regs0)
{ {
void *name; void *name;
struct elf_prstatus64 *prstatus; struct elf_prstatus64 *prstatus;
@@ -160,11 +160,11 @@ int get_prpsinfo_size(void)
* \brief Fill a prpsinfo structure. * \brief Fill a prpsinfo structure.
* *
* \param head A pointer to a note structure. * \param head A pointer to a note structure.
* \param proc A pointer to the current process structure. * \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure. * \param regs A pointer to a x86_regs structure.
*/ */
void fill_prpsinfo(struct note *head, struct process *proc, void *regs) void fill_prpsinfo(struct note *head, struct thread *thread, void *regs)
{ {
void *name; void *name;
struct elf_prpsinfo64 *prpsinfo; struct elf_prpsinfo64 *prpsinfo;
@@ -176,8 +176,8 @@ void fill_prpsinfo(struct note *head, struct process *proc, void *regs)
memcpy(name, "CORE", sizeof("CORE")); memcpy(name, "CORE", sizeof("CORE"));
prpsinfo = (struct elf_prpsinfo64 *)(name + align32(sizeof("CORE"))); prpsinfo = (struct elf_prpsinfo64 *)(name + align32(sizeof("CORE")));
prpsinfo->pr_state = proc->ftn->status; prpsinfo->pr_state = thread->tstatus;
prpsinfo->pr_pid = proc->ftn->pid; prpsinfo->pr_pid = thread->proc->pid;
/* /*
We leave most of the fields unfilled. We leave most of the fields unfilled.
@@ -210,11 +210,11 @@ int get_auxv_size(void)
* \brief Fill an AUXV structure. * \brief Fill an AUXV structure.
* *
* \param head A pointer to a note structure. * \param head A pointer to a note structure.
* \param proc A pointer to the current process structure. * \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure. * \param regs A pointer to a x86_regs structure.
*/ */
void fill_auxv(struct note *head, struct process *proc, void *regs) void fill_auxv(struct note *head, struct thread *thread, void *regs)
{ {
void *name; void *name;
void *auxv; void *auxv;
@@ -225,7 +225,7 @@ void fill_auxv(struct note *head, struct process *proc, void *regs)
name = (void *) (head + 1); name = (void *) (head + 1);
memcpy(name, "CORE", sizeof("CORE")); memcpy(name, "CORE", sizeof("CORE"));
auxv = name + align32(sizeof("CORE")); auxv = name + align32(sizeof("CORE"));
memcpy(auxv, proc->saved_auxv, sizeof(unsigned long) * AUXV_LEN); memcpy(auxv, thread->proc->saved_auxv, sizeof(unsigned long) * AUXV_LEN);
} }
/** /**
@@ -243,23 +243,23 @@ int get_note_size(void)
* \brief Fill the NOTE segment. * \brief Fill the NOTE segment.
* *
* \param head A pointer to a note structure. * \param head A pointer to a note structure.
* \param proc A pointer to the current process structure. * \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure. * \param regs A pointer to a x86_regs structure.
*/ */
void fill_note(void *note, struct process *proc, void *regs) void fill_note(void *note, struct thread *thread, void *regs)
{ {
fill_prstatus(note, proc, regs); fill_prstatus(note, thread, regs);
note += get_prstatus_size(); note += get_prstatus_size();
fill_prpsinfo(note, proc, regs); fill_prpsinfo(note, thread, regs);
note += get_prpsinfo_size(); note += get_prpsinfo_size();
fill_auxv(note, proc, regs); fill_auxv(note, thread, regs);
} }
/** /**
* \brief Generate an image of the core file. * \brief Generate an image of the core file.
* *
* \param proc A pointer to the current process structure. * \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure. * \param regs A pointer to a x86_regs structure.
* \param coretable(out) An array of core chunks. * \param coretable(out) An array of core chunks.
* \param chunks(out) Number of the entires of coretable. * \param chunks(out) Number of the entires of coretable.
@@ -271,7 +271,7 @@ void fill_note(void *note, struct process *proc, void *regs)
* should be zero. * should be zero.
*/ */
int gencore(struct process *proc, void *regs, int gencore(struct thread *thread, void *regs,
struct coretable **coretable, int *chunks) struct coretable **coretable, int *chunks)
{ {
struct coretable *ct = NULL; struct coretable *ct = NULL;
@@ -279,7 +279,7 @@ int gencore(struct process *proc, void *regs,
Elf64_Phdr *ph = NULL; Elf64_Phdr *ph = NULL;
void *note = NULL; void *note = NULL;
struct vm_range *range; struct vm_range *range;
struct process_vm *vm = proc->vm; struct process_vm *vm = thread->vm;
int segs = 1; /* the first one is for NOTE */ int segs = 1; /* the first one is for NOTE */
int notesize, phsize, alignednotesize; int notesize, phsize, alignednotesize;
unsigned int offset = 0; unsigned int offset = 0;
@@ -306,7 +306,7 @@ int gencore(struct process *proc, void *regs,
unsigned long p, phys; unsigned long p, phys;
int prevzero = 0; int prevzero = 0;
for (p = range->start; p < range->end; p += PAGE_SIZE) { for (p = range->start; p < range->end; p += PAGE_SIZE) {
if (ihk_mc_pt_virt_to_phys(proc->vm->page_table, if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)p, &phys) != 0) { (void *)p, &phys) != 0) {
prevzero = 1; prevzero = 1;
} else { } else {
@@ -326,7 +326,7 @@ int gencore(struct process *proc, void *regs,
dkprintf("we have %d segs and %d chunks.\n\n", segs, *chunks); dkprintf("we have %d segs and %d chunks.\n\n", segs, *chunks);
{ {
struct vm_regions region = proc->vm->region; struct vm_regions region = thread->vm->region;
dkprintf("text: %lx-%lx\n", region.text_start, region.text_end); dkprintf("text: %lx-%lx\n", region.text_start, region.text_end);
dkprintf("data: %lx-%lx\n", region.data_start, region.data_end); dkprintf("data: %lx-%lx\n", region.data_start, region.data_end);
@@ -364,7 +364,7 @@ int gencore(struct process *proc, void *regs,
goto fail; goto fail;
} }
memset(note, 0, alignednotesize); memset(note, 0, alignednotesize);
fill_note(note, proc, regs); fill_note(note, thread, regs);
/* prgram header for NOTE segment is exceptional */ /* prgram header for NOTE segment is exceptional */
ph[0].p_type = PT_NOTE; ph[0].p_type = PT_NOTE;
@@ -434,7 +434,7 @@ int gencore(struct process *proc, void *regs,
for (start = p = range->start; for (start = p = range->start;
p < range->end; p += PAGE_SIZE) { p < range->end; p += PAGE_SIZE) {
if (ihk_mc_pt_virt_to_phys(proc->vm->page_table, if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)p, &phys) != 0) { (void *)p, &phys) != 0) {
if (prevzero == 0) { if (prevzero == 0) {
/* We begin a new chunk */ /* We begin a new chunk */
@@ -472,9 +472,9 @@ int gencore(struct process *proc, void *regs,
i++; i++;
} }
} else { } else {
if ((proc->vm->region.user_start <= range->start) && if ((thread->vm->region.user_start <= range->start) &&
(range->end <= proc->vm->region.user_end)) { (range->end <= thread->vm->region.user_end)) {
if (ihk_mc_pt_virt_to_phys(proc->vm->page_table, if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)range->start, &phys) != 0) { (void *)range->start, &phys) != 0) {
dkprintf("could not convert user virtual address %lx" dkprintf("could not convert user virtual address %lx"
"to physical address", range->start); "to physical address", range->start);


@@ -8,8 +8,9 @@
#include <ihk/atomic.h> #include <ihk/atomic.h>
//#define DEBUG_SPINLOCK //#define DEBUG_SPINLOCK
//#define DEBUG_MCS_RWLOCK
#ifdef DEBUG_SPINLOCK #if defined(DEBUG_SPINLOCK) || defined(DEBUG_MCS_RWLOCK)
int __kprintf(const char *format, ...); int __kprintf(const char *format, ...);
#endif #endif
@@ -26,7 +27,17 @@ static void ihk_mc_spinlock_init(ihk_spinlock_t *lock)
} }
#define SPIN_LOCK_UNLOCKED 0 #define SPIN_LOCK_UNLOCKED 0
static void ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock) #ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_lock_noirq(l) { \
__kprintf("[%d] call ihk_mc_spinlock_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__ihk_mc_spinlock_lock_noirq(l); \
__kprintf("[%d] ret ihk_mc_spinlock_lock_noirq\n", ihk_mc_get_processor_id()); \
}
#else
#define ihk_mc_spinlock_lock_noirq __ihk_mc_spinlock_lock_noirq
#endif
static void __ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
{ {
int inc = 0x00010000; int inc = 0x00010000;
int tmp; int tmp;
@@ -45,11 +56,6 @@ static void ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
: "+Q" (inc), "+m" (*lock), "=r" (tmp) : : "memory", "cc"); : "+Q" (inc), "+m" (*lock), "=r" (tmp) : : "memory", "cc");
#endif #endif
#ifdef DEBUG_SPINLOCK
__kprintf("[%d] trying to grab lock: 0x%lX\n",
ihk_mc_get_processor_id(), lock);
#endif
preempt_disable(); preempt_disable();
asm volatile("lock; xaddl %0, %1\n" asm volatile("lock; xaddl %0, %1\n"
@@ -67,37 +73,58 @@ static void ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
: :
: "memory", "cc"); : "memory", "cc");
#ifdef DEBUG_SPINLOCK
__kprintf("[%d] holding lock: 0x%lX\n", ihk_mc_get_processor_id(), lock);
#endif
} }
static unsigned long ihk_mc_spinlock_lock(ihk_spinlock_t *lock) #ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_lock(l) ({ unsigned long rc;\
__kprintf("[%d] call ihk_mc_spinlock_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
rc = __ihk_mc_spinlock_lock(l);\
__kprintf("[%d] ret ihk_mc_spinlock_lock\n", ihk_mc_get_processor_id()); rc;\
})
#else
#define ihk_mc_spinlock_lock __ihk_mc_spinlock_lock
#endif
static unsigned long __ihk_mc_spinlock_lock(ihk_spinlock_t *lock)
{ {
unsigned long flags; unsigned long flags;
flags = cpu_disable_interrupt_save(); flags = cpu_disable_interrupt_save();
ihk_mc_spinlock_lock_noirq(lock); __ihk_mc_spinlock_lock_noirq(lock);
return flags; return flags;
} }
static void ihk_mc_spinlock_unlock_noirq(ihk_spinlock_t *lock) #ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_unlock_noirq(l) { \
__kprintf("[%d] call ihk_mc_spinlock_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__ihk_mc_spinlock_unlock_noirq(l); \
__kprintf("[%d] ret ihk_mc_spinlock_unlock_noirq\n", ihk_mc_get_processor_id()); \
}
#else
#define ihk_mc_spinlock_unlock_noirq __ihk_mc_spinlock_unlock_noirq
#endif
static void __ihk_mc_spinlock_unlock_noirq(ihk_spinlock_t *lock)
{ {
asm volatile ("lock incw %0" : "+m"(*lock) : : "memory", "cc"); asm volatile ("lock incw %0" : "+m"(*lock) : : "memory", "cc");
preempt_enable(); preempt_enable();
} }
static void ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags) #ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_unlock(l, f) { \
__kprintf("[%d] call ihk_mc_spinlock_unlock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__ihk_mc_spinlock_unlock((l), (f)); \
__kprintf("[%d] ret ihk_mc_spinlock_unlock\n", ihk_mc_get_processor_id()); \
}
#else
#define ihk_mc_spinlock_unlock __ihk_mc_spinlock_unlock
#endif
static void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags)
{ {
ihk_mc_spinlock_unlock_noirq(lock); __ihk_mc_spinlock_unlock_noirq(lock);
cpu_restore_interrupt(flags); cpu_restore_interrupt(flags);
#ifdef DEBUG_SPINLOCK
__kprintf("[%d] released lock: 0x%lX\n", ihk_mc_get_processor_id(), lock);
#endif
} }
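
When DEBUG_SPINLOCK is defined, each public lock name above becomes a macro that logs the caller before and after delegating to the double-underscore implementation. A call site such as the runq_lock acquisition used elsewhere in this commit roughly expands to the statement expression below (shown for illustration only):

    /* Rough expansion of: irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
     * with DEBUG_SPINLOCK defined -- every acquisition logs the CPU id and
     * the caller's __FILE__/__LINE__ around the real lock operation. */
    irqstate = ({ unsigned long rc;
        __kprintf("[%d] call ihk_mc_spinlock_lock %p %s:%d\n",
                  ihk_mc_get_processor_id(), (&(v->runq_lock)),
                  __FILE__, __LINE__);
        rc = __ihk_mc_spinlock_lock(&(v->runq_lock));
        __kprintf("[%d] ret ihk_mc_spinlock_lock\n",
                  ihk_mc_get_processor_id());
        rc; });
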
/* An implementation of the Mellor-Crummey Scott (MCS) lock */ /* An implementation of the Mellor-Crummey Scott (MCS) lock */
@@ -152,76 +179,85 @@ static void mcs_lock_unlock(struct mcs_lock_node *lock,
} }
// reader/writer lock // reader/writer lock
typedef struct rwlock_node { typedef struct mcs_rwlock_node {
ihk_atomic_t count; // num of readers (use only common reader) ihk_atomic_t count; // num of readers (use only common reader)
char type; // lock type char type; // lock type
#define RWLOCK_TYPE_COMMON_READER 0 #define MCS_RWLOCK_TYPE_COMMON_READER 0
#define RWLOCK_TYPE_READER 1 #define MCS_RWLOCK_TYPE_READER 1
#define RWLOCK_TYPE_WRITER 2 #define MCS_RWLOCK_TYPE_WRITER 2
char locked; // lock char locked; // lock
#define RWLOCK_LOCKED 1 #define MCS_RWLOCK_LOCKED 1
#define RWLOCK_UNLOCKED 0 #define MCS_RWLOCK_UNLOCKED 0
char dmy1; // unused char dmy1; // unused
char dmy2; // unused char dmy2; // unused
struct rwlock_node *next; struct mcs_rwlock_node *next;
} __attribute__((aligned(64))) rwlock_node_t; } __attribute__((aligned(64))) mcs_rwlock_node_t;
typedef struct rwlock_node_irqsave { typedef struct mcs_rwlock_node_irqsave {
struct rwlock_node node; struct mcs_rwlock_node node;
unsigned long irqsave; unsigned long irqsave;
} __attribute__((aligned(64))) rwlock_node_irqsave_t; } __attribute__((aligned(64))) mcs_rwlock_node_irqsave_t;
typedef struct rwlock_lock { typedef struct mcs_rwlock_lock {
struct rwlock_node reader; /* common reader lock */ struct mcs_rwlock_node reader; /* common reader lock */
struct rwlock_node *node; /* base */ struct mcs_rwlock_node *node; /* base */
} __attribute__((aligned(64))) rwlock_lock_t; } __attribute__((aligned(64))) mcs_rwlock_lock_t;
static void static void
rwlock_init(struct rwlock_lock *lock) mcs_rwlock_init(struct mcs_rwlock_lock *lock)
{ {
ihk_atomic_set(&lock->reader.count, 0); ihk_atomic_set(&lock->reader.count, 0);
lock->reader.type = RWLOCK_TYPE_COMMON_READER; lock->reader.type = MCS_RWLOCK_TYPE_COMMON_READER;
lock->node = NULL; lock->node = NULL;
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_writer_lock_noirq(l, n) { \
__kprintf("[%d] call mcs_rwlock_writer_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_writer_lock_noirq((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_writer_lock_noirq\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_writer_lock_noirq __mcs_rwlock_writer_lock_noirq
#endif
static void static void
rwlock_writer_lock_noirq(struct rwlock_lock *lock, struct rwlock_node *node) __mcs_rwlock_writer_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{ {
struct rwlock_node *pred; struct mcs_rwlock_node *pred;
preempt_disable(); preempt_disable();
node->type = RWLOCK_TYPE_WRITER; node->type = MCS_RWLOCK_TYPE_WRITER;
node->next = NULL; node->next = NULL;
pred = (struct rwlock_node *)xchg8((unsigned long *)&lock->node, pred = (struct mcs_rwlock_node *)xchg8((unsigned long *)&lock->node,
(unsigned long)node); (unsigned long)node);
if (pred) { if (pred) {
node->locked = RWLOCK_LOCKED; node->locked = MCS_RWLOCK_LOCKED;
pred->next = node; pred->next = node;
while (node->locked != RWLOCK_UNLOCKED) { while (node->locked != MCS_RWLOCK_UNLOCKED) {
cpu_pause(); cpu_pause();
} }
} }
} }
static void static void
rwlock_unlock_readers(struct rwlock_lock *lock) mcs_rwlock_unlock_readers(struct mcs_rwlock_lock *lock)
{ {
struct rwlock_node *p; struct mcs_rwlock_node *p;
struct rwlock_node *f = NULL; struct mcs_rwlock_node *f = NULL;
struct rwlock_node *n; struct mcs_rwlock_node *n;
ihk_atomic_inc(&lock->reader.count); // protect to unlock reader ihk_atomic_inc(&lock->reader.count); // protect to unlock reader
for(p = &lock->reader; p->next; p = n){ for(p = &lock->reader; p->next; p = n){
n = p->next; n = p->next;
if(p->next->type == RWLOCK_TYPE_READER){ if(p->next->type == MCS_RWLOCK_TYPE_READER){
p->next = n->next; p->next = n->next;
if(lock->node == n){ if(lock->node == n){
struct rwlock_node *old; struct mcs_rwlock_node *old;
old = (struct rwlock_node *)atomic_cmpxchg8( old = (struct mcs_rwlock_node *)atomic_cmpxchg8(
(unsigned long *)&lock->node, (unsigned long *)&lock->node,
(unsigned long)n, (unsigned long)n,
(unsigned long)p); (unsigned long)p);
@@ -233,29 +269,44 @@ rwlock_unlock_readers(struct rwlock_lock *lock)
p->next = n->next; p->next = n->next;
} }
} }
else if(p->next == NULL){
while (n->next == NULL) {
cpu_pause();
}
p->next = n->next;
}
if(f){ if(f){
ihk_atomic_inc(&lock->reader.count); ihk_atomic_inc(&lock->reader.count);
n->locked = RWLOCK_UNLOCKED; n->locked = MCS_RWLOCK_UNLOCKED;
} }
else else
f = n; f = n;
n = p; n = p;
} }
if(n->next == NULL && lock->node != n){ if(n->next == NULL && lock->node != n){
while (n->next == NULL) { while (n->next == NULL && lock->node != n) {
cpu_pause(); cpu_pause();
} }
} }
} }
f->locked = RWLOCK_UNLOCKED; f->locked = MCS_RWLOCK_UNLOCKED;
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_writer_unlock_noirq(l, n) { \
__kprintf("[%d] call mcs_rwlock_writer_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_writer_unlock_noirq((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_writer_unlock_noirq\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_writer_unlock_noirq __mcs_rwlock_writer_unlock_noirq
#endif
static void static void
rwlock_writer_unlock_noirq(struct rwlock_lock *lock, struct rwlock_node *node) __mcs_rwlock_writer_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{ {
if (node->next == NULL) { if (node->next == NULL) {
struct rwlock_node *old = (struct rwlock_node *) struct mcs_rwlock_node *old = (struct mcs_rwlock_node *)
atomic_cmpxchg8((unsigned long *)&lock->node, atomic_cmpxchg8((unsigned long *)&lock->node,
(unsigned long)node, (unsigned long)0); (unsigned long)node, (unsigned long)0);
@@ -268,42 +319,52 @@ rwlock_writer_unlock_noirq(struct rwlock_lock *lock, struct rwlock_node *node)
} }
} }
if(node->next->type == RWLOCK_TYPE_READER){ if(node->next->type == MCS_RWLOCK_TYPE_READER){
lock->reader.next = node->next; lock->reader.next = node->next;
rwlock_unlock_readers(lock); mcs_rwlock_unlock_readers(lock);
} }
else{ else{
node->next->locked = RWLOCK_UNLOCKED; node->next->locked = MCS_RWLOCK_UNLOCKED;
} }
out: out:
preempt_enable(); preempt_enable();
} }
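
A writer acquisition therefore needs nothing beyond a stack-allocated queue node. A minimal usage sketch of the noirq pair defined above (tree_lock and tree_generation are illustrative names, not part of this commit):

    static mcs_rwlock_lock_t tree_lock;   /* set up once with mcs_rwlock_init(&tree_lock) */
    static int tree_generation;           /* illustrative shared state */

    static void bump_generation(void)
    {
        struct mcs_rwlock_node node;      /* one queue node per acquisition, on the stack */

        mcs_rwlock_writer_lock_noirq(&tree_lock, &node);
        tree_generation++;
        mcs_rwlock_writer_unlock_noirq(&tree_lock, &node);
    }
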
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_reader_lock_noirq(l, n) { \
__kprintf("[%d] call mcs_rwlock_reader_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_reader_lock_noirq((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_reader_lock_noirq\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_reader_lock_noirq __mcs_rwlock_reader_lock_noirq
#endif
static void static void
rwlock_reader_lock_noirq(struct rwlock_lock *lock, struct rwlock_node *node) __mcs_rwlock_reader_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{ {
struct rwlock_node *pred; struct mcs_rwlock_node *pred;
preempt_disable(); preempt_disable();
node->type = RWLOCK_TYPE_READER; node->type = MCS_RWLOCK_TYPE_READER;
node->next = NULL; node->next = NULL;
node->dmy1 = ihk_mc_get_processor_id();
pred = (struct rwlock_node *)xchg8((unsigned long *)&lock->node, pred = (struct mcs_rwlock_node *)xchg8((unsigned long *)&lock->node,
(unsigned long)node); (unsigned long)node);
if (pred) { if (pred) {
if(pred == &lock->reader){ if(pred == &lock->reader){
if(ihk_atomic_inc_return(&pred->count) != 1){ if(ihk_atomic_inc_return(&pred->count) != 1){
struct rwlock_node *old; struct mcs_rwlock_node *old;
old = (struct rwlock_node *)atomic_cmpxchg8( old = (struct mcs_rwlock_node *)atomic_cmpxchg8(
(unsigned long *)&lock->node, (unsigned long *)&lock->node,
(unsigned long)node, (unsigned long)node,
(unsigned long)pred); (unsigned long)pred);
if (old == pred) { if (old == node) {
goto out; goto out;
} }
@@ -312,36 +373,45 @@ rwlock_reader_lock_noirq(struct rwlock_lock *lock, struct rwlock_node *node)
} }
pred->next = node->next; pred->next = node->next;
if(node->next->type == RWLOCK_TYPE_READER) if(node->next->type == MCS_RWLOCK_TYPE_READER)
rwlock_unlock_readers(lock); mcs_rwlock_unlock_readers(lock);
goto out; goto out;
} }
ihk_atomic_dec(&pred->count); ihk_atomic_dec(&pred->count);
} }
node->locked = RWLOCK_LOCKED; node->locked = MCS_RWLOCK_LOCKED;
pred->next = node; pred->next = node;
while (node->locked != RWLOCK_UNLOCKED) { while (node->locked != MCS_RWLOCK_UNLOCKED) {
cpu_pause(); cpu_pause();
} }
} }
else { else {
lock->reader.next = node; lock->reader.next = node;
rwlock_unlock_readers(lock); mcs_rwlock_unlock_readers(lock);
} }
out: out:
return; return;
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_reader_unlock_noirq(l, n) { \
__kprintf("[%d] call mcs_rwlock_reader_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_reader_unlock_noirq((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_reader_unlock_noirq\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_reader_unlock_noirq __mcs_rwlock_reader_unlock_noirq
#endif
static void static void
rwlock_reader_unlock_noirq(struct rwlock_lock *lock, struct rwlock_node *node) __mcs_rwlock_reader_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{ {
if(ihk_atomic_dec_return(&lock->reader.count)) if(ihk_atomic_dec_return(&lock->reader.count))
goto out; goto out;
if (lock->reader.next == NULL) { if (lock->reader.next == NULL) {
struct rwlock_node *old; struct mcs_rwlock_node *old;
old = (struct rwlock_node *)atomic_cmpxchg8( old = (struct mcs_rwlock_node *)atomic_cmpxchg8(
(unsigned long *)&lock->node, (unsigned long *)&lock->node,
(unsigned long)&lock->reader, (unsigned long)&lock->reader,
(unsigned long)0); (unsigned long)0);
@@ -355,42 +425,78 @@ rwlock_reader_unlock_noirq(struct rwlock_lock *lock, struct rwlock_node *node)
} }
} }
if(lock->reader.next->type == RWLOCK_TYPE_READER){ if(lock->reader.next->type == MCS_RWLOCK_TYPE_READER){
rwlock_unlock_readers(lock); mcs_rwlock_unlock_readers(lock);
} }
else{ else{
lock->reader.next->locked = RWLOCK_UNLOCKED; lock->reader.next->locked = MCS_RWLOCK_UNLOCKED;
} }
out: out:
preempt_enable(); preempt_enable();
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_writer_lock(l, n) { \
__kprintf("[%d] call mcs_rwlock_writer_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_writer_lock((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_writer_lock\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_writer_lock __mcs_rwlock_writer_lock
#endif
static void static void
rwlock_writer_lock(struct rwlock_lock *lock, struct rwlock_node_irqsave *node) __mcs_rwlock_writer_lock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{ {
node->irqsave = cpu_disable_interrupt_save(); node->irqsave = cpu_disable_interrupt_save();
rwlock_writer_lock_noirq(lock, &node->node); __mcs_rwlock_writer_lock_noirq(lock, &node->node);
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_writer_unlock(l, n) { \
__kprintf("[%d] call mcs_rwlock_writer_unlock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_writer_unlock((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_writer_unlock\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_writer_unlock __mcs_rwlock_writer_unlock
#endif
static void static void
rwlock_writer_unlock(struct rwlock_lock *lock, struct rwlock_node_irqsave *node) __mcs_rwlock_writer_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{ {
rwlock_writer_unlock_noirq(lock, &node->node); __mcs_rwlock_writer_unlock_noirq(lock, &node->node);
cpu_restore_interrupt(node->irqsave); cpu_restore_interrupt(node->irqsave);
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_reader_lock(l, n) { \
__kprintf("[%d] call mcs_rwlock_reader_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_reader_lock((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_reader_lock\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_reader_lock __mcs_rwlock_reader_lock
#endif
static void static void
rwlock_reader_lock(struct rwlock_lock *lock, struct rwlock_node_irqsave *node) __mcs_rwlock_reader_lock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{ {
node->irqsave = cpu_disable_interrupt_save(); node->irqsave = cpu_disable_interrupt_save();
rwlock_reader_lock_noirq(lock, &node->node); __mcs_rwlock_reader_lock_noirq(lock, &node->node);
} }
#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_reader_unlock(l, n) { \
__kprintf("[%d] call mcs_rwlock_reader_unlock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
__mcs_rwlock_reader_unlock((l), (n)); \
__kprintf("[%d] ret mcs_rwlock_reader_unlock\n", ihk_mc_get_processor_id()); \
}
#else
#define mcs_rwlock_reader_unlock __mcs_rwlock_reader_unlock
#endif
static void static void
rwlock_reader_unlock(struct rwlock_lock *lock, struct rwlock_node_irqsave *node) __mcs_rwlock_reader_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{ {
rwlock_reader_unlock_noirq(lock, &node->node); __mcs_rwlock_reader_unlock_noirq(lock, &node->node);
cpu_restore_interrupt(node->irqsave); cpu_restore_interrupt(node->irqsave);
} }
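
The irqsave variants keep the saved interrupt flags inside the caller's node, which is how the refactored process code takes proc->update_lock (see ptrace_report_signal later in this commit). A reduced sketch of that pattern (the helper name is hypothetical):

    /* Hypothetical helper mirroring the update_lock usage in this commit:
     * the node carries both the MCS queue entry and the saved RFLAGS. */
    static void update_process_state(struct process *proc, int status)
    {
        struct mcs_rwlock_node_irqsave lock;

        mcs_rwlock_writer_lock(&proc->update_lock, &lock);
        proc->pstatus = status;           /* e.g. PS_TRACED, PS_STOPPED */
        mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
    }
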


@@ -2191,7 +2191,7 @@ int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t
cpsize = remain; cpsize = remain;
} }
error = ihk_mc_pt_virt_to_phys(vm->page_table, from, &pa); error = ihk_mc_pt_virt_to_phys(vm->address_space->page_table, from, &pa);
if (error) { if (error) {
return error; return error;
} }
@@ -2274,7 +2274,7 @@ int write_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
cpsize = remain; cpsize = remain;
} }
error = ihk_mc_pt_virt_to_phys(vm->page_table, to, &pa); error = ihk_mc_pt_virt_to_phys(vm->address_space->page_table, to, &pa);
if (error) { if (error) {
return error; return error;
} }
@@ -2330,7 +2330,7 @@ int patch_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
cpsize = remain; cpsize = remain;
} }
error = ihk_mc_pt_virt_to_phys(vm->page_table, to, &pa); error = ihk_mc_pt_virt_to_phys(vm->address_space->page_table, to, &pa);
if (error) { if (error) {
kprintf("patch_process_vm(%p,%p,%p,%lx):v2p(%p):%d\n", vm, udst, ksrc, siz, to, error); kprintf("patch_process_vm(%p,%p,%p,%lx):v2p(%p):%d\n", vm, udst, ksrc, siz, to, error);
return error; return error;
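
All three copy routines now walk the user buffer page by page through vm->address_space->page_table. A sketch of that skeleton, assuming a phys_to_kernel_virt() stand-in for the real physical-to-kernel-virtual conversion (not a function from this source):

    /* Sketch of the copy skeleton shared by read/write/patch_process_vm:
     * translate each user page through the per-address-space page table
     * introduced by this refactoring, then copy up to the page boundary.
     * phys_to_kernel_virt() is an assumed stand-in helper. */
    static int copy_in_from_user_vm(struct process_vm *vm, void *kdst,
                                    const void *usrc, size_t siz)
    {
        size_t remain = siz, cpsize;
        const char *from = usrc;
        char *to = kdst;
        unsigned long pa;
        int error;

        while (remain > 0) {
            cpsize = PAGE_SIZE - ((unsigned long)from & (PAGE_SIZE - 1));
            if (cpsize > remain)
                cpsize = remain;

            error = ihk_mc_pt_virt_to_phys(vm->address_space->page_table,
                                           (void *)from, &pa);
            if (error)
                return error;             /* page not mapped */

            memcpy(to, phys_to_kernel_virt(pa), cpsize);  /* assumed helper */
            from += cpsize;
            to += cpsize;
            remain -= cpsize;
        }
        return 0;
    }
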


@@ -25,13 +25,13 @@
#include <kmalloc.h> #include <kmalloc.h>
#include <uio.h> #include <uio.h>
void terminate(int, int, ihk_mc_user_context_t *); void terminate(int, int);
int copy_from_user(void *dst, const void *src, size_t siz); int copy_from_user(void *dst, const void *src, size_t siz);
int copy_to_user(void *dst, const void *src, size_t siz); int copy_to_user(void *dst, const void *src, size_t siz);
int write_process_vm(struct process_vm *vm, void *dst, const void *src, size_t siz); int write_process_vm(struct process_vm *vm, void *dst, const void *src, size_t siz);
long do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact); long do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact);
long syscall(int num, ihk_mc_user_context_t *ctx); long syscall(int num, ihk_mc_user_context_t *ctx);
extern void save_fp_regs(struct process *proc); extern void save_fp_regs(struct thread *proc);
//#define DEBUG_PRINT_SC //#define DEBUG_PRINT_SC
@@ -46,12 +46,12 @@ uintptr_t debug_constants[] = {
offsetof(struct cpu_local_var, current), offsetof(struct cpu_local_var, current),
offsetof(struct cpu_local_var, runq), offsetof(struct cpu_local_var, runq),
offsetof(struct cpu_local_var, status), offsetof(struct cpu_local_var, status),
offsetof(struct process, ctx), offsetof(struct thread, ctx),
offsetof(struct process, sched_list), offsetof(struct thread, sched_list),
offsetof(struct process, ftn), offsetof(struct thread, proc),
offsetof(struct fork_tree_node, status), offsetof(struct thread, tstatus),
offsetof(struct fork_tree_node, pid), offsetof(struct process, pid),
offsetof(struct fork_tree_node, tid), offsetof(struct thread, tid),
-1, -1,
}; };
@@ -163,7 +163,7 @@ struct sigsp {
SYSCALL_DECLARE(rt_sigreturn) SYSCALL_DECLARE(rt_sigreturn)
{ {
struct process *proc = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
struct x86_user_context *regs; struct x86_user_context *regs;
struct sigsp *sigsp; struct sigsp *sigsp;
@@ -173,8 +173,8 @@ SYSCALL_DECLARE(rt_sigreturn)
sigsp = (struct sigsp *)regs->gpr.rsp; sigsp = (struct sigsp *)regs->gpr.rsp;
if(copy_from_user(regs, &sigsp->regs, sizeof(struct x86_user_context))) if(copy_from_user(regs, &sigsp->regs, sizeof(struct x86_user_context)))
return -EFAULT; return -EFAULT;
proc->sigmask.__val[0] = sigsp->sigmask; thread->sigmask.__val[0] = sigsp->sigmask;
proc->sigstack.ss_flags = sigsp->ssflags; thread->sigstack.ss_flags = sigsp->ssflags;
if(sigsp->restart){ if(sigsp->restart){
return syscall(sigsp->num, (ihk_mc_user_context_t *)regs); return syscall(sigsp->num, (ihk_mc_user_context_t *)regs);
} }
@@ -182,38 +182,10 @@ SYSCALL_DECLARE(rt_sigreturn)
} }
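
rt_sigreturn unwinds a frame that do_signal() pushed on the user stack. Reconstructed from the field accesses in this file, the frame looks roughly as follows; only the names are taken from the code, the order and types are guesses:

    /* Approximate shape of the on-stack signal frame ('struct sigsp')
     * written by do_signal() and read back by rt_sigreturn(). */
    struct sigsp_sketch {
        struct x86_user_context regs;  /* saved user registers */
        unsigned long sigmask;         /* blocked-signal mask to restore */
        int ssflags;                   /* saved sigaltstack ss_flags */
        int restart;                   /* non-zero: re-issue the interrupted syscall */
        int num;                       /* syscall number to restart */
        long sigrc;                    /* return value to hand back otherwise */
    };
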
extern struct cpu_local_var *clv; extern struct cpu_local_var *clv;
extern unsigned long do_kill(int pid, int tid, int sig, struct siginfo *info, int ptracecont); extern unsigned long do_kill(struct thread *thread, int pid, int tid, int sig, struct siginfo *info, int ptracecont);
extern void interrupt_syscall(int all, int pid); extern void interrupt_syscall(int all, int pid);
extern int num_processors; extern int num_processors;
void
do_setpgid(int pid, int pgid)
{
struct cpu_local_var *v;
struct process *p;
struct process *proc = cpu_local_var(current);
int i;
unsigned long irqstate;
if(pid == 0)
pid = proc->ftn->pid;
if(pgid == 0)
pgid = pid;
for(i = 0; i < num_processors; i++){
v = get_cpu_local_var(i);
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->ftn->pid <= 0)
continue;
if(p->ftn->pid == pid){
p->ftn->pgid = pgid;
}
}
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
}
}
#define RFLAGS_MASK (RFLAGS_CF | RFLAGS_PF | RFLAGS_AF | RFLAGS_ZF | \ #define RFLAGS_MASK (RFLAGS_CF | RFLAGS_PF | RFLAGS_AF | RFLAGS_ZF | \
RFLAGS_SF | RFLAGS_TF | RFLAGS_DF | RFLAGS_OF | \ RFLAGS_SF | RFLAGS_TF | RFLAGS_DF | RFLAGS_OF | \
RFLAGS_NT | RFLAGS_RF | RFLAGS_AC) RFLAGS_NT | RFLAGS_RF | RFLAGS_AC)
@@ -222,10 +194,10 @@ do_setpgid(int pid, int pgid)
#define DB7_RESERVED_MASK (0xffffffff0000dc00UL) #define DB7_RESERVED_MASK (0xffffffff0000dc00UL)
#define DB7_RESERVED_SET (0x400UL) #define DB7_RESERVED_SET (0x400UL)
extern ihk_mc_user_context_t *lookup_user_context(struct process *proc); extern ihk_mc_user_context_t *lookup_user_context(struct thread *thread);
long long
ptrace_read_user(struct process *proc, long addr, unsigned long *value) ptrace_read_user(struct thread *thread, long addr, unsigned long *value)
{ {
unsigned long *p; unsigned long *p;
struct x86_user_context *uctx; struct x86_user_context *uctx;
@@ -235,7 +207,7 @@ ptrace_read_user(struct process *proc, long addr, unsigned long *value)
return -EIO; return -EIO;
} }
else if (addr < sizeof(struct user_regs_struct)) { else if (addr < sizeof(struct user_regs_struct)) {
uctx = lookup_user_context(proc); uctx = lookup_user_context(thread);
if (!uctx) { if (!uctx) {
return -EIO; return -EIO;
} }
@@ -253,11 +225,11 @@ ptrace_read_user(struct process *proc, long addr, unsigned long *value)
if (offsetof(struct user, u_debugreg[0]) <= addr && if (offsetof(struct user, u_debugreg[0]) <= addr &&
addr < offsetof(struct user, u_debugreg[8])) { addr < offsetof(struct user, u_debugreg[8])) {
if (addr & (sizeof(*value) - 1)) return -EIO; if (addr & (sizeof(*value) - 1)) return -EIO;
if (proc->ptrace_debugreg == NULL) { if (thread->ptrace_debugreg == NULL) {
kprintf("ptrace_read_user: missing ptrace_debugreg\n"); kprintf("ptrace_read_user: missing ptrace_debugreg\n");
return -EFAULT; return -EFAULT;
} }
p = &proc->ptrace_debugreg[(addr - offsetof(struct user, u_debugreg[0])) / sizeof(*value)]; p = &thread->ptrace_debugreg[(addr - offsetof(struct user, u_debugreg[0])) / sizeof(*value)];
*value = *p; *value = *p;
return 0; return 0;
} }
@@ -269,7 +241,7 @@ ptrace_read_user(struct process *proc, long addr, unsigned long *value)
} }
long long
ptrace_write_user(struct process *proc, long addr, unsigned long value) ptrace_write_user(struct thread *thread, long addr, unsigned long value)
{ {
unsigned long *p; unsigned long *p;
struct x86_user_context *uctx; struct x86_user_context *uctx;
@@ -279,7 +251,7 @@ ptrace_write_user(struct process *proc, long addr, unsigned long value)
return -EIO; return -EIO;
} }
else if (addr < sizeof(struct user_regs_struct)) { else if (addr < sizeof(struct user_regs_struct)) {
uctx = lookup_user_context(proc); uctx = lookup_user_context(thread);
if (!uctx) { if (!uctx) {
return -EIO; return -EIO;
} }
@@ -302,11 +274,11 @@ ptrace_write_user(struct process *proc, long addr, unsigned long value)
if (offsetof(struct user, u_debugreg[0]) <= addr && if (offsetof(struct user, u_debugreg[0]) <= addr &&
addr < offsetof(struct user, u_debugreg[8])) { addr < offsetof(struct user, u_debugreg[8])) {
if (addr & (sizeof(value) - 1)) return -EIO; if (addr & (sizeof(value) - 1)) return -EIO;
if (proc->ptrace_debugreg == NULL) { if (thread->ptrace_debugreg == NULL) {
kprintf("ptrace_write_user: missing ptrace_debugreg\n"); kprintf("ptrace_write_user: missing ptrace_debugreg\n");
return -EFAULT; return -EFAULT;
} }
p = &proc->ptrace_debugreg[(addr - offsetof(struct user, u_debugreg[0])) / sizeof(value)]; p = &thread->ptrace_debugreg[(addr - offsetof(struct user, u_debugreg[0])) / sizeof(value)];
if (addr == offsetof(struct user, u_debugreg[6])) { if (addr == offsetof(struct user, u_debugreg[6])) {
value &= ~DB6_RESERVED_MASK; value &= ~DB6_RESERVED_MASK;
value |= DB6_RESERVED_SET; value |= DB6_RESERVED_SET;
@@ -325,16 +297,16 @@ ptrace_write_user(struct process *proc, long addr, unsigned long value)
} }
long long
alloc_debugreg(struct process *proc) alloc_debugreg(struct thread *thread)
{ {
proc->ptrace_debugreg = kmalloc(sizeof(*proc->ptrace_debugreg) * 8, IHK_MC_AP_NOWAIT); thread->ptrace_debugreg = kmalloc(sizeof(*thread->ptrace_debugreg) * 8, IHK_MC_AP_NOWAIT);
if (proc->ptrace_debugreg == NULL) { if (thread->ptrace_debugreg == NULL) {
kprintf("alloc_debugreg: no memory.\n"); kprintf("alloc_debugreg: no memory.\n");
return -ENOMEM; return -ENOMEM;
} }
memset(proc->ptrace_debugreg, '\0', sizeof(*proc->ptrace_debugreg) * 8); memset(thread->ptrace_debugreg, '\0', sizeof(*thread->ptrace_debugreg) * 8);
proc->ptrace_debugreg[6] = DB6_RESERVED_SET; thread->ptrace_debugreg[6] = DB6_RESERVED_SET;
proc->ptrace_debugreg[7] = DB7_RESERVED_SET; thread->ptrace_debugreg[7] = DB7_RESERVED_SET;
return 0; return 0;
} }
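
ptrace_read_user()/ptrace_write_user() map PTRACE_PEEKUSER offsets inside u_debugreg onto this eight-slot array. A minimal sketch of that index arithmetic for a single register (the helper is hypothetical):

    /* Hypothetical helper: fetch the cached value of debug register 'n'
     * (0..7) for a traced thread, using the same offset arithmetic as
     * ptrace_read_user(). */
    static int peek_debugreg(struct thread *thread, int n, unsigned long *value)
    {
        long addr = offsetof(struct user, u_debugreg[n]);

        if (thread->ptrace_debugreg == NULL)
            return -EFAULT;               /* alloc_debugreg() was never called */
        *value = thread->ptrace_debugreg[
            (addr - offsetof(struct user, u_debugreg[0])) / sizeof(*value)];
        return 0;
    }
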
@@ -381,50 +353,50 @@ clear_debugreg(void)
asm("mov %0, %%db7" ::"r" (r)); asm("mov %0, %%db7" ::"r" (r));
} }
void clear_single_step(struct process *proc) void clear_single_step(struct thread *thread)
{ {
proc->uctx->gpr.rflags &= ~RFLAGS_TF; thread->uctx->gpr.rflags &= ~RFLAGS_TF;
} }
void set_single_step(struct process *proc) void set_single_step(struct thread *thread)
{ {
proc->uctx->gpr.rflags |= RFLAGS_TF; thread->uctx->gpr.rflags |= RFLAGS_TF;
} }
long ptrace_read_fpregs(struct process *proc, void *fpregs) long ptrace_read_fpregs(struct thread *thread, void *fpregs)
{ {
save_fp_regs(proc); save_fp_regs(thread);
if (proc->fp_regs == NULL) { if (thread->fp_regs == NULL) {
return -ENOMEM; return -ENOMEM;
} }
return copy_to_user(fpregs, &proc->fp_regs->i387, return copy_to_user(fpregs, &thread->fp_regs->i387,
sizeof(struct i387_fxsave_struct)); sizeof(struct i387_fxsave_struct));
} }
long ptrace_write_fpregs(struct process *proc, void *fpregs) long ptrace_write_fpregs(struct thread *thread, void *fpregs)
{ {
save_fp_regs(proc); save_fp_regs(thread);
if (proc->fp_regs == NULL) { if (thread->fp_regs == NULL) {
return -ENOMEM; return -ENOMEM;
} }
return copy_from_user(&proc->fp_regs->i387, fpregs, return copy_from_user(&thread->fp_regs->i387, fpregs,
sizeof(struct i387_fxsave_struct)); sizeof(struct i387_fxsave_struct));
} }
long ptrace_read_regset(struct process *proc, long type, struct iovec *iov) long ptrace_read_regset(struct thread *thread, long type, struct iovec *iov)
{ {
long rc = -EINVAL; long rc = -EINVAL;
switch (type) { switch (type) {
case NT_X86_XSTATE: case NT_X86_XSTATE:
save_fp_regs(proc); save_fp_regs(thread);
if (proc->fp_regs == NULL) { if (thread->fp_regs == NULL) {
return -ENOMEM; return -ENOMEM;
} }
if (iov->iov_len > sizeof(fp_regs_struct)) { if (iov->iov_len > sizeof(fp_regs_struct)) {
iov->iov_len = sizeof(fp_regs_struct); iov->iov_len = sizeof(fp_regs_struct);
} }
rc = copy_to_user(iov->iov_base, proc->fp_regs, iov->iov_len); rc = copy_to_user(iov->iov_base, thread->fp_regs, iov->iov_len);
break; break;
default: default:
kprintf("ptrace_read_regset: not supported type 0x%x\n", type); kprintf("ptrace_read_regset: not supported type 0x%x\n", type);
@@ -433,20 +405,20 @@ long ptrace_read_regset(struct process *proc, long type, struct iovec *iov)
return rc; return rc;
} }
long ptrace_write_regset(struct process *proc, long type, struct iovec *iov) long ptrace_write_regset(struct thread *thread, long type, struct iovec *iov)
{ {
long rc = -EINVAL; long rc = -EINVAL;
switch (type) { switch (type) {
case NT_X86_XSTATE: case NT_X86_XSTATE:
save_fp_regs(proc); save_fp_regs(thread);
if (proc->fp_regs == NULL) { if (thread->fp_regs == NULL) {
return -ENOMEM; return -ENOMEM;
} }
if (iov->iov_len > sizeof(fp_regs_struct)) { if (iov->iov_len > sizeof(fp_regs_struct)) {
iov->iov_len = sizeof(fp_regs_struct); iov->iov_len = sizeof(fp_regs_struct);
} }
rc = copy_from_user(proc->fp_regs, iov->iov_base, iov->iov_len); rc = copy_from_user(thread->fp_regs, iov->iov_base, iov->iov_len);
break; break;
default: default:
kprintf("ptrace_write_regset: not supported type 0x%x\n", type); kprintf("ptrace_write_regset: not supported type 0x%x\n", type);
@@ -455,47 +427,44 @@ long ptrace_write_regset(struct process *proc, long type, struct iovec *iov)
return rc; return rc;
} }
extern void coredump(struct process *proc, void *regs); extern void coredump(struct thread *thread, void *regs);
void ptrace_report_signal(struct process *proc, int sig) void ptrace_report_signal(struct thread *thread, int sig)
{ {
long rc; struct mcs_rwlock_node_irqsave lock;
struct process *proc = thread->proc;
int parent_pid;
struct siginfo info;
dkprintf("ptrace_report_signal,pid=%d\n", proc->ftn->pid); dkprintf("ptrace_report_signal,pid=%d\n", thread->proc->pid);
ihk_mc_spinlock_lock_noirq(&proc->ftn->lock); mcs_rwlock_writer_lock(&proc->update_lock, &lock);
proc->ftn->exit_status = sig; if(!(proc->ptrace & PT_TRACED)){
/* Transition process state */ mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
proc->ftn->status = PS_TRACED; return;
proc->ftn->ptrace &= ~PT_TRACE_SYSCALL_MASK; }
proc->exit_status = sig;
/* Transition thread state */
proc->pstatus = PS_TRACED;
thread->tstatus = PS_TRACED;
proc->ptrace &= ~PT_TRACE_SYSCALL_MASK;
if (sig == SIGSTOP || sig == SIGTSTP || if (sig == SIGSTOP || sig == SIGTSTP ||
sig == SIGTTIN || sig == SIGTTOU) { sig == SIGTTIN || sig == SIGTTOU) {
proc->ftn->signal_flags |= SIGNAL_STOP_STOPPED; proc->signal_flags |= SIGNAL_STOP_STOPPED;
} else { } else {
proc->ftn->signal_flags &= ~SIGNAL_STOP_STOPPED; proc->signal_flags &= ~SIGNAL_STOP_STOPPED;
} }
ihk_mc_spinlock_unlock_noirq(&proc->ftn->lock); parent_pid = proc->parent->pid;
if (proc->ftn->parent) { mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
/* kill SIGCHLD */
ihk_mc_spinlock_lock_noirq(&proc->ftn->parent->lock);
if (proc->ftn->parent->owner) {
struct siginfo info;
memset(&info, '\0', sizeof info); memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD; info.si_signo = SIGCHLD;
info.si_code = CLD_TRAPPED; info.si_code = CLD_TRAPPED;
info._sifields._sigchld.si_pid = proc->ftn->pid; info._sifields._sigchld.si_pid = thread->proc->pid;
info._sifields._sigchld.si_status = proc->ftn->exit_status; info._sifields._sigchld.si_status = thread->proc->exit_status;
rc = do_kill(proc->ftn->parent->pid, -1, SIGCHLD, &info, 0); do_kill(cpu_local_var(current), parent_pid, -1, SIGCHLD, &info, 0);
if (rc < 0) { /* Wake parent (if sleeping in wait4()) */
kprintf("ptrace_report_signal,do_kill failed\n"); waitq_wakeup(&proc->parent->waitpid_q);
}
}
ihk_mc_spinlock_unlock_noirq(&proc->ftn->parent->lock);
/* Wake parent (if sleeping in wait4()) */
waitq_wakeup(&proc->ftn->parent->waitpid_q);
}
dkprintf("ptrace_report_signal,sleeping\n"); dkprintf("ptrace_report_signal,sleeping\n");
/* Sleep */ /* Sleep */
@@ -505,6 +474,8 @@ void ptrace_report_signal(struct process *proc, int sig)
static int static int
isrestart(int num, unsigned long rc, int sig, int restart) isrestart(int num, unsigned long rc, int sig, int restart)
{ {
if(sig == SIGKILL || sig == SIGSTOP)
return 0;
if(num == 0 || rc != -EINTR) if(num == 0 || rc != -EINTR)
return 0; return 0;
switch(num){ switch(num){
@@ -536,22 +507,23 @@ isrestart(int num, unsigned long rc, int sig, int restart)
} }
void void
do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pending *pending, int num) do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pending *pending, int num)
{ {
struct x86_user_context *regs = regs0; struct x86_user_context *regs = regs0;
struct k_sigaction *k; struct k_sigaction *k;
int sig; int sig;
__sigset_t w; __sigset_t w;
int irqstate; struct process *proc = thread->proc;
struct fork_tree_node *ftn = proc->ftn;
int orgsig; int orgsig;
int ptraceflag = 0; int ptraceflag = 0;
struct mcs_rwlock_node_irqsave lock;
unsigned long irqstate;
for(w = pending->sigmask.__val[0], sig = 0; w; sig++, w >>= 1); for(w = pending->sigmask.__val[0], sig = 0; w; sig++, w >>= 1);
dkprintf("do_signal,pid=%d,sig=%d\n", proc->ftn->pid, sig); dkprintf("do_signal,pid=%d,sig=%d\n", proc->pid, sig);
orgsig = sig; orgsig = sig;
if((ftn->ptrace & PT_TRACED) && if((proc->ptrace & PT_TRACED) &&
pending->ptracecont == 0 && pending->ptracecont == 0 &&
sig != SIGKILL) { sig != SIGKILL) {
ptraceflag = 1; ptraceflag = 1;
@@ -566,39 +538,39 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
rc = regs->gpr.rax; rc = regs->gpr.rax;
} }
irqstate = ihk_mc_spinlock_lock(&proc->sighandler->lock); irqstate = ihk_mc_spinlock_lock(&thread->sigcommon->lock);
k = proc->sighandler->action + sig - 1; k = thread->sigcommon->action + sig - 1;
if(k->sa.sa_handler == SIG_IGN){ if(k->sa.sa_handler == SIG_IGN){
kfree(pending); kfree(pending);
ihk_mc_spinlock_unlock(&proc->sighandler->lock, irqstate); ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
return; return;
} }
else if(k->sa.sa_handler){ else if(k->sa.sa_handler){
unsigned long *usp; /* user stack */ unsigned long *usp; /* user stack */
struct sigsp *sigsp; struct sigsp *sigsp;
int ssflags = proc->sigstack.ss_flags; int ssflags = thread->sigstack.ss_flags;
unsigned long mask = (unsigned long)proc->sigmask.__val[0]; unsigned long mask = (unsigned long)thread->sigmask.__val[0];
if((k->sa.sa_flags & SA_ONSTACK) && if((k->sa.sa_flags & SA_ONSTACK) &&
!(proc->sigstack.ss_flags & SS_DISABLE) && !(thread->sigstack.ss_flags & SS_DISABLE) &&
!(proc->sigstack.ss_flags & SS_ONSTACK)){ !(thread->sigstack.ss_flags & SS_ONSTACK)){
unsigned long lsp; unsigned long lsp;
lsp = ((unsigned long)(((char *)proc->sigstack.ss_sp) + proc->sigstack.ss_size)) & 0xfffffffffffffff8UL; lsp = ((unsigned long)(((char *)thread->sigstack.ss_sp) + thread->sigstack.ss_size)) & 0xfffffffffffffff8UL;
usp = (unsigned long *)lsp; usp = (unsigned long *)lsp;
proc->sigstack.ss_flags |= SS_ONSTACK; thread->sigstack.ss_flags |= SS_ONSTACK;
} }
else{ else{
usp = (unsigned long *)regs->gpr.rsp; usp = (unsigned long *)regs->gpr.rsp;
} }
sigsp = ((struct sigsp *)usp) - 1; sigsp = ((struct sigsp *)usp) - 1;
sigsp = (struct sigsp *)((unsigned long)sigsp & 0xfffffffffffffff0UL); sigsp = (struct sigsp *)((unsigned long)sigsp & 0xfffffffffffffff0UL);
if(write_process_vm(proc->vm, &sigsp->regs, regs, sizeof(struct x86_user_context)) || if(write_process_vm(thread->vm, &sigsp->regs, regs, sizeof(struct x86_user_context)) ||
write_process_vm(proc->vm, &sigsp->sigrc, &rc, sizeof(long))){ write_process_vm(thread->vm, &sigsp->sigrc, &rc, sizeof(long))){
kfree(pending); kfree(pending);
ihk_mc_spinlock_unlock(&proc->sighandler->lock, irqstate); ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
kprintf("do_signal,write_process_vm failed\n"); kprintf("do_signal,write_process_vm failed\n");
terminate(0, sig, (ihk_mc_user_context_t *)regs->gpr.rsp); terminate(0, sig);
return; return;
} }
sigsp->sigmask = mask; sigsp->sigmask = mask;
@@ -621,25 +593,25 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
regs->gpr.rip = (unsigned long)k->sa.sa_handler; regs->gpr.rip = (unsigned long)k->sa.sa_handler;
regs->gpr.rsp = (unsigned long)usp; regs->gpr.rsp = (unsigned long)usp;
proc->sigmask.__val[0] |= pending->sigmask.__val[0]; thread->sigmask.__val[0] |= pending->sigmask.__val[0];
kfree(pending); kfree(pending);
ihk_mc_spinlock_unlock(&proc->sighandler->lock, irqstate); ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
} }
else { else {
int coredumped = 0; int coredumped = 0;
siginfo_t info; siginfo_t info;
if(ptraceflag){ if(ptraceflag){
if(proc->ptrace_recvsig) if(thread->ptrace_recvsig)
kfree(proc->ptrace_recvsig); kfree(thread->ptrace_recvsig);
proc->ptrace_recvsig = pending; thread->ptrace_recvsig = pending;
if(proc->ptrace_sendsig) if(thread->ptrace_sendsig)
kfree(proc->ptrace_sendsig); kfree(thread->ptrace_sendsig);
proc->ptrace_sendsig = NULL; thread->ptrace_sendsig = NULL;
} }
else else
kfree(pending); kfree(pending);
ihk_mc_spinlock_unlock(&proc->sighandler->lock, irqstate); ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
switch (sig) { switch (sig) {
case SIGSTOP: case SIGSTOP:
case SIGTSTP: case SIGTSTP:
@@ -648,49 +620,50 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
memset(&info, '\0', sizeof info); memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD; info.si_signo = SIGCHLD;
info.si_code = CLD_STOPPED; info.si_code = CLD_STOPPED;
info._sifields._sigchld.si_pid = proc->ftn->pid; info._sifields._sigchld.si_pid = thread->proc->pid;
info._sifields._sigchld.si_status = (sig << 8) | 0x7f; info._sifields._sigchld.si_status = (sig << 8) | 0x7f;
do_kill(proc->ftn->parent->pid, -1, SIGCHLD, &info, 0); do_kill(cpu_local_var(current), thread->proc->parent->pid, -1, SIGCHLD, &info, 0);
if(ptraceflag){ if(ptraceflag){
ptrace_report_signal(proc, orgsig); ptrace_report_signal(thread, orgsig);
} }
else{ else{
dkprintf("do_signal,SIGSTOP,changing state\n"); dkprintf("do_signal,SIGSTOP,changing state\n");
/* Update process state in fork tree */ /* Update thread state in fork tree */
ihk_mc_spinlock_lock_noirq(&ftn->lock); mcs_rwlock_writer_lock(&proc->update_lock, &lock);
ftn->group_exit_status = SIGSTOP; proc->group_exit_status = SIGSTOP;
/* Reap and set new signal_flags */ /* Reap and set new signal_flags */
ftn->signal_flags = SIGNAL_STOP_STOPPED; proc->signal_flags = SIGNAL_STOP_STOPPED;
ftn->status = PS_STOPPED; proc->pstatus = PS_STOPPED;
ihk_mc_spinlock_unlock_noirq(&proc->ftn->lock); thread->tstatus = PS_STOPPED;
mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
/* Wake up the parent who tried wait4 and sleeping */ /* Wake up the parent who tried wait4 and sleeping */
waitq_wakeup(&proc->ftn->parent->waitpid_q); waitq_wakeup(&proc->parent->waitpid_q);
dkprintf("do_signal,SIGSTOP,sleeping\n"); dkprintf("do_signal,SIGSTOP,sleeping\n");
/* Sleep */ /* Sleep */
proc->ftn->status = PS_STOPPED;
schedule(); schedule();
dkprintf("SIGSTOP(): woken up\n"); dkprintf("SIGSTOP(): woken up\n");
} }
break; break;
case SIGTRAP: case SIGTRAP:
dkprintf("do_signal,SIGTRAP\n"); dkprintf("do_signal,SIGTRAP\n");
if(!(ftn->ptrace & PT_TRACED)) { if(!(proc->ptrace & PT_TRACED)) {
goto core; goto core;
} }
/* Update process state in fork tree */ /* Update thread state in fork tree */
ihk_mc_spinlock_lock_noirq(&ftn->lock); mcs_rwlock_writer_lock(&proc->update_lock, &lock);
ftn->exit_status = SIGTRAP; proc->exit_status = SIGTRAP;
ftn->status = PS_TRACED; proc->pstatus = PS_TRACED;
ihk_mc_spinlock_unlock_noirq(&proc->ftn->lock); thread->tstatus = PS_TRACED;
mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
/* Wake up the parent who tried wait4 and sleeping */ /* Wake up the parent who tried wait4 and sleeping */
waitq_wakeup(&proc->ftn->parent->waitpid_q); waitq_wakeup(&thread->proc->parent->waitpid_q);
/* Sleep */ /* Sleep */
dkprintf("do_signal,SIGTRAP,sleeping\n"); dkprintf("do_signal,SIGTRAP,sleeping\n");
@@ -702,10 +675,10 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
memset(&info, '\0', sizeof info); memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD; info.si_signo = SIGCHLD;
info.si_code = CLD_CONTINUED; info.si_code = CLD_CONTINUED;
info._sifields._sigchld.si_pid = proc->ftn->pid; info._sifields._sigchld.si_pid = proc->pid;
info._sifields._sigchld.si_status = 0x0000ffff; info._sifields._sigchld.si_status = 0x0000ffff;
do_kill(proc->ftn->parent->pid, -1, SIGCHLD, &info, 0); do_kill(cpu_local_var(current), proc->parent->pid, -1, SIGCHLD, &info, 0);
ftn->signal_flags = SIGNAL_STOP_CONTINUED; proc->signal_flags = SIGNAL_STOP_CONTINUED;
dkprintf("do_signal,SIGCONT,do nothing\n"); dkprintf("do_signal,SIGCONT,do nothing\n");
break; break;
case SIGQUIT: case SIGQUIT:
@@ -717,23 +690,23 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
case SIGSYS: case SIGSYS:
core: core:
dkprintf("do_signal,default,core,sig=%d\n", sig); dkprintf("do_signal,default,core,sig=%d\n", sig);
coredump(proc, regs); coredump(thread, regs);
coredumped = 0x80; coredumped = 0x80;
terminate(0, sig | coredumped, (ihk_mc_user_context_t *)regs->gpr.rsp); terminate(0, sig | coredumped);
break; break;
case SIGCHLD: case SIGCHLD:
case SIGURG: case SIGURG:
break; break;
default: default:
dkprintf("do_signal,default,terminate,sig=%d\n", sig); dkprintf("do_signal,default,terminate,sig=%d\n", sig);
terminate(0, sig, (ihk_mc_user_context_t *)regs->gpr.rsp); terminate(0, sig);
break; break;
} }
} }
} }
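
The handler-dispatch branch of do_signal() above first picks the stack for the signal frame and aligns it. Condensed into one hypothetical helper, the selection logic reads:

    /* Hypothetical condensation of do_signal()'s stack selection: use the
     * alternate signal stack when the handler asked for SA_ONSTACK and the
     * thread is not already on it (marking it SS_ONSTACK as a side effect),
     * then align the frame down to 16 bytes. */
    static struct sigsp *pick_signal_frame(struct thread *thread,
                                           struct k_sigaction *k,
                                           struct x86_user_context *regs)
    {
        unsigned long *usp;
        struct sigsp *sigsp;

        if ((k->sa.sa_flags & SA_ONSTACK) &&
            !(thread->sigstack.ss_flags & SS_DISABLE) &&
            !(thread->sigstack.ss_flags & SS_ONSTACK)) {
            unsigned long lsp;

            lsp = ((unsigned long)(((char *)thread->sigstack.ss_sp) +
                   thread->sigstack.ss_size)) & 0xfffffffffffffff8UL;
            usp = (unsigned long *)lsp;
            thread->sigstack.ss_flags |= SS_ONSTACK;
        }
        else {
            usp = (unsigned long *)regs->gpr.rsp;
        }
        sigsp = ((struct sigsp *)usp) - 1;
        return (struct sigsp *)((unsigned long)sigsp & 0xfffffffffffffff0UL);
    }
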
static struct sig_pending * static struct sig_pending *
getsigpending(struct process *proc, int delflag){ getsigpending(struct thread *thread, int delflag){
struct list_head *head; struct list_head *head;
ihk_spinlock_t *lock; ihk_spinlock_t *lock;
struct sig_pending *next; struct sig_pending *next;
@@ -744,15 +717,15 @@ getsigpending(struct process *proc, int delflag){
int sig; int sig;
struct k_sigaction *k; struct k_sigaction *k;
w = proc->sigmask.__val[0]; w = thread->sigmask.__val[0];
lock = &proc->sigshared->lock; lock = &thread->sigcommon->lock;
head = &proc->sigshared->sigpending; head = &thread->sigcommon->sigpending;
for(;;){ for(;;){
irqstate = ihk_mc_spinlock_lock(lock); irqstate = ihk_mc_spinlock_lock(lock);
list_for_each_entry_safe(pending, next, head, list){ list_for_each_entry_safe(pending, next, head, list){
for(x = pending->sigmask.__val[0], sig = 0; x; sig++, x >>= 1); for(x = pending->sigmask.__val[0], sig = 0; x; sig++, x >>= 1);
k = proc->sighandler->action + sig - 1; k = thread->sigcommon->action + sig - 1;
if(delflag || if(delflag ||
(sig != SIGCHLD && sig != SIGURG) || (sig != SIGCHLD && sig != SIGURG) ||
(k->sa.sa_handler != (void *)1 && (k->sa.sa_handler != (void *)1 &&
@@ -767,45 +740,44 @@ getsigpending(struct process *proc, int delflag){
} }
ihk_mc_spinlock_unlock(lock, irqstate); ihk_mc_spinlock_unlock(lock, irqstate);
if(lock == &proc->sigpendinglock) if(lock == &thread->sigpendinglock)
return NULL; return NULL;
lock = &proc->sigpendinglock; lock = &thread->sigpendinglock;
head = &proc->sigpending; head = &thread->sigpending;
} }
return NULL; return NULL;
} }
struct sig_pending * struct sig_pending *
hassigpending(struct process *proc) hassigpending(struct thread *thread)
{ {
return getsigpending(proc, 0); return getsigpending(thread, 0);
} }
void void
check_signal(unsigned long rc, void *regs0, int num) check_signal(unsigned long rc, void *regs0, int num)
{ {
struct x86_user_context *regs = regs0; struct x86_user_context *regs = regs0;
struct process *proc; struct thread *thread;
struct sig_pending *pending; struct sig_pending *pending;
int irqstate; int irqstate;
if(clv == NULL) if(clv == NULL)
return; return;
proc = cpu_local_var(current); thread = cpu_local_var(current);
if(proc == NULL || proc->ftn->pid == 0){
struct process *p; if(thread == NULL || thread == &cpu_local_var(idle)){
struct thread *t;
irqstate = ihk_mc_spinlock_lock(&(cpu_local_var(runq_lock))); irqstate = ihk_mc_spinlock_lock(&(cpu_local_var(runq_lock)));
list_for_each_entry(p, &(cpu_local_var(runq)), sched_list){ list_for_each_entry(t, &(cpu_local_var(runq)), sched_list){
if(p->ftn->pid <= 0) if(t == &cpu_local_var(idle))
continue; continue;
if(p->ftn->status == PS_INTERRUPTIBLE && if(t->tstatus == PS_INTERRUPTIBLE &&
hassigpending(p)){ hassigpending(t)){
p->ftn->status = PS_RUNNING; t->tstatus = PS_RUNNING;
ihk_mc_spinlock_unlock(&(cpu_local_var(runq_lock)), irqstate); break;
// schedule();
return;
} }
} }
ihk_mc_spinlock_unlock(&(cpu_local_var(runq_lock)), irqstate); ihk_mc_spinlock_unlock(&(cpu_local_var(runq_lock)), irqstate);
@@ -817,24 +789,24 @@ check_signal(unsigned long rc, void *regs0, int num)
} }
for(;;){ for(;;){
pending = getsigpending(proc, 1); pending = getsigpending(thread, 1);
if(!pending) { if(!pending) {
dkprintf("check_signal,queue is empty\n"); dkprintf("check_signal,queue is empty\n");
return; return;
} }
do_signal(rc, regs, proc, pending, num); do_signal(rc, regs, thread, pending, num);
} }
} }
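
The remaining hunks thread the sending thread through do_kill() explicitly. A call site under the new signature, modeled on the SIGCHLD notifications in do_signal() above (the helper itself is hypothetical):

    /* Sketch: notify a parent process that a child stopped, using the
     * refactored do_kill() which now takes the sending thread first. */
    static void notify_parent_stopped(struct thread *thread, int sig)
    {
        struct siginfo info;

        memset(&info, '\0', sizeof info);
        info.si_signo = SIGCHLD;
        info.si_code = CLD_STOPPED;
        info._sifields._sigchld.si_pid = thread->proc->pid;
        info._sifields._sigchld.si_status = (sig << 8) | 0x7f;

        do_kill(cpu_local_var(current), thread->proc->parent->pid, -1,
                SIGCHLD, &info, 0);
    }
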
unsigned long unsigned long
do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont) do_kill(struct thread *thread, int pid, int tid, int sig, siginfo_t *info,
int ptracecont)
{ {
dkprintf("do_kill,pid=%d,tid=%d,sig=%d\n", pid, tid, sig); dkprintf("do_kill,pid=%d,tid=%d,sig=%d\n", pid, tid, sig);
struct cpu_local_var *v; struct cpu_local_var *v;
struct process *p; struct thread *t;
struct process *proc = cpu_local_var(current); struct thread *tthread = NULL;
struct process *tproc = NULL;
int i; int i;
__sigset_t mask; __sigset_t mask;
struct list_head *head; struct list_head *head;
@@ -865,9 +837,9 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
int sendme = 0; int sendme = 0;
if(pid == 0){ if(pid == 0){
if(proc == NULL || proc->ftn->pid <= 0) if(thread == NULL || thread->proc->pid <= 0)
return -ESRCH; return -ESRCH;
pgid = proc->ftn->pgid; pgid = thread->proc->pgid;
} }
pids = kmalloc(sizeof(int) * num_processors, IHK_MC_AP_NOWAIT); pids = kmalloc(sizeof(int) * num_processors, IHK_MC_AP_NOWAIT);
if(!pids) if(!pids)
@@ -875,32 +847,32 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
for(i = 0; i < num_processors; i++){ for(i = 0; i < num_processors; i++){
v = get_cpu_local_var(i); v = get_cpu_local_var(i);
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock)); irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){ list_for_each_entry(t, &(v->runq), sched_list){
int j; int j;
if(p->ftn->pid <= 0) if(t->proc->pid <= 0)
continue; continue;
if(pgid != 1 && p->ftn->pgid != pgid) if(pgid != 1 && t->proc->pgid != pgid)
continue; continue;
if(proc && p->ftn->pid == proc->ftn->pid){ if(thread && t->proc->pid == thread->proc->pid){
sendme = 1; sendme = 1;
continue; continue;
} }
for(j = 0; j < n; j++) for(j = 0; j < n; j++)
if(pids[j] == p->ftn->pid) if(pids[j] == t->proc->pid)
break; break;
if(j == n){ if(j == n){
pids[n] = p->ftn->pid; pids[n] = t->proc->pid;
n++; n++;
} }
} }
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate); ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
} }
for(i = 0; i < n; i++) for(i = 0; i < n; i++)
rc = do_kill(pids[i], -1, sig, info, ptracecont); rc = do_kill(thread, pids[i], -1, sig, info, ptracecont);
if(sendme) if(sendme)
rc = do_kill(proc->ftn->pid, -1, sig, info, ptracecont); rc = do_kill(thread, thread->proc->pid, -1, sig, info, ptracecont);
kfree(pids); kfree(pids);
return rc; return rc;
@@ -908,18 +880,18 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
irqstate = cpu_disable_interrupt_save(); irqstate = cpu_disable_interrupt_save();
mask = __sigmask(sig); mask = __sigmask(sig);
if(tid == -1){ if(tid == -1){
struct process *tproc0 = NULL; struct thread *tthread0 = NULL;
ihk_spinlock_t *savelock0 = NULL; ihk_spinlock_t *savelock0 = NULL;
for(i = 0; i < num_processors; i++){ for(i = 0; i < num_processors; i++){
v = get_cpu_local_var(i); v = get_cpu_local_var(i);
found = 0; found = 0;
ihk_mc_spinlock_lock_noirq(&(v->runq_lock)); ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){ list_for_each_entry(t, &(v->runq), sched_list){
if(p->ftn->pid == pid){ if(t->proc->pid == pid){
if(p->ftn->tid == pid || tproc == NULL){ if(t->tid == pid || tthread == NULL){
if(!(mask & p->sigmask.__val[0])){ if(!(mask & t->sigmask.__val[0])){
tproc = p; tthread = t;
if(!found && savelock) { if(!found && savelock) {
ihk_mc_spinlock_unlock_noirq(savelock); ihk_mc_spinlock_unlock_noirq(savelock);
} }
@@ -930,14 +902,14 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
savelock0 = NULL; savelock0 = NULL;
} }
} }
else if(tproc == NULL && tproc0 == NULL){ else if(tthread == NULL && tthread0 == NULL){
tproc0 = p; tthread0 = t;
found = 1; found = 1;
savelock0 = &(v->runq_lock); savelock0 = &(v->runq_lock);
} }
} }
if(!(mask & p->sigmask.__val[0])){ if(!(mask & t->sigmask.__val[0])){
if(p->ftn->tid == pid || tproc == NULL){ if(t->tid == pid || tthread == NULL){
} }
} }
@@ -947,8 +919,8 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
ihk_mc_spinlock_unlock_noirq(&(v->runq_lock)); ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
} }
} }
if(tproc == NULL){ if(tthread == NULL){
tproc = tproc0; tthread = tthread0;
savelock = savelock0; savelock = savelock0;
} }
} }
@@ -957,12 +929,12 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
v = get_cpu_local_var(i); v = get_cpu_local_var(i);
found = 0; found = 0;
ihk_mc_spinlock_lock_noirq(&(v->runq_lock)); ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){ list_for_each_entry(t, &(v->runq), sched_list){
if(p->ftn->pid > 0 && if(t->proc->pid > 0 &&
p->ftn->tid == tid){ t->tid == tid){
savelock = &(v->runq_lock); savelock = &(v->runq_lock);
found = 1; found = 1;
tproc = p; tthread = t;
break; break;
} }
} }
@@ -975,12 +947,12 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
v = get_cpu_local_var(i); v = get_cpu_local_var(i);
found = 0; found = 0;
ihk_mc_spinlock_lock_noirq(&(v->runq_lock)); ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){ list_for_each_entry(t, &(v->runq), sched_list){
if(p->ftn->pid == pid && if(t->proc->pid == pid &&
p->ftn->tid == tid){ t->tid == tid){
savelock = &(v->runq_lock); savelock = &(v->runq_lock);
found = 1; found = 1;
tproc = p; tthread = t;
break; break;
} }
} }
@@ -990,17 +962,18 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
} }
} }
if(!tproc){ if(!tthread){
cpu_restore_interrupt(irqstate); cpu_restore_interrupt(irqstate);
return -ESRCH; return -ESRCH;
} }
if(sig != SIGCONT && if(sig != SIGCONT &&
proc->ftn->euid != 0 && thread &&
proc->ftn->ruid != tproc->ftn->ruid && thread->proc->euid != 0 &&
proc->ftn->euid != tproc->ftn->ruid && thread->proc->ruid != tthread->proc->ruid &&
proc->ftn->ruid != tproc->ftn->suid && thread->proc->euid != tthread->proc->ruid &&
proc->ftn->euid != tproc->ftn->suid){ thread->proc->ruid != tthread->proc->suid &&
thread->proc->euid != tthread->proc->suid){
ihk_mc_spinlock_unlock_noirq(savelock); ihk_mc_spinlock_unlock_noirq(savelock);
cpu_restore_interrupt(irqstate); cpu_restore_interrupt(irqstate);
return -EPERM; return -EPERM;
@@ -1014,20 +987,20 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
doint = 0; doint = 0;
if(tid == -1){ if(tid == -1){
ihk_mc_spinlock_lock_noirq(&tproc->sigshared->lock); ihk_mc_spinlock_lock_noirq(&tthread->sigcommon->lock);
head = &tproc->sigshared->sigpending; head = &tthread->sigcommon->sigpending;
} }
else{ else{
ihk_mc_spinlock_lock_noirq(&tproc->sigpendinglock); ihk_mc_spinlock_lock_noirq(&tthread->sigpendinglock);
head = &tproc->sigpending; head = &tthread->sigpending;
} }
/* Put signal event even when handler is SIG_IGN or SIG_DFL /* Put signal event even when handler is SIG_IGN or SIG_DFL
because target ptraced process must call ptrace_report_signal because target ptraced thread must call ptrace_report_signal
in check_signal */ in check_signal */
rc = 0; rc = 0;
k = tproc->sighandler->action + sig - 1; k = tthread->sigcommon->action + sig - 1;
if((sig != SIGKILL && (tproc->ftn->ptrace & PT_TRACED)) || if((sig != SIGKILL && (tthread->proc->ptrace & PT_TRACED)) ||
(k->sa.sa_handler != (void *)1 && (k->sa.sa_handler != (void *)1 &&
(k->sa.sa_handler != NULL || (k->sa.sa_handler != NULL ||
(sig != SIGCHLD && sig != SIGURG)))){ (sig != SIGCHLD && sig != SIGURG)))){
@@ -1055,42 +1028,42 @@ do_kill(int pid, int tid, int sig, siginfo_t *info, int ptracecont)
list_add(&pending->list, head); list_add(&pending->list, head);
else else
list_add_tail(&pending->list, head); list_add_tail(&pending->list, head);
tproc->sigevent = 1; tthread->sigevent = 1;
} }
} }
} }
if(tid == -1){ if(tid == -1){
ihk_mc_spinlock_unlock_noirq(&tproc->sigshared->lock); ihk_mc_spinlock_unlock_noirq(&tthread->sigcommon->lock);
} }
else{ else{
ihk_mc_spinlock_unlock_noirq(&tproc->sigpendinglock); ihk_mc_spinlock_unlock_noirq(&tthread->sigpendinglock);
} }
if (doint && !(mask & tproc->sigmask.__val[0])) { if (doint && !(mask & tthread->sigmask.__val[0])) {
int cpuid = tproc->cpu_id; int cpuid = tthread->cpu_id;
int pid = tproc->ftn->pid; int pid = tthread->proc->pid;
int status = tproc->ftn->status; int status = tthread->tstatus;
if (proc != tproc) { if (thread != tthread) {
dkprintf("do_kill,ipi,pid=%d,cpu_id=%d\n", dkprintf("do_kill,ipi,pid=%d,cpu_id=%d\n",
tproc->ftn->pid, tproc->cpu_id); tthread->proc->pid, tthread->cpu_id);
ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(tproc->cpu_id)->apic_id, 0xd0); ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(tthread->cpu_id)->apic_id, 0xd0);
} }
ihk_mc_spinlock_unlock_noirq(savelock); ihk_mc_spinlock_unlock_noirq(savelock);
cpu_restore_interrupt(irqstate); cpu_restore_interrupt(irqstate);
if(!tproc->nohost) if(!tthread->proc->nohost)
interrupt_syscall(pid, cpuid); interrupt_syscall(pid, cpuid);
if (status != PS_RUNNING) { if (status != PS_RUNNING) {
if(sig == SIGKILL){ if(sig == SIGKILL){
/* Wake up the target only when stopped by ptrace-reporting */ /* Wake up the target only when stopped by ptrace-reporting */
sched_wakeup_process(tproc, PS_TRACED | PS_STOPPED); sched_wakeup_thread(tthread, PS_TRACED | PS_STOPPED);
} }
else if(sig == SIGCONT || ptracecont){ else if(sig == SIGCONT || ptracecont){
/* Wake up the target only when stopped by SIGSTOP */ /* Wake up the target only when stopped by SIGSTOP */
sched_wakeup_process(tproc, PS_STOPPED); sched_wakeup_thread(tthread, PS_STOPPED);
} }
} }
} }
@@ -1105,15 +1078,15 @@ void
set_signal(int sig, void *regs0, siginfo_t *info) set_signal(int sig, void *regs0, siginfo_t *info)
{ {
struct x86_user_context *regs = regs0; struct x86_user_context *regs = regs0;
struct process *proc = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
if(proc == NULL || proc->ftn->pid == 0) if(thread == NULL || thread->proc->pid == 0)
return; return;
if((__sigmask(sig) & proc->sigmask.__val[0]) || if((__sigmask(sig) & thread->sigmask.__val[0]) ||
(regs->gpr.rsp & 0x8000000000000000)){ (regs->gpr.rsp & 0x8000000000000000)){
coredump(proc, regs0); coredump(thread, regs0);
terminate(0, sig | 0x80, (ihk_mc_user_context_t *)regs->gpr.rsp); terminate(0, sig | 0x80);
} }
do_kill(proc->ftn->pid, proc->ftn->tid, sig, info, 0); do_kill(thread, thread->proc->pid, thread->tid, sig, info, 0);
} }
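The hunks above split signal state between a per-process struct sig_common (shared actions and a shared pending list) and a per-thread pending list guarded by sigpendinglock. Below is a minimal sketch of the queue-selection rule that do_kill() and getsigpending() implement; the helper name pending_queue_for() is hypothetical, and the locking simply follows the pattern shown above.

/* Hypothetical helper: process-directed signals (tid == -1) are queued on the
 * shared sigcommon list, thread-directed signals on the per-thread list.
 * getsigpending() scans the shared list first, then the per-thread one. */
static struct list_head *pending_queue_for(struct thread *thread, int tid)
{
	if (tid == -1) {
		ihk_mc_spinlock_lock_noirq(&thread->sigcommon->lock);
		return &thread->sigcommon->sigpending;
	}
	ihk_mc_spinlock_lock_noirq(&thread->sigpendinglock);
	return &thread->sigpending;
}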


@@ -183,6 +183,7 @@ struct program_load_desc *load_elf(FILE *fp, char **interp_pathp)
desc = malloc(sizeof(struct program_load_desc) desc = malloc(sizeof(struct program_load_desc)
+ sizeof(struct program_image_section) * nhdrs); + sizeof(struct program_image_section) * nhdrs);
desc->shell_path[0] = '\0';
fseek(fp, hdr.e_phoff, SEEK_SET); fseek(fp, hdr.e_phoff, SEEK_SET);
j = 0; j = 0;
desc->num_sections = nhdrs; desc->num_sections = nhdrs;
@@ -1822,6 +1823,7 @@ fork_child_sync_pipe:
/* Parent */ /* Parent */
default: default:
fprintf(stderr, "fork %d->%d\n", getpid(), pid);
fs->pid = pid; fs->pid = pid;
while ((rc = sem_trywait(&fs->sem)) == -1 && (errno == EAGAIN || errno == EINTR)) { while ((rc = sem_trywait(&fs->sem)) == -1 && (errno == EAGAIN || errno == EINTR)) {
int st; int st;
@@ -1870,6 +1872,7 @@ fork_err:
siginfo_t info; siginfo_t info;
int opt; int opt;
fprintf(stderr, "wait4: pid=%d\n", pid);
opt = WEXITED | (options & WNOWAIT); opt = WEXITED | (options & WNOWAIT);
memset(&info, '\0', sizeof info); memset(&info, '\0', sizeof info);
while((ret = waitid(P_PID, pid, &info, opt)) == -1 && while((ret = waitid(P_PID, pid, &info, opt)) == -1 &&
@@ -1879,7 +1882,7 @@ fork_err:
} }
if(ret != pid) { if(ret != pid) {
fprintf(stderr, "ERROR: waiting for %lu\n", w.sr.args[0]); fprintf(stderr, "ERROR: waiting for %lu rc=%d errno=%d\n", w.sr.args[0], ret, errno);
} }
do_syscall_return(fd, cpu, ret, 0, 0, 0, 0); do_syscall_return(fd, cpu, ret, 0, 0, 0, 0);


@@ -28,7 +28,7 @@ void kputs(char *buf)
int len = strlen(buf); int len = strlen(buf);
unsigned long flags; unsigned long flags;
flags = ihk_mc_spinlock_lock(&kmsg_lock); flags = __ihk_mc_spinlock_lock(&kmsg_lock);
if (len + kmsg_buf.tail > kmsg_buf.len) { if (len + kmsg_buf.tail > kmsg_buf.len) {
kmsg_buf.tail = 0; kmsg_buf.tail = 0;
@@ -40,19 +40,19 @@ void kputs(char *buf)
memcpy(kmsg_buf.str + kmsg_buf.tail, buf, len); memcpy(kmsg_buf.str + kmsg_buf.tail, buf, len);
kmsg_buf.tail += len; kmsg_buf.tail += len;
ihk_mc_spinlock_unlock(&kmsg_lock, flags); __ihk_mc_spinlock_unlock(&kmsg_lock, flags);
} }
#define KPRINTF_LOCAL_BUF_LEN 1024 #define KPRINTF_LOCAL_BUF_LEN 1024
unsigned long kprintf_lock(void) unsigned long kprintf_lock(void)
{ {
return ihk_mc_spinlock_lock(&kmsg_lock); return __ihk_mc_spinlock_lock(&kmsg_lock);
} }
void kprintf_unlock(unsigned long irqflags) void kprintf_unlock(unsigned long irqflags)
{ {
ihk_mc_spinlock_unlock(&kmsg_lock, irqflags); __ihk_mc_spinlock_unlock(&kmsg_lock, irqflags);
} }
/* Caller must hold kmsg_lock! */ /* Caller must hold kmsg_lock! */
@@ -85,7 +85,7 @@ int kprintf(const char *format, ...)
unsigned long flags; unsigned long flags;
char buf[KPRINTF_LOCAL_BUF_LEN]; char buf[KPRINTF_LOCAL_BUF_LEN];
flags = ihk_mc_spinlock_lock(&kmsg_lock); flags = __ihk_mc_spinlock_lock(&kmsg_lock);
/* Copy into the local buf */ /* Copy into the local buf */
len = sprintf(buf, "[%3d]: ", ihk_mc_get_processor_id()); len = sprintf(buf, "[%3d]: ", ihk_mc_get_processor_id());
@@ -101,7 +101,7 @@ int kprintf(const char *format, ...)
memcpy(kmsg_buf.str + kmsg_buf.tail, buf, len); memcpy(kmsg_buf.str + kmsg_buf.tail, buf, len);
kmsg_buf.tail += len; kmsg_buf.tail += len;
ihk_mc_spinlock_unlock(&kmsg_lock, flags); __ihk_mc_spinlock_unlock(&kmsg_lock, flags);
return len; return len;
} }


@@ -387,7 +387,7 @@ out:
static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag) static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
{ {
struct process *proc = cpu_local_var(current); struct thread *proc = cpu_local_var(current);
struct fileobj *obj = to_fileobj(memobj); struct fileobj *obj = to_fileobj(memobj);
int error; int error;
void *virt = NULL; void *virt = NULL;


@@ -103,7 +103,7 @@ int futex_cmpxchg_enabled;
struct futex_q { struct futex_q {
struct plist_node list; struct plist_node list;
struct process *task; struct thread *task;
ihk_spinlock_t *lock_ptr; ihk_spinlock_t *lock_ptr;
union futex_key key; union futex_key key;
union futex_key *requeue_pi_key; union futex_key *requeue_pi_key;
@@ -243,7 +243,7 @@ static int get_futex_value_locked(uint32_t *dest, uint32_t *from)
*/ */
static void wake_futex(struct futex_q *q) static void wake_futex(struct futex_q *q)
{ {
struct process *p = q->task; struct thread *p = q->task;
/* /*
* We set q->lock_ptr = NULL _before_ we wake up the task. If * We set q->lock_ptr = NULL _before_ we wake up the task. If
@@ -263,7 +263,7 @@ static void wake_futex(struct futex_q *q)
barrier(); barrier();
q->lock_ptr = NULL; q->lock_ptr = NULL;
sched_wakeup_process(p, PS_NORMAL); sched_wakeup_thread(p, PS_NORMAL);
} }
/* /*
@@ -658,7 +658,7 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
* queue_me() calls spin_unlock() upon completion, both serializing * queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier. * access to the hash list and forcing another memory barrier.
*/ */
xchg4(&(cpu_local_var(current)->ftn->status), PS_INTERRUPTIBLE); xchg4(&(cpu_local_var(current)->tstatus), PS_INTERRUPTIBLE);
queue_me(q, hb); queue_me(q, hb);
if (!plist_node_empty(&q->list)) { if (!plist_node_empty(&q->list)) {
@@ -674,7 +674,7 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
} }
/* This does not need to be serialized */ /* This does not need to be serialized */
cpu_local_var(current)->ftn->status = PS_RUNNING; cpu_local_var(current)->tstatus = PS_RUNNING;
return time_remain; return time_remain;
} }
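The futex path above now sleeps on struct thread directly. The following is a hedged sketch of the sleep/wake handshake it relies on, consolidated from the hunks above; the wrapper name futex_sleep_sketch() is hypothetical and timeout handling is simplified.

/* Publish PS_INTERRUPTIBLE before queueing so that a concurrent
 * wake_futex() -> sched_wakeup_thread(task, PS_NORMAL) cannot be missed;
 * queue_me() releases the hash-bucket lock, as noted above. */
static uint64_t futex_sleep_sketch(struct futex_q *q, struct futex_hash_bucket *hb,
				   uint64_t timeout)
{
	uint64_t time_remain = 0;

	xchg4(&(cpu_local_var(current)->tstatus), PS_INTERRUPTIBLE);
	queue_me(q, hb);
	if (!plist_node_empty(&q->list))
		time_remain = schedule_timeout(timeout);
	cpu_local_var(current)->tstatus = PS_RUNNING;	/* needs no serialization */
	return time_remain;
}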


@@ -39,11 +39,11 @@
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0) #define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#endif #endif
void check_mapping_for_proc(struct process *proc, unsigned long addr) void check_mapping_for_proc(struct thread *thread, unsigned long addr)
{ {
unsigned long __phys; unsigned long __phys;
if (ihk_mc_pt_virt_to_phys(proc->vm->page_table, (void*)addr, &__phys)) { if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table, (void*)addr, &__phys)) {
kprintf("check_map: no mapping for 0x%lX\n", addr); kprintf("check_map: no mapping for 0x%lX\n", addr);
} }
else { else {
@@ -60,7 +60,7 @@ void check_mapping_for_proc(struct process *proc, unsigned long addr)
* NOTE: if args, args_len, envs, envs_len are zero, * NOTE: if args, args_len, envs, envs_len are zero,
* the function constructs them based on the descriptor * the function constructs them based on the descriptor
*/ */
int prepare_process_ranges_args_envs(struct process *proc, int prepare_process_ranges_args_envs(struct thread *thread,
struct program_load_desc *pn, struct program_load_desc *pn,
struct program_load_desc *p, struct program_load_desc *p,
enum ihk_mc_pt_attribute attr, enum ihk_mc_pt_attribute attr,
@@ -81,6 +81,9 @@ int prepare_process_ranges_args_envs(struct process *proc,
uintptr_t interp_obase = -1; uintptr_t interp_obase = -1;
uintptr_t interp_nbase = -1; uintptr_t interp_nbase = -1;
size_t map_size; size_t map_size;
struct process *proc = thread->proc;
struct process_vm *vm = proc->vm;
struct address_space *as = vm->address_space;
n = p->num_sections; n = p->num_sections;
@@ -89,7 +92,7 @@ int prepare_process_ranges_args_envs(struct process *proc,
if (pn->sections[i].interp && (interp_nbase == (uintptr_t)-1)) { if (pn->sections[i].interp && (interp_nbase == (uintptr_t)-1)) {
interp_obase = pn->sections[i].vaddr; interp_obase = pn->sections[i].vaddr;
interp_obase -= (interp_obase % pn->interp_align); interp_obase -= (interp_obase % pn->interp_align);
interp_nbase = proc->vm->region.map_start; interp_nbase = vm->region.map_start;
interp_nbase = (interp_nbase + pn->interp_align - 1) interp_nbase = (interp_nbase + pn->interp_align - 1)
& ~(pn->interp_align - 1); & ~(pn->interp_align - 1);
} }
@@ -114,7 +117,7 @@ int prepare_process_ranges_args_envs(struct process *proc,
} }
up = virt_to_phys(up_v); up = virt_to_phys(up_v);
if (add_process_memory_range(proc, s, e, up, flags, NULL, 0) != 0) { if (add_process_memory_range(vm, s, e, up, flags, NULL, 0) != 0) {
ihk_mc_free_pages(up_v, range_npages); ihk_mc_free_pages(up_v, range_npages);
kprintf("ERROR: adding memory range for ELF section %i\n", i); kprintf("ERROR: adding memory range for ELF section %i\n", i);
goto err; goto err;
@@ -123,14 +126,14 @@ int prepare_process_ranges_args_envs(struct process *proc,
{ {
void *_virt = (void *)s; void *_virt = (void *)s;
unsigned long _phys; unsigned long _phys;
if (ihk_mc_pt_virt_to_phys(proc->vm->page_table, if (ihk_mc_pt_virt_to_phys(as->page_table,
_virt, &_phys)) { _virt, &_phys)) {
kprintf("ERROR: no mapping for 0x%lX\n", _virt); kprintf("ERROR: no mapping for 0x%lX\n", _virt);
} }
for (_virt = (void *)s + PAGE_SIZE; for (_virt = (void *)s + PAGE_SIZE;
(unsigned long)_virt < e; _virt += PAGE_SIZE) { (unsigned long)_virt < e; _virt += PAGE_SIZE) {
unsigned long __phys; unsigned long __phys;
if (ihk_mc_pt_virt_to_phys(proc->vm->page_table, if (ihk_mc_pt_virt_to_phys(as->page_table,
_virt, &__phys)) { _virt, &__phys)) {
kprintf("ERROR: no mapping for 0x%lX\n", _virt); kprintf("ERROR: no mapping for 0x%lX\n", _virt);
panic("mapping"); panic("mapping");
@@ -149,23 +152,23 @@ int prepare_process_ranges_args_envs(struct process *proc,
/* TODO: Maybe we need flag */ /* TODO: Maybe we need flag */
if (pn->sections[i].interp) { if (pn->sections[i].interp) {
proc->vm->region.map_end = e; vm->region.map_end = e;
} }
else if (i == 0) { else if (i == 0) {
proc->vm->region.text_start = s; vm->region.text_start = s;
proc->vm->region.text_end = e; vm->region.text_end = e;
} }
else if (i == 1) { else if (i == 1) {
proc->vm->region.data_start = s; vm->region.data_start = s;
proc->vm->region.data_end = e; vm->region.data_end = e;
} }
else { else {
proc->vm->region.data_start = vm->region.data_start =
(s < proc->vm->region.data_start ? (s < vm->region.data_start ?
s : proc->vm->region.data_start); s : vm->region.data_start);
proc->vm->region.data_end = vm->region.data_end =
(e > proc->vm->region.data_end ? (e > vm->region.data_end ?
e : proc->vm->region.data_end); e : vm->region.data_end);
} }
} }
@@ -173,17 +176,17 @@ int prepare_process_ranges_args_envs(struct process *proc,
pn->entry -= interp_obase; pn->entry -= interp_obase;
pn->entry += interp_nbase; pn->entry += interp_nbase;
p->entry = pn->entry; p->entry = pn->entry;
ihk_mc_modify_user_context(proc->uctx, IHK_UCR_PROGRAM_COUNTER, ihk_mc_modify_user_context(thread->uctx,
pn->entry); IHK_UCR_PROGRAM_COUNTER,
pn->entry);
} }
proc->vm->region.brk_start = proc->vm->region.brk_end = vm->region.brk_start = vm->region.brk_end = vm->region.data_end;
proc->vm->region.data_end;
/* Map, copy and update args and envs */ /* Map, copy and update args and envs */
flags = VR_PROT_READ | VR_PROT_WRITE; flags = VR_PROT_READ | VR_PROT_WRITE;
flags |= VRFLAG_PROT_TO_MAXPROT(flags); flags |= VRFLAG_PROT_TO_MAXPROT(flags);
addr = proc->vm->region.map_start - PAGE_SIZE * SCD_RESERVED_COUNT; addr = vm->region.map_start - PAGE_SIZE * SCD_RESERVED_COUNT;
e = addr + PAGE_SIZE * ARGENV_PAGE_COUNT; e = addr + PAGE_SIZE * ARGENV_PAGE_COUNT;
if((args_envs = ihk_mc_alloc_pages(ARGENV_PAGE_COUNT, IHK_MC_AP_NOWAIT)) == NULL){ if((args_envs = ihk_mc_alloc_pages(ARGENV_PAGE_COUNT, IHK_MC_AP_NOWAIT)) == NULL){
@@ -192,7 +195,7 @@ int prepare_process_ranges_args_envs(struct process *proc,
} }
args_envs_p = virt_to_phys(args_envs); args_envs_p = virt_to_phys(args_envs);
if(add_process_memory_range(proc, addr, e, args_envs_p, if(add_process_memory_range(vm, addr, e, args_envs_p,
flags, NULL, 0) != 0){ flags, NULL, 0) != 0){
ihk_mc_free_pages(args_envs, ARGENV_PAGE_COUNT); ihk_mc_free_pages(args_envs, ARGENV_PAGE_COUNT);
kprintf("ERROR: adding memory range for args/envs\n"); kprintf("ERROR: adding memory range for args/envs\n");
@@ -305,10 +308,10 @@ int prepare_process_ranges_args_envs(struct process *proc,
dkprintf("env OK\n"); dkprintf("env OK\n");
p->rprocess = (unsigned long)proc; p->rprocess = (unsigned long)thread;
p->rpgtable = virt_to_phys(proc->vm->page_table); p->rpgtable = virt_to_phys(as->page_table);
if (init_process_stack(proc, pn, argc, argv, envc, env) != 0) { if (init_process_stack(thread, pn, argc, argv, envc, env) != 0) {
goto err; goto err;
} }
@@ -327,7 +330,9 @@ static int process_msg_prepare_process(unsigned long rphys)
unsigned long phys, sz; unsigned long phys, sz;
struct program_load_desc *p, *pn; struct program_load_desc *p, *pn;
int npages, n; int npages, n;
struct thread *thread;
struct process *proc; struct process *proc;
struct process_vm *vm;
enum ihk_mc_pt_attribute attr; enum ihk_mc_pt_attribute attr;
attr = PTATTR_NO_EXECUTE | PTATTR_WRITABLE | PTATTR_FOR_USER; attr = PTATTR_NO_EXECUTE | PTATTR_WRITABLE | PTATTR_FOR_USER;
@@ -354,41 +359,43 @@ static int process_msg_prepare_process(unsigned long rphys)
memcpy_long(pn, p, sizeof(struct program_load_desc) memcpy_long(pn, p, sizeof(struct program_load_desc)
+ sizeof(struct program_image_section) * n); + sizeof(struct program_image_section) * n);
if((proc = create_process(p->entry)) == NULL){ if((thread = create_thread(p->entry)) == NULL){
ihk_mc_free(pn); ihk_mc_free(pn);
ihk_mc_unmap_virtual(p, npages, 1); ihk_mc_unmap_virtual(p, npages, 1);
ihk_mc_unmap_memory(NULL, phys, sz); ihk_mc_unmap_memory(NULL, phys, sz);
return -ENOMEM; return -ENOMEM;
} }
proc->ftn->pid = pn->pid; proc = thread->proc;
proc->ftn->pgid = pn->pgid; vm = thread->vm;
proc->ftn->ruid = pn->cred[0]; proc->pid = pn->pid;
proc->ftn->euid = pn->cred[1]; proc->pgid = pn->pgid;
proc->ftn->suid = pn->cred[2]; proc->ruid = pn->cred[0];
proc->ftn->fsuid = pn->cred[3]; proc->euid = pn->cred[1];
proc->ftn->rgid = pn->cred[4]; proc->suid = pn->cred[2];
proc->ftn->egid = pn->cred[5]; proc->fsuid = pn->cred[3];
proc->ftn->sgid = pn->cred[6]; proc->rgid = pn->cred[4];
proc->ftn->fsgid = pn->cred[7]; proc->egid = pn->cred[5];
proc->sgid = pn->cred[6];
proc->fsgid = pn->cred[7];
proc->vm->region.user_start = pn->user_start; vm->region.user_start = pn->user_start;
proc->vm->region.user_end = pn->user_end; vm->region.user_end = pn->user_end;
proc->vm->region.map_start = (USER_END / 3) & LARGE_PAGE_MASK; vm->region.map_start = (USER_END / 3) & LARGE_PAGE_MASK;
proc->vm->region.map_end = proc->vm->region.map_start; vm->region.map_end = proc->vm->region.map_start;
memcpy(proc->rlimit, pn->rlimit, sizeof(struct rlimit) * MCK_RLIM_MAX); memcpy(proc->rlimit, pn->rlimit, sizeof(struct rlimit) * MCK_RLIM_MAX);
/* TODO: Clear it at the proper timing */ /* TODO: Clear it at the proper timing */
cpu_local_var(scp).post_idx = 0; cpu_local_var(scp).post_idx = 0;
if (prepare_process_ranges_args_envs(proc, pn, p, attr, if (prepare_process_ranges_args_envs(thread, pn, p, attr,
NULL, 0, NULL, 0) != 0) { NULL, 0, NULL, 0) != 0) {
kprintf("error: preparing process ranges, args, envs, stack\n"); kprintf("error: preparing process ranges, args, envs, stack\n");
goto err; goto err;
} }
dkprintf("new process : %p [%d] / table : %p\n", proc, proc->ftn->pid, dkprintf("new process : %p [%d] / table : %p\n", proc, proc->pid,
proc->vm->page_table); vm->address_space->page_table);
ihk_mc_free(pn); ihk_mc_free(pn);
@@ -401,8 +408,7 @@ err:
ihk_mc_free(pn); ihk_mc_free(pn);
ihk_mc_unmap_virtual(p, npages, 1); ihk_mc_unmap_virtual(p, npages, 1);
ihk_mc_unmap_memory(NULL, phys, sz); ihk_mc_unmap_memory(NULL, phys, sz);
free_process_memory(proc); destroy_thread(thread);
destroy_process(proc);
return -ENOMEM; return -ENOMEM;
} }
@@ -476,8 +482,8 @@ static void syscall_channel_send(struct ihk_ikc_channel_desc *c,
ihk_ikc_send(c, packet, 0); ihk_ikc_send(c, packet, 0);
} }
extern unsigned long do_kill(int, int, int, struct siginfo *, int ptracecont); extern unsigned long do_kill(struct thread *, int, int, int, struct siginfo *, int ptracecont);
extern void settid(struct process *proc, int mode, int newcpuid, int oldcpuid); extern void settid(struct thread *proc, int mode, int newcpuid, int oldcpuid);
extern void process_procfs_request(unsigned long rarg); extern void process_procfs_request(unsigned long rarg);
extern int memcheckall(); extern int memcheckall();
@@ -492,6 +498,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
struct ikc_scd_packet *packet = __packet; struct ikc_scd_packet *packet = __packet;
struct ikc_scd_packet pckt; struct ikc_scd_packet pckt;
int rc; int rc;
struct thread *thread;
struct process *proc; struct process *proc;
struct mcctrl_signal { struct mcctrl_signal {
int cond; int cond;
@@ -539,13 +546,17 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
return -1; return -1;
} }
dkprintf("SCD_MSG_SCHEDULE_PROCESS: %lx\n", packet->arg); dkprintf("SCD_MSG_SCHEDULE_PROCESS: %lx\n", packet->arg);
proc = (struct process *)packet->arg; thread = (struct thread *)packet->arg;
proc = thread->proc;
settid(proc, 0, cpuid, -1); settid(thread, 0, cpuid, -1);
proc->ftn->status = PS_RUNNING; proc->pstatus = PS_RUNNING;
runq_add_proc(proc, cpuid); thread->tstatus = PS_RUNNING;
chain_thread(thread);
chain_process(proc);
runq_add_thread(thread, cpuid);
//cpu_local_var(next) = (struct process *)packet->arg; //cpu_local_var(next) = (struct thread *)packet->arg;
return 0; return 0;
case SCD_MSG_SEND_SIGNAL: case SCD_MSG_SEND_SIGNAL:
pp = ihk_mc_map_memory(NULL, packet->arg, sizeof(struct mcctrl_signal)); pp = ihk_mc_map_memory(NULL, packet->arg, sizeof(struct mcctrl_signal));
@@ -559,7 +570,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
pckt.arg = packet->arg; pckt.arg = packet->arg;
syscall_channel_send(c, &pckt); syscall_channel_send(c, &pckt);
rc = do_kill(info.pid, info.tid, info.sig, &info.info, 0); rc = do_kill(NULL, info.pid, info.tid, info.sig, &info.info, 0);
kprintf("SCD_MSG_SEND_SIGNAL: do_kill(pid=%d, tid=%d, sig=%d)=%d\n", info.pid, info.tid, info.sig, rc); kprintf("SCD_MSG_SEND_SIGNAL: do_kill(pid=%d, tid=%d, sig=%d)=%d\n", info.pid, info.tid, info.sig, rc);
return 0; return 0;
case SCD_MSG_PROCFS_REQUEST: case SCD_MSG_PROCFS_REQUEST:


@@ -41,13 +41,14 @@ struct cpu_local_var {
struct malloc_header free_list; struct malloc_header free_list;
ihk_spinlock_t free_list_lock; ihk_spinlock_t free_list_lock;
struct process idle; struct thread idle;
struct fork_tree_node idle_ftn; struct process idle_proc;
struct process_vm idle_vm; struct process_vm idle_vm;
struct address_space idle_asp;
ihk_spinlock_t runq_lock; ihk_spinlock_t runq_lock;
unsigned long runq_irqstate; unsigned long runq_irqstate;
struct process *current; struct thread *current;
struct list_head runq; struct list_head runq;
size_t runq_len; size_t runq_len;
@@ -58,6 +59,7 @@ struct cpu_local_var {
struct ihk_ikc_channel_desc *syscall_channel2; struct ihk_ikc_channel_desc *syscall_channel2;
struct syscall_params scp2; struct syscall_params scp2;
struct ikc_scd_init_param iip2; struct ikc_scd_init_param iip2;
struct resource_set *resource_set;
int status; int status;
int fs; int fs;


@@ -14,8 +14,18 @@
#define __HEADER_KMALLOC_H #define __HEADER_KMALLOC_H
#include <ihk/mm.h> #include <ihk/mm.h>
#include <cls.h>
#define kmalloc(size, flag) _kmalloc(size, flag, __FILE__, __LINE__) void panic(const char *);
int kprintf(const char *format, ...);
#define kmalloc(size, flag) ({\
void *r = _kmalloc(size, flag, __FILE__, __LINE__);\
if(r == NULL){\
kprintf("kmalloc: out of memory %s:%d no_preempt=%d\n", __FILE__, __LINE__, cpu_local_var(no_preempt)); \
}\
r;\
})
#define kfree(ptr) _kfree(ptr, __FILE__, __LINE__) #define kfree(ptr) _kfree(ptr, __FILE__, __LINE__)
#define memcheck(ptr, msg) _memcheck(ptr, msg, __FILE__, __LINE__, 0) #define memcheck(ptr, msg) _memcheck(ptr, msg, __FILE__, __LINE__, 0)
void *_kmalloc(int size, enum ihk_mc_ap_flag flag, char *file, int line); void *_kmalloc(int size, enum ihk_mc_ap_flag flag, char *file, int line);
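The kmalloc() wrapper above is now a statement expression: it logs the failing file, line and no_preempt count when _kmalloc() returns NULL, but still yields the pointer, so call sites keep their own NULL checks. A minimal usage sketch follows, mirroring the pids allocation in do_kill() earlier in this commit; the -ENOMEM return is illustrative.

int *pids = kmalloc(sizeof(int) * num_processors, IHK_MC_AP_NOWAIT);
if (!pids)
	return -ENOMEM;	/* the macro has already printed the out-of-memory message */
/* ... fill and use pids ... */
kfree(pids);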


@@ -51,6 +51,7 @@
#define VRFLAG_PROT_TO_MAXPROT(vrflag) (((vrflag) & VR_PROT_MASK) << 4) #define VRFLAG_PROT_TO_MAXPROT(vrflag) (((vrflag) & VR_PROT_MASK) << 4)
#define VRFLAG_MAXPROT_TO_PROT(vrflag) (((vrflag) & VR_MAXPROT_MASK) >> 4) #define VRFLAG_MAXPROT_TO_PROT(vrflag) (((vrflag) & VR_MAXPROT_MASK) >> 4)
// struct process.status, struct thread.status
#define PS_RUNNING 0x1 #define PS_RUNNING 0x1
#define PS_INTERRUPTIBLE 0x2 #define PS_INTERRUPTIBLE 0x2
#define PS_UNINTERRUPTIBLE 0x4 #define PS_UNINTERRUPTIBLE 0x4
@@ -58,15 +59,19 @@
#define PS_EXITED 0x10 #define PS_EXITED 0x10
#define PS_STOPPED 0x20 #define PS_STOPPED 0x20
#define PS_TRACED 0x40 /* Set to "not running" by a ptrace related event */ #define PS_TRACED 0x40 /* Set to "not running" by a ptrace related event */
#define PS_STOPPING 0x80
#define PS_TRACING 0x100
#define PS_NORMAL (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE) #define PS_NORMAL (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE)
// struct process.ptrace
#define PT_TRACED 0x80 /* The process is ptraced */ #define PT_TRACED 0x80 /* The process is ptraced */
#define PT_TRACE_EXEC 0x100 /* Trace execve(2) */ #define PT_TRACE_EXEC 0x100 /* Trace execve(2) */
#define PT_TRACE_SYSCALL_ENTER 0x200 /* Trace syscall enter */ #define PT_TRACE_SYSCALL_ENTER 0x200 /* Trace syscall enter */
#define PT_TRACE_SYSCALL_EXIT 0x400 /* Trace syscall exit */ #define PT_TRACE_SYSCALL_EXIT 0x400 /* Trace syscall exit */
#define PT_TRACE_SYSCALL_MASK (PT_TRACE_SYSCALL_ENTER | PT_TRACE_SYSCALL_EXIT) #define PT_TRACE_SYSCALL_MASK (PT_TRACE_SYSCALL_ENTER | PT_TRACE_SYSCALL_EXIT)
// ptrace(2) request
#define PTRACE_TRACEME 0 #define PTRACE_TRACEME 0
#define PTRACE_PEEKTEXT 1 #define PTRACE_PEEKTEXT 1
#define PTRACE_PEEKDATA 2 #define PTRACE_PEEKDATA 2
@@ -95,6 +100,7 @@
#define PTRACE_GETREGSET 0x4204 #define PTRACE_GETREGSET 0x4204
#define PTRACE_SETREGSET 0x4205 #define PTRACE_SETREGSET 0x4205
// ptrace(2) options
#define PTRACE_O_TRACESYSGOOD 1 #define PTRACE_O_TRACESYSGOOD 1
#define PTRACE_O_TRACEFORK 2 #define PTRACE_O_TRACEFORK 2
#define PTRACE_O_TRACEVFORK 4 #define PTRACE_O_TRACEVFORK 4
@@ -104,6 +110,7 @@
#define PTRACE_O_TRACEEXIT 0x40 #define PTRACE_O_TRACEEXIT 0x40
#define PTRACE_O_MASK 0x7f #define PTRACE_O_MASK 0x7f
// ptrace(2) events
#define PTRACE_EVENT_FORK 1 #define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2 #define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3 #define PTRACE_EVENT_CLONE 3
@@ -158,6 +165,66 @@
#include <waitq.h> #include <waitq.h>
#include <futex.h> #include <futex.h>
struct resource_set;
struct process_hash;
struct thread_hash;
struct address_space;
struct process;
struct thread;
struct process_vm;
struct vm_regions;
struct vm_range;
#define HASH_SIZE 73
struct resource_set {
struct list_head list;
char *path;
struct process_hash *process_hash;
struct thread_hash *thread_hash;
struct list_head phys_mem_list;
mcs_rwlock_lock_t phys_mem_lock;
cpu_set_t cpu_set;
mcs_rwlock_lock_t cpu_set_lock;
struct process *pid1;
};
extern struct list_head resource_set_list;
extern mcs_rwlock_lock_t resource_set_lock;
struct process_hash {
struct list_head list[HASH_SIZE];
mcs_rwlock_lock_t lock[HASH_SIZE];
};
static inline int
process_hash(int pid)
{
return pid % HASH_SIZE;
}
static inline int
thread_hash(int tid)
{
return tid % HASH_SIZE;
}
struct thread_hash {
struct list_head list[HASH_SIZE];
mcs_rwlock_lock_t lock[HASH_SIZE];
};
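find_process() and find_thread() are implemented in a file whose diff is suppressed below as too large, so what follows is only a plausible sketch of how these hash tables are meant to be used: hash the id to a bucket, then walk that bucket's list under its reader lock. lookup_process_sketch() and the mcs_rwlock_reader_lock()/_unlock() names are placeholders, not the actual McKernel API.

/* Placeholder sketch: pick a bucket with process_hash(pid), walk its list
 * under the per-bucket reader lock, and return the matching struct process
 * (see the hash_list member of struct process further down in this file). */
static struct process *lookup_process_sketch(struct resource_set *rs, int pid,
					     struct mcs_rwlock_node_irqsave *lock)
{
	int hash = process_hash(pid);
	struct process *p;

	mcs_rwlock_reader_lock(&rs->process_hash->lock[hash], lock);	/* placeholder name */
	list_for_each_entry(p, &rs->process_hash->list[hash], hash_list) {
		if (p->pid == pid)
			return p;	/* still locked; caller releases via process_unlock() */
	}
	mcs_rwlock_reader_unlock(&rs->process_hash->lock[hash], lock);	/* placeholder name */
	return NULL;
}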
struct address_space {
struct page_table *page_table;
struct list_head siblings_list;
struct resource_set *res;
int type;
#define ADDRESS_SPACE_NORMAL 1
#define ADDRESS_SPACE_PVAS 2
int nslots;
int pids[];
};
struct user_fpregs_struct struct user_fpregs_struct
{ {
unsigned short cwd; unsigned short cwd;
@@ -234,6 +301,7 @@ struct vm_range {
}; };
struct vm_regions { struct vm_regions {
unsigned long vm_start, vm_end;
unsigned long text_start, text_end; unsigned long text_start, text_end;
unsigned long data_start, data_end; unsigned long data_start, data_end;
unsigned long brk_start, brk_end; unsigned long brk_start, brk_end;
@@ -252,11 +320,12 @@ struct sigfd {
#define SFD_CLOEXEC 02000000 #define SFD_CLOEXEC 02000000
#define SFD_NONBLOCK 04000 #define SFD_NONBLOCK 04000
struct sig_handler { struct sig_common {
ihk_spinlock_t lock; ihk_spinlock_t lock;
ihk_atomic_t use; ihk_atomic_t use;
struct sigfd *sigfd; struct sigfd *sigfd;
struct k_sigaction action[_NSIG]; struct k_sigaction action[_NSIG];
struct list_head sigpending;
}; };
struct sig_pending { struct sig_pending {
@@ -266,27 +335,60 @@ struct sig_pending {
int ptracecont; int ptracecont;
}; };
struct sig_shared {
ihk_spinlock_t lock;
ihk_atomic_t use;
struct list_head sigpending;
};
typedef void pgio_func_t(void *arg); typedef void pgio_func_t(void *arg);
/* Represents a node in the process fork tree, it may exist even after the /* Represents a node in the process fork tree, it may exist even after the
* corresponding process exited due to references from the parent and/or * corresponding process exited due to references from the parent and/or
* children and is used for implementing wait/waitpid without having a * children and is used for implementing wait/waitpid without having a
* special "init" process */ * special "init" process */
struct fork_tree_node { struct process {
ihk_spinlock_t lock; struct list_head hash_list;
ihk_atomic_t refcount; mcs_rwlock_lock_t update_lock; // lock for parent, status, ...?
int exit_status;
int status;
struct process *owner; // process vm
struct process_vm *vm;
// threads and children
struct list_head threads_list;
mcs_rwlock_lock_t threads_lock; // lock for threads_list
/* The ptracing process behave as the parent of the ptraced process
after using PTRACE_ATTACH except getppid. So we save it here. */
struct process *parent;
struct process *ppid_parent;
struct list_head children_list;
struct list_head ptraced_children_list;
mcs_rwlock_lock_t children_lock; // lock for children_list and ptraced_children_list
struct list_head siblings_list; // lock parent
struct list_head ptraced_siblings_list; // lock ppid_parent
ihk_atomic_t refcount;
// process status and exit status
int pstatus; // PS_RUNNING -> PS_EXITED -> PS_ZOMBIE
// | ^ ^
// | |---+ |
// V | |
// PS_STOPPING | |
// (PS_TRACING)| |
// | | |
// V +---- |
// PS_STOPPED -----+
// (PS_TRACED)
int exit_status;
/* Store exit_status for a group of threads when stopped by SIGSTOP.
exit_status can't be used because values of exit_status of threads
might divert while the threads are exiting by group_exit(). */
int group_exit_status;
/* Manage ptraced processes in the separate list to make it easy to
restore the original parent child relationship when
performing PTRACE_DETACH */
struct waitq waitpid_q;
// process info and credentials etc.
int pid; int pid;
int tid;
int pgid; int pgid;
int ruid; int ruid;
int euid; int euid;
@@ -296,50 +398,36 @@ struct fork_tree_node {
int egid; int egid;
int sgid; int sgid;
int fsgid; int fsgid;
int execed;
int nohost;
struct rlimit rlimit[MCK_RLIM_MAX];
unsigned long saved_auxv[AUXV_LEN];
char *saved_cmdline;
long saved_cmdline_len;
struct fork_tree_node *parent; /* Store ptrace flags.
struct list_head children; * The lower 8 bits are PTRACE_O_xxx of the PTRACE_SETOPTIONS request.
struct list_head siblings_list; * Other bits are for inner use of the McKernel.
*/
int ptrace;
/* The ptracing process behave as the parent of the ptraced process /* Store ptrace event message.
after using PTRACE_ATTACH except getppid. So we save it here. */ * PTRACE_O_xxx will store event message here.
struct fork_tree_node *ppid_parent; * PTRACE_GETEVENTMSG will get from here.
*/
unsigned long ptrace_eventmsg;
/* Manage ptraced processes in the separate list to make it easy to /* Store event related to signal. For example,
restore the orginal parent child relationship when it represents that the process has been resumed by SIGCONT. */
performing PTRACE_DETACH */ int signal_flags;
struct list_head ptrace_children;
struct list_head ptrace_siblings_list;
struct waitq waitpid_q; /* Store signal sent to parent when the process terminates. */
int termsig;
/* Store exit_status for a group of threads when stopped by SIGSTOP.
exit_status can't be used because values of exit_status of threads
might divert while the threads are exiting by group_exit(). */
int group_exit_status;
/* Store ptrace flags.
* The lower 8 bits are PTRACE_O_xxx of the PTRACE_SETOPTIONS request.
* Other bits are for inner use of the McKernel.
*/
int ptrace;
/* Store ptrace event message.
PTRACE_O_xxx will store event message here.
PTRACE_GETEVENTMSG will get from here.
*/
unsigned long ptrace_eventmsg;
/* Store event related to signal. For example,
it represents that the process has been resumed by SIGCONT. */
int signal_flags;
/* Store signal sent to parent when the process terminates. */
int termsig;
}; };
void hold_fork_tree_node(struct fork_tree_node *ftn); void hold_thread(struct thread *ftn);
void release_fork_tree_node(struct fork_tree_node *ftn); void release_thread(struct thread *ftn);
/* /*
* Scheduling policies * Scheduling policies
@@ -364,101 +452,109 @@ struct sched_param {
int sched_priority; int sched_priority;
}; };
struct process { struct thread {
struct list_head hash_list;
// thread info
int cpu_id; int cpu_id;
int tid;
int tstatus;
ihk_atomic_t refcount; // process vm
struct process_vm *vm; struct process_vm *vm;
// context
ihk_mc_kernel_context_t ctx; ihk_mc_kernel_context_t ctx;
ihk_mc_user_context_t *uctx; ihk_mc_user_context_t *uctx;
// sibling
struct process *proc;
struct list_head siblings_list; // lock process
// Runqueue list entry // Runqueue list entry
struct list_head sched_list; struct list_head sched_list; // lock cls
int sched_policy; int sched_policy;
struct sched_param sched_param; struct sched_param sched_param;
ihk_spinlock_t spin_sleep_lock; ihk_spinlock_t spin_sleep_lock;
int spin_sleep; int spin_sleep;
struct thread { ihk_atomic_t refcount;
struct {
int *clear_child_tid; int *clear_child_tid;
unsigned long tlsblock_base, tlsblock_limit; unsigned long tlsblock_base, tlsblock_limit;
} thread; } thread;
volatile int sigevent; // thread info
int nohost; cpu_set_t cpu_set;
int execed; fp_regs_struct *fp_regs;
int in_syscall_offload;
// signal
struct sig_common *sigcommon;
sigset_t sigmask; sigset_t sigmask;
stack_t sigstack; stack_t sigstack;
ihk_spinlock_t sigpendinglock;
struct list_head sigpending; struct list_head sigpending;
struct sig_shared *sigshared; ihk_spinlock_t sigpendinglock;
struct sig_handler *sighandler; volatile int sigevent;
struct rlimit rlimit[MCK_RLIM_MAX]; // pgio
pgio_func_t *pgio_fp; pgio_func_t *pgio_fp;
void *pgio_arg; void *pgio_arg;
struct fork_tree_node *ftn; // for ptrace
cpu_set_t cpu_set;
unsigned long saved_auxv[AUXV_LEN];
unsigned long *ptrace_debugreg; /* debug registers for ptrace */ unsigned long *ptrace_debugreg; /* debug registers for ptrace */
struct sig_pending *ptrace_recvsig; struct sig_pending *ptrace_recvsig;
struct sig_pending *ptrace_sendsig; struct sig_pending *ptrace_sendsig;
fp_regs_struct *fp_regs;
char *saved_cmdline;
long saved_cmdline_len;
int in_syscall_offload;
}; };
struct process_vm { struct process_vm {
ihk_atomic_t refcount; struct address_space *address_space;
struct page_table *page_table;
struct list_head vm_range_list; struct list_head vm_range_list;
struct vm_regions region; struct vm_regions region;
struct process *owner_process; /* process that reside on the same page */ struct process *proc; /* process that reside on the same page */
ihk_spinlock_t page_table_lock; ihk_spinlock_t page_table_lock;
ihk_spinlock_t memory_range_lock; ihk_spinlock_t memory_range_lock;
// to protect the followings: // to protect the followings:
// 1. addition of process "memory range" (extend_process_region, add_process_memory_range) // 1. addition of process "memory range" (extend_process_region, add_process_memory_range)
// 2. addition of process page table (allocate_pages, update_process_page_table) // 2. addition of process page table (allocate_pages, update_process_page_table)
// note that physical memory allocator (ihk_mc_alloc_pages, ihk_pagealloc_alloc) // note that physical memory allocator (ihk_mc_alloc_pages, ihk_pagealloc_alloc)
// is protected by its own lock (see ihk/manycore/generic/page_alloc.c) // is protected by its own lock (see ihk/manycore/generic/page_alloc.c)
ihk_atomic_t refcount;
cpu_set_t cpu_set; cpu_set_t cpu_set;
ihk_spinlock_t cpu_set_lock; ihk_spinlock_t cpu_set_lock;
int exiting; int exiting;
}; };
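struct process_vm no longer embeds a page_table pointer; translations go through the shared struct address_space instead, which is what the call-site changes throughout this commit (vm->address_space->page_table) reflect. A tiny sketch with a hypothetical wrapper name:

/* Hypothetical wrapper: translate a user virtual address through the
 * address_space this VM now references; returns nonzero on failure,
 * as ihk_mc_pt_virt_to_phys() does elsewhere in this commit. */
static int vm_virt_to_phys(struct process_vm *vm, void *virt, unsigned long *phys)
{
	return ihk_mc_pt_virt_to_phys(vm->address_space->page_table, virt, phys);
}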
struct process *create_process(unsigned long user_pc); struct thread *create_thread(unsigned long user_pc);
struct process *clone_process(struct process *org, unsigned long pc, struct thread *clone_thread(struct thread *org, unsigned long pc,
unsigned long sp, int clone_flags); unsigned long sp, int clone_flags);
void destroy_process(struct process *proc); void destroy_thread(struct thread *thread);
void hold_process(struct process *proc); void hold_thread(struct thread *thread);
void release_process(struct process *proc); void release_thread(struct thread *thread);
void flush_process_memory(struct process *proc); void flush_process_memory(struct process_vm *vm);
void free_process_memory(struct process *proc); void hold_process_vm(struct process_vm *vm);
void free_process_memory_ranges(struct process *proc); void release_process_vm(struct process_vm *vm);
int populate_process_memory(struct process *proc, void *start, size_t len); void hold_process(struct process *);
void release_process(struct process *);
void free_process_memory_ranges(struct process_vm *vm);
int populate_process_memory(struct process_vm *vm, void *start, size_t len);
int add_process_memory_range(struct process *process, int add_process_memory_range(struct process_vm *vm,
unsigned long start, unsigned long end, unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag, unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t objoff); struct memobj *memobj, off_t objoff);
int remove_process_memory_range(struct process *process, unsigned long start, int remove_process_memory_range(struct process_vm *vm, unsigned long start,
unsigned long end, int *ro_freedp); unsigned long end, int *ro_freedp);
int split_process_memory_range(struct process *process, int split_process_memory_range(struct process_vm *vm,
struct vm_range *range, uintptr_t addr, struct vm_range **splitp); struct vm_range *range, uintptr_t addr, struct vm_range **splitp);
int join_process_memory_range(struct process *process, struct vm_range *surviving, int join_process_memory_range(struct process_vm *vm, struct vm_range *surviving,
struct vm_range *merging); struct vm_range *merging);
int change_prot_process_memory_range( int change_prot_process_memory_range(
struct process *process, struct vm_range *range, struct process_vm *vm, struct vm_range *range,
unsigned long newflag); unsigned long newflag);
int remap_process_memory_range(struct process_vm *vm, struct vm_range *range, int remap_process_memory_range(struct process_vm *vm, struct vm_range *range,
uintptr_t start, uintptr_t end, off_t off); uintptr_t start, uintptr_t end, off_t off);
@@ -477,24 +573,24 @@ int extend_up_process_memory_range(struct process_vm *vm,
int page_fault_process_vm(struct process_vm *fault_vm, void *fault_addr, int page_fault_process_vm(struct process_vm *fault_vm, void *fault_addr,
uint64_t reason); uint64_t reason);
int remove_process_region(struct process *proc, int remove_process_region(struct process_vm *vm,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
struct program_load_desc; struct program_load_desc;
int init_process_stack(struct process *process, struct program_load_desc *pn, int init_process_stack(struct thread *thread, struct program_load_desc *pn,
int argc, char **argv, int argc, char **argv,
int envc, char **env); int envc, char **env);
unsigned long extend_process_region(struct process *proc, unsigned long extend_process_region(struct process_vm *vm,
unsigned long start, unsigned long end, unsigned long start, unsigned long end,
unsigned long address, unsigned long flag); unsigned long address, unsigned long flag);
extern enum ihk_mc_pt_attribute arch_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep); extern enum ihk_mc_pt_attribute arch_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep);
enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep); enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep);
void schedule(void); void schedule(void);
void runq_add_proc(struct process *proc, int cpu_id); void runq_add_thread(struct thread *thread, int cpu_id);
void runq_del_proc(struct process *proc, int cpu_id); void runq_del_thread(struct thread *thread, int cpu_id);
int sched_wakeup_process(struct process *proc, int valid_states); int sched_wakeup_thread(struct thread *thread, int valid_states);
void sched_request_migrate(int cpu_id, struct process *proc); void sched_request_migrate(int cpu_id, struct thread *thread);
void check_need_resched(void); void check_need_resched(void);
void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock); void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
@@ -502,8 +598,14 @@ void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
void cpu_clear_and_set(int c_cpu, int s_cpu, void cpu_clear_and_set(int c_cpu, int s_cpu,
cpu_set_t *cpu_set, ihk_spinlock_t *lock); cpu_set_t *cpu_set, ihk_spinlock_t *lock);
struct process *findthread_and_lock(int pid, int tid, ihk_spinlock_t **savelock, unsigned long *irqstate);
void process_unlock(void *savelock, unsigned long irqstate);
void release_cpuid(int cpuid); void release_cpuid(int cpuid);
struct thread *find_thread(int pid, int tid, struct mcs_rwlock_node_irqsave *lock);
void thread_unlock(struct thread *thread, struct mcs_rwlock_node_irqsave *lock);
struct process *find_process(int pid, struct mcs_rwlock_node_irqsave *lock);
void process_unlock(struct process *proc, struct mcs_rwlock_node_irqsave *lock);
void chain_process(struct process *);
void chain_thread(struct thread *);
void proc_init();
#endif #endif
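A short usage sketch of the lookup/unlock pairs declared above, mirroring the procfs call sites later in this commit: find_thread() hands back the thread with a lock (presumably its hash bucket's) recorded in the caller-supplied mcs_rwlock node, and thread_unlock() releases it. The pid and tid variables are assumed to be in scope.

struct mcs_rwlock_node_irqsave lock;
struct thread *t;

t = find_thread(pid, tid, &lock);
if (t) {
	int cpu = t->cpu_id;	/* read fields while the lock is held */
	thread_unlock(t, &lock);
	dkprintf("tid %d is on cpu %d\n", tid, cpu);
}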


@@ -285,4 +285,5 @@ struct procfs_file {
char fname[PROCFS_NAME_MAX]; /* procfs filename (request) */ char fname[PROCFS_NAME_MAX]; /* procfs filename (request) */
}; };
extern void terminate(int, int);
#endif #endif


@@ -36,7 +36,7 @@ struct timer {
uint64_t timeout; uint64_t timeout;
struct waitq processes; struct waitq processes;
struct list_head list; struct list_head list;
struct process *proc; struct thread *thread;
}; };
uint64_t schedule_timeout(uint64_t timeout); uint64_t schedule_timeout(uint64_t timeout);


@@ -19,7 +19,7 @@
#include <ihk/lock.h> #include <ihk/lock.h>
#include <list.h> #include <list.h>
struct process; struct thread;
struct waitq_entry; struct waitq_entry;
typedef int (*waitq_func_t)(struct waitq_entry *wait, unsigned mode, typedef int (*waitq_func_t)(struct waitq_entry *wait, unsigned mode,
@@ -58,7 +58,7 @@ typedef struct waitq_entry {
} }
extern void waitq_init(waitq_t *waitq); extern void waitq_init(waitq_t *waitq);
extern void waitq_init_entry(waitq_entry_t *entry, struct process *proc); extern void waitq_init_entry(waitq_entry_t *entry, struct thread *proc);
extern int waitq_active(waitq_t *waitq); extern int waitq_active(waitq_t *waitq);
extern void waitq_add_entry(waitq_t *waitq, waitq_entry_t *entry); extern void waitq_add_entry(waitq_t *waitq, waitq_entry_t *entry);
extern void waitq_add_entry_locked(waitq_t *waitq, waitq_entry_t *entry); extern void waitq_add_entry_locked(waitq_t *waitq, waitq_entry_t *entry);


@@ -225,6 +225,8 @@ static void rest_init(void)
ikc_master_init(); ikc_master_init();
proc_init();
sched_init(); sched_init();
} }


@@ -174,7 +174,7 @@ static struct ihk_mc_interrupt_handler query_free_mem_handler = {
void set_signal(int sig, void *regs, struct siginfo *info); void set_signal(int sig, void *regs, struct siginfo *info);
void check_signal(unsigned long, void *, int); void check_signal(unsigned long, void *, int);
int gencore(struct process *, void *, struct coretable **, int *); int gencore(struct thread *, void *, struct coretable **, int *);
void freecore(struct coretable **); void freecore(struct coretable **);
/** /**
@@ -184,14 +184,14 @@ void freecore(struct coretable **);
* \param regs A pointer to a x86_regs structure. * \param regs A pointer to a x86_regs structure.
*/ */
void coredump(struct process *proc, void *regs) void coredump(struct thread *thread, void *regs)
{ {
struct syscall_request request IHK_DMA_ALIGN; struct syscall_request request IHK_DMA_ALIGN;
int ret; int ret;
struct coretable *coretable; struct coretable *coretable;
int chunks; int chunks;
ret = gencore(proc, regs, &coretable, &chunks); ret = gencore(thread, regs, &coretable, &chunks);
if (ret != 0) { if (ret != 0) {
dkprintf("could not generate a core file image\n"); dkprintf("could not generate a core file image\n");
return; return;
@@ -200,7 +200,7 @@ void coredump(struct process *proc, void *regs)
request.args[0] = chunks; request.args[0] = chunks;
request.args[1] = virt_to_phys(coretable); request.args[1] = virt_to_phys(coretable);
/* no data for now */ /* no data for now */
ret = do_syscall(&request, proc->cpu_id, proc->ftn->pid); ret = do_syscall(&request, thread->cpu_id, thread->proc->pid);
if (ret == 0) { if (ret == 0) {
kprintf("dumped core.\n"); kprintf("dumped core.\n");
} else { } else {
@@ -209,10 +209,10 @@ void coredump(struct process *proc, void *regs)
freecore(&coretable); freecore(&coretable);
} }
static void unhandled_page_fault(struct process *proc, void *fault_addr, void *regs) static void unhandled_page_fault(struct thread *thread, void *fault_addr, void *regs)
{ {
const uintptr_t address = (uintptr_t)fault_addr; const uintptr_t address = (uintptr_t)fault_addr;
struct process_vm *vm = proc->vm; struct process_vm *vm = thread->vm;
struct vm_range *range; struct vm_range *range;
char found; char found;
unsigned long irqflags; unsigned long irqflags;
@@ -235,7 +235,7 @@ static void unhandled_page_fault(struct process *proc, void *fault_addr, void *r
found = 1; found = 1;
dkprintf("address is in range, flag: 0x%X! \n", dkprintf("address is in range, flag: 0x%X! \n",
range->flag); range->flag);
ihk_mc_pt_print_pte(vm->page_table, (void*)address); ihk_mc_pt_print_pte(vm->address_space->page_table, (void*)address);
break; break;
} }
} }
@@ -366,7 +366,7 @@ void tlb_flush_handler(int vector)
static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs) static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
{ {
struct process *proc = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
int error; int error;
dkprintf("[%d]page_fault_handler(%p,%lx,%p)\n", dkprintf("[%d]page_fault_handler(%p,%lx,%p)\n",
@@ -376,29 +376,24 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
cpu_enable_interrupt(); cpu_enable_interrupt();
error = page_fault_process_vm(proc->vm, fault_addr, reason); error = page_fault_process_vm(thread->vm, fault_addr, reason);
if (error) { if (error) {
struct siginfo info; struct siginfo info;
if (error == -ECANCELED) { if (error == -ECANCELED) {
dkprintf("process is exiting, terminate.\n"); dkprintf("process is exiting, terminate.\n");
ihk_mc_spinlock_lock_noirq(&proc->ftn->lock);
proc->ftn->status = PS_ZOMBIE;
ihk_mc_spinlock_unlock_noirq(&proc->ftn->lock);
release_fork_tree_node(proc->ftn->parent);
release_fork_tree_node(proc->ftn);
release_process(proc);
preempt_enable(); preempt_enable();
schedule(); terminate(0, SIGSEGV);
// no return
} }
kprintf("[%d]page_fault_handler(%p,%lx,%p):" kprintf("[%d]page_fault_handler(%p,%lx,%p):"
"fault vm failed. %d, TID: %d\n", "fault vm failed. %d, TID: %d\n",
ihk_mc_get_processor_id(), fault_addr, ihk_mc_get_processor_id(), fault_addr,
reason, regs, error, proc->ftn->tid); reason, regs, error, thread->tid);
unhandled_page_fault(proc, fault_addr, regs); unhandled_page_fault(thread, fault_addr, regs);
preempt_enable();
memset(&info, '\0', sizeof info); memset(&info, '\0', sizeof info);
if (error == -ERANGE) { if (error == -ERANGE) {
info.si_signo = SIGBUS; info.si_signo = SIGBUS;
@@ -407,7 +402,7 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
set_signal(SIGBUS, regs, &info); set_signal(SIGBUS, regs, &info);
} }
else { else {
struct process_vm *vm = proc->vm; struct process_vm *vm = thread->vm;
struct vm_range *range; struct vm_range *range;
info.si_signo = SIGSEGV; info.si_signo = SIGSEGV;
@@ -421,7 +416,6 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
info._sifields._sigfault.si_addr = fault_addr; info._sifields._sigfault.si_addr = fault_addr;
set_signal(SIGSEGV, regs, &info); set_signal(SIGSEGV, regs, &info);
} }
preempt_enable();
check_signal(0, regs, 0); check_signal(0, regs, 0);
goto out; goto out;
} }
@@ -880,12 +874,10 @@ int memcheckall()
struct alloc *ap; struct alloc *ap;
int r = 0; int r = 0;
kprintf("memcheckall\n");
for(i = 0; i < HASHNUM; i++) for(i = 0; i < HASHNUM; i++)
for(ap = allochash[i]; ap; ap = ap->next) for(ap = allochash[i]; ap; ap = ap->next)
if(ap->p) if(ap->p)
r |= _memcheck(ap->p + 1, "memcheck", NULL, 0, 2); r |= _memcheck(ap->p + 1, "memcheck", NULL, 0, 2);
kprintf("done\n");
return r; return r;
} }

File diff suppressed because it is too large


@@ -257,14 +257,14 @@ static void operate_proc_procfs_file(int pid, char *fname, int msg, int mode, in
void process_procfs_request(unsigned long rarg) void process_procfs_request(unsigned long rarg)
{ {
unsigned long parg, pbuf; unsigned long parg, pbuf;
struct process *proc = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
struct process *proc = thread->proc;
struct procfs_read *r; struct procfs_read *r;
struct ikc_scd_packet packet; struct ikc_scd_packet packet;
int rosnum, ret, pid, tid, ans = -EIO, eof = 0; int rosnum, ret, pid, tid, ans = -EIO, eof = 0;
char *buf, *p; char *buf, *p;
struct ihk_ikc_channel_desc *syscall_channel; struct ihk_ikc_channel_desc *syscall_channel;
ihk_spinlock_t *savelock; struct mcs_rwlock_node_irqsave lock;
unsigned long irqstate;
unsigned long offset; unsigned long offset;
int count; int count;
int npages; int npages;
@@ -336,30 +336,31 @@ void process_procfs_request(unsigned long rarg)
*/ */
ret = sscanf(p, "%d/", &pid); ret = sscanf(p, "%d/", &pid);
if (ret == 1) { if (ret == 1) {
if (pid != cpu_local_var(current)->ftn->pid) { if (pid != cpu_local_var(current)->proc->pid) {
/* We are not located in the proper cpu for some reason. */ /* We are not located in the proper cpu for some reason. */
dprintf("mismatched pid. We are %d, but requested pid is %d.\n", dprintf("mismatched pid. We are %d, but requested pid is %d.\n",
pid, cpu_local_var(current)->pid); cpu_local_var(current)->proc->pid, pid);
tid = pid; /* main thread */ tid = pid; /* main thread */
proc = findthread_and_lock(pid, tid, &savelock, &irqstate); thread = find_thread(pid, tid, &lock);
if (!proc) { if (!thread) {
dprintf("We cannot find the proper cpu for requested pid.\n"); dprintf("We cannot find the proper cpu for requested pid.\n");
goto end; goto end;
} }
else if (proc->cpu_id != ihk_mc_get_processor_id()) { else if (thread->cpu_id != ihk_mc_get_processor_id()) {
/* The target process has gone by migration. */ /* The target process has gone by migration. */
r->newcpu = proc->cpu_id; r->newcpu = thread->cpu_id;
dprintf("expected cpu id is %d.\n", proc->cpu_id); dprintf("expected cpu id is %d.\n", thread->cpu_id);
process_unlock(savelock, irqstate); thread_unlock(thread, &lock);
ans = 0; ans = 0;
goto end; goto end;
} }
else { else {
process_unlock(savelock, irqstate); thread_unlock(thread, &lock);
/* 'proc' is not 'current' */ /* 'proc' is not 'current' */
is_current = 0; is_current = 0;
} }
proc = thread->proc;
} }
} }
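
The findthread_and_lock()/process_unlock() pair with a separate savelock and irqstate becomes find_thread()/thread_unlock() around an mcs_rwlock_node_irqsave; a minimal sketch of the new pattern, assuming find_thread() returns with that lock node held, as the code above relies on:

	struct mcs_rwlock_node_irqsave lock;
	struct thread *thread;
	int cpu;

	thread = find_thread(pid, tid, &lock);
	if (!thread)
		return;			/* no such thread */

	cpu = thread->cpu_id;		/* read fields while the lock node is held */
	thread_unlock(thread, &lock);
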
else if (!strcmp(p, "stat")) { /* "/proc/stat" */ else if (!strcmp(p, "stat")) { /* "/proc/stat" */
@@ -431,7 +432,7 @@ void process_procfs_request(unsigned long rarg)
ans = -EIO; ans = -EIO;
goto end; goto end;
} }
ret = ihk_mc_pt_virt_to_phys(vm->page_table, ret = ihk_mc_pt_virt_to_phys(vm->address_space->page_table,
(void *)offset, &pa); (void *)offset, &pa);
if(ret){ if(ret){
if(ans == 0) if(ans == 0)
@@ -562,8 +563,8 @@ void process_procfs_request(unsigned long rarg)
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock); ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
while (start < end) { while (start < end) {
*_buf = ihk_mc_pt_virt_to_pagemap(proc->vm->page_table, start); *_buf = ihk_mc_pt_virt_to_pagemap(proc->vm->address_space->page_table, start);
dprintf("PID: %d, /proc/pagemap: 0x%lx -> %lx\n", proc->ftn->pid, dprintf("PID: %d, /proc/pagemap: 0x%lx -> %lx\n", proc->proc->pid,
start, *_buf); start, *_buf);
start += PAGE_SIZE; start += PAGE_SIZE;
++_buf; ++_buf;
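
Both translations above now reach the page table through the vm->address_space indirection rather than a page_table pointer on the vm itself; a sketch of the two accessors as they are used in this file (out-parameter types are assumptions):

	unsigned long pa;
	uint64_t entry;

	/* translate one user virtual address; nonzero return means no mapping */
	if (ihk_mc_pt_virt_to_phys(vm->address_space->page_table,
	                           (void *)offset, &pa) == 0) {
		/* pa now holds the physical address backing 'offset' */
	}

	/* build one /proc/<pid>/pagemap entry for the page at 'start' */
	entry = ihk_mc_pt_virt_to_pagemap(proc->vm->address_space->page_table, start);
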
@@ -586,7 +587,6 @@ void process_procfs_request(unsigned long rarg)
unsigned long lockedsize = 0; unsigned long lockedsize = 0;
char tmp[1024]; char tmp[1024];
int len; int len;
struct fork_tree_node *ftn = proc->ftn;
ihk_mc_spinlock_lock_noirq(&proc->vm->memory_range_lock); ihk_mc_spinlock_lock_noirq(&proc->vm->memory_range_lock);
list_for_each_entry(range, &proc->vm->vm_range_list, list) { list_for_each_entry(range, &proc->vm->vm_range_list, list) {
@@ -599,8 +599,8 @@ void process_procfs_request(unsigned long rarg)
"Uid:\t%d\t%d\t%d\t%d\n" "Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n" "Gid:\t%d\t%d\t%d\t%d\n"
"VmLck:\t%9lu kB\n", "VmLck:\t%9lu kB\n",
ftn->ruid, ftn->euid, ftn->suid, ftn->fsuid, proc->ruid, proc->euid, proc->suid, proc->fsuid,
ftn->rgid, ftn->egid, ftn->sgid, ftn->fsgid, proc->rgid, proc->egid, proc->sgid, proc->fsgid,
(lockedsize + 1023) >> 10); (lockedsize + 1023) >> 10);
len = strlen(tmp); len = strlen(tmp);
if (r->offset < len) { if (r->offset < len) {
@@ -712,7 +712,7 @@ void process_procfs_request(unsigned long rarg)
char tmp[1024]; char tmp[1024];
int len; int len;
if ((proc = findthread_and_lock(pid, tid, &savelock, &irqstate))){ if ((thread = find_thread(pid, tid, &lock))){
dprintf("thread found! pid=%d tid=%d\n", pid, tid); dprintf("thread found! pid=%d tid=%d\n", pid, tid);
/* /*
* pid (comm) state ppid * pid (comm) state ppid
@@ -748,10 +748,10 @@ void process_procfs_request(unsigned long rarg)
0L, 0L, 0L, 0L, // rsslim... 0L, 0L, 0L, 0L, // rsslim...
0L, 0L, 0L, 0L, // kstkesp... 0L, 0L, 0L, 0L, // kstkesp...
0L, 0L, 0L, 0L, // sigignore... 0L, 0L, 0L, 0L, // sigignore...
0L, 0, proc->cpu_id, 0, // cnswap... 0L, 0, thread->cpu_id, 0, // cnswap...
0, 0LL, 0L, 0L // policy... 0, 0LL, 0L, 0L // policy...
); );
process_unlock(savelock, irqstate); thread_unlock(thread, &lock);
dprintf("tmp=%s\n", tmp); dprintf("tmp=%s\n", tmp);
len = strlen(tmp); len = strlen(tmp);

File diff suppressed because it is too large

View File

@@ -57,14 +57,14 @@ uint64_t schedule_timeout(uint64_t timeout)
{ {
struct waitq_entry my_wait; struct waitq_entry my_wait;
struct timer my_timer; struct timer my_timer;
struct process *proc = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
int irqstate; int irqstate;
int spin_sleep; int spin_sleep;
irqstate = ihk_mc_spinlock_lock(&proc->spin_sleep_lock); irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
dkprintf("schedule_timeout() spin sleep timeout: %lu\n", timeout); dkprintf("schedule_timeout() spin sleep timeout: %lu\n", timeout);
spin_sleep = ++proc->spin_sleep; spin_sleep = ++thread->spin_sleep;
ihk_mc_spinlock_unlock(&proc->spin_sleep_lock, irqstate); ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
/* Spin sleep.. */ /* Spin sleep.. */
for (;;) { for (;;) {
@@ -72,10 +72,10 @@ uint64_t schedule_timeout(uint64_t timeout)
uint64_t t_e; uint64_t t_e;
int spin_over = 0; int spin_over = 0;
irqstate = ihk_mc_spinlock_lock(&proc->spin_sleep_lock); irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
/* Woken up by someone? */ /* Woken up by someone? */
if (proc->spin_sleep < 1) { if (thread->spin_sleep < 1) {
t_e = rdtsc(); t_e = rdtsc();
spin_over = 1; spin_over = 1;
@@ -87,7 +87,7 @@ uint64_t schedule_timeout(uint64_t timeout)
} }
} }
ihk_mc_spinlock_unlock(&proc->spin_sleep_lock, irqstate); ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
if (!spin_over) { if (!spin_over) {
t_s = rdtsc(); t_s = rdtsc();
@@ -97,12 +97,12 @@ uint64_t schedule_timeout(uint64_t timeout)
need_schedule = v->runq_len > 1 ? 1 : 0; need_schedule = v->runq_len > 1 ? 1 : 0;
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate); ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
/* Give a chance to another process (if any) in case the core is /* Give a chance to another thread (if any) in case the core is
* oversubscribed, but make sure we will be re-scheduled */ * oversubscribed, but make sure we will be re-scheduled */
if (need_schedule) { if (need_schedule) {
xchg4(&(cpu_local_var(current)->ftn->status), PS_RUNNING); xchg4(&(cpu_local_var(current)->tstatus), PS_RUNNING);
schedule(); schedule();
xchg4(&(cpu_local_var(current)->ftn->status), xchg4(&(cpu_local_var(current)->tstatus),
PS_INTERRUPTIBLE); PS_INTERRUPTIBLE);
} }
else { else {
@@ -125,7 +125,7 @@ uint64_t schedule_timeout(uint64_t timeout)
dkprintf("schedule_timeout() spin woken up, timeout: %lu\n", dkprintf("schedule_timeout() spin woken up, timeout: %lu\n",
timeout); timeout);
/* Give a chance to another process (if any) in case we timed out, /* Give a chance to another thread (if any) in case we timed out,
* but make sure we will be re-scheduled */ * but make sure we will be re-scheduled */
if (timeout == 0) { if (timeout == 0) {
int need_schedule; int need_schedule;
@@ -137,18 +137,18 @@ uint64_t schedule_timeout(uint64_t timeout)
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate); ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
if (need_schedule) { if (need_schedule) {
xchg4(&(cpu_local_var(current)->ftn->status), PS_RUNNING); xchg4(&(cpu_local_var(current)->tstatus), PS_RUNNING);
schedule(); schedule();
xchg4(&(cpu_local_var(current)->ftn->status), xchg4(&(cpu_local_var(current)->tstatus),
PS_INTERRUPTIBLE); PS_INTERRUPTIBLE);
} }
} }
irqstate = ihk_mc_spinlock_lock(&proc->spin_sleep_lock); irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
if (spin_sleep == proc->spin_sleep) { if (spin_sleep == thread->spin_sleep) {
--proc->spin_sleep; --thread->spin_sleep;
} }
ihk_mc_spinlock_unlock(&proc->spin_sleep_lock, irqstate); ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
return timeout; return timeout;
} }
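
Stripped of the rdtsc() bookkeeping and run-queue yielding, the spin-sleep part of schedule_timeout() is a counter protocol on spin_sleep_lock: the sleeper increments spin_sleep, spins until it drops below 1 (that a waker decrements it is an assumption implied by the "Woken up by someone?" test), and undoes its own increment if it merely timed out. A condensed sketch of the sleeper side:

	irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
	spin_sleep = ++thread->spin_sleep;	/* announce the spin sleep */
	ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

	for (;;) {
		int spin_over = 0;

		irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
		if (thread->spin_sleep < 1)	/* a waker decremented it */
			spin_over = 1;
		ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

		if (spin_over || timeout == 0)
			break;
		/* otherwise keep spinning, yielding the CPU if oversubscribed */
	}

	irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
	if (spin_sleep == thread->spin_sleep)	/* nobody woke us up */
		--thread->spin_sleep;
	ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
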
@@ -156,7 +156,7 @@ uint64_t schedule_timeout(uint64_t timeout)
/* Init waitq and wait entry for this timer */ /* Init waitq and wait entry for this timer */
my_timer.timeout = (timeout < LOOP_TIMEOUT) ? LOOP_TIMEOUT : timeout; my_timer.timeout = (timeout < LOOP_TIMEOUT) ? LOOP_TIMEOUT : timeout;
my_timer.proc = cpu_local_var(current); my_timer.thread = cpu_local_var(current);
waitq_init(&my_timer.processes); waitq_init(&my_timer.processes);
waitq_init_entry(&my_wait, cpu_local_var(current)); waitq_init_entry(&my_wait, cpu_local_var(current));
@@ -213,7 +213,7 @@ void wake_timers_loop(void)
list_del(&timer->list); list_del(&timer->list);
dkprintf("timers timeout occurred, waking up pid: %d\n", dkprintf("timers timeout occurred, waking up pid: %d\n",
timer->proc->ftn->pid); timer->thread->proc->pid);
waitq_wakeup(&timer->processes); waitq_wakeup(&timer->processes);
} }
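
The timer now records the sleeping struct thread rather than a process; as used above, a timer is armed roughly like this before the caller waits on its queue (enqueuing on the per-CPU timer list and its locking are omitted, since they are not part of these hunks):

	struct timer my_timer;
	struct waitq_entry my_wait;

	my_timer.timeout = (timeout < LOOP_TIMEOUT) ? LOOP_TIMEOUT : timeout;
	my_timer.thread = cpu_local_var(current);
	waitq_init(&my_timer.processes);	/* threads sleeping on this timer */
	waitq_init_entry(&my_wait, cpu_local_var(current));
	/* ... add my_timer to the timer list, then sleep on my_timer.processes;
	 *  wake_timers_loop() later calls waitq_wakeup(&timer->processes) ... */
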

View File

@@ -19,7 +19,7 @@ int
default_wake_function(waitq_entry_t *entry, unsigned mode, default_wake_function(waitq_entry_t *entry, unsigned mode,
int flags, void *key) int flags, void *key)
{ {
return sched_wakeup_process(entry->private, PS_NORMAL); return sched_wakeup_thread(entry->private, PS_NORMAL);
} }
void void
@@ -30,7 +30,7 @@ waitq_init(waitq_t *waitq)
} }
void void
waitq_init_entry(waitq_entry_t *entry, struct process *proc) waitq_init_entry(waitq_entry_t *entry, struct thread *proc)
{ {
entry->private = proc; entry->private = proc;
entry->func = default_wake_function; entry->func = default_wake_function;
@@ -89,14 +89,14 @@ waitq_prepare_to_wait(waitq_t *waitq, waitq_entry_t *entry, int state)
ihk_mc_spinlock_lock_noirq(&waitq->lock); ihk_mc_spinlock_lock_noirq(&waitq->lock);
if (list_empty(&entry->link)) if (list_empty(&entry->link))
list_add(&entry->link, &waitq->waitq); list_add(&entry->link, &waitq->waitq);
cpu_local_var(current)->ftn->status = state; cpu_local_var(current)->tstatus = state;
ihk_mc_spinlock_unlock_noirq(&waitq->lock); ihk_mc_spinlock_unlock_noirq(&waitq->lock);
} }
void void
waitq_finish_wait(waitq_t *waitq, waitq_entry_t *entry) waitq_finish_wait(waitq_t *waitq, waitq_entry_t *entry)
{ {
cpu_local_var(current)->ftn->status = PS_RUNNING; cpu_local_var(current)->tstatus = PS_RUNNING;
waitq_remove_entry(waitq, entry); waitq_remove_entry(waitq, entry);
} }
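
A typical sleeper on the refactored wait queue keeps the usual prepare/schedule/finish shape; this is a sketch only, assuming a waitq_t set up elsewhere with waitq_init(&wq) and a waker that eventually calls waitq_wakeup(&wq):

	waitq_entry_t entry;

	waitq_init_entry(&entry, cpu_local_var(current));	/* entry->private is a struct thread * now */
	waitq_prepare_to_wait(&wq, &entry, PS_INTERRUPTIBLE);	/* sets current->tstatus */
	schedule();						/* sleep until waitq_wakeup(&wq) */
	waitq_finish_wait(&wq, &entry);				/* back to PS_RUNNING, entry removed */
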