Compare commits
28 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f74dcfc2a1 | ||
|
|
7c562d0539 | ||
|
|
b5e4459a34 | ||
|
|
782122b681 | ||
|
|
1ce1b17a85 | ||
|
|
a2456c3ed2 | ||
|
|
15783f09a0 | ||
|
|
dac6f2883e | ||
|
|
c484f766fa | ||
|
|
57690479bd | ||
|
|
d0539a9cac | ||
|
|
4c8f583c0c | ||
|
|
6118faffa9 | ||
|
|
dad6470c60 | ||
|
|
46c37fc8f3 | ||
|
|
f6908f21a8 | ||
|
|
01d9d9a5ba | ||
|
|
c43d993a4d | ||
|
|
7d9bbecd7a | ||
|
|
d135731398 | ||
|
|
5c190beb04 | ||
|
|
fc66556f9f | ||
|
|
648bacc90f | ||
|
|
dd37443fc7 | ||
|
|
e34322702a | ||
|
|
e12997e6a9 | ||
|
|
fabaa806d3 | ||
|
|
a83ad620c8 |
@@ -181,6 +181,8 @@ static void init_idt(void)
|
||||
}
|
||||
|
||||
static int xsave_available = 0;
|
||||
static int xsave_size = 0;
|
||||
static uint64_t xsave_mask = 0x0;
|
||||
|
||||
void init_fpu(void)
|
||||
{
|
||||
@@ -224,6 +226,26 @@ void init_fpu(void)
|
||||
xsetbv(0, reg);
|
||||
dkprintf("init_fpu(): AVX init: XCR0 = 0x%016lX\n", reg);
|
||||
}
|
||||
if(xsave_available){
|
||||
unsigned long eax;
|
||||
unsigned long ebx;
|
||||
unsigned long ecx;
|
||||
unsigned long edx;
|
||||
asm volatile("cpuid" : "=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx) : "a" (0x0d), "c" (0x00));
|
||||
xsave_size = ecx;
|
||||
dkprintf("init_fpu(): xsave_size = %d\n", xsave_size);
|
||||
|
||||
if ((eax & (1 << 5)) && (eax & (1 << 6)) && (eax & (1 << 7))) {
|
||||
/* Set xcr0[7:5] to enable avx-512 ops */
|
||||
reg = xgetbv(0);
|
||||
reg |= 0xe6;
|
||||
xsetbv(0, reg);
|
||||
dkprintf("init_fpu(): AVX-512 init: XCR0 = 0x%016lX\n", reg);
|
||||
}
|
||||
}
|
||||
|
||||
xsave_mask = xgetbv(0);
|
||||
dkprintf("init_fpu(): xsave_mask = 0x%016lX\n", xsave_mask);
|
||||
|
||||
/* TODO: set MSR_IA32_XSS to enable xsaves/xrstors */
|
||||
|
||||
@@ -234,6 +256,17 @@ void init_fpu(void)
|
||||
asm volatile("finit");
|
||||
}
|
||||
|
||||
int
|
||||
get_xsave_size()
|
||||
{
|
||||
return xsave_size;
|
||||
}
|
||||
|
||||
uint64_t get_xsave_mask()
|
||||
{
|
||||
return xsave_mask;
|
||||
}
|
||||
|
||||
void reload_gdt(struct x86_desc_ptr *gdt_ptr)
|
||||
{
|
||||
asm volatile("pushq %1\n"
|
||||
@@ -883,13 +916,36 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
|
||||
dkprintf("timer[%lu]: CPU_FLAG_NEED_RESCHED \n", rdtsc());
|
||||
}
|
||||
else if (vector == LOCAL_PERF_VECTOR) {
|
||||
struct siginfo info;
|
||||
unsigned long value;
|
||||
struct thread *thread = cpu_local_var(current);
|
||||
struct process *proc = thread->proc;
|
||||
long irqstate;
|
||||
struct mckfd *fdp;
|
||||
|
||||
lapic_write(LAPIC_LVTPC, LOCAL_PERF_VECTOR);
|
||||
|
||||
value = rdmsr(MSR_PERF_GLOBAL_STATUS);
|
||||
wrmsr(MSR_PERF_GLOBAL_OVF_CTRL, value);
|
||||
wrmsr(MSR_PERF_GLOBAL_OVF_CTRL, 0);
|
||||
//TODO: counter overflow signal
|
||||
//set_signal(0x1d, regs, NULL); // SIGIO
|
||||
|
||||
irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);
|
||||
for(fdp = proc->mckfd; fdp; fdp = fdp->next) {
|
||||
if(fdp->sig_no > 0)
|
||||
break;
|
||||
}
|
||||
ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);
|
||||
|
||||
if(fdp) {
|
||||
memset(&info, '\0', sizeof info);
|
||||
info.si_signo = fdp->sig_no;
|
||||
info._sifields._sigfault.si_addr = (void *)regs->gpr.rip;
|
||||
info._sifields._sigpoll.si_fd = fdp->fd;
|
||||
set_signal(fdp->sig_no, regs, &info);
|
||||
}
|
||||
else {
|
||||
set_signal(SIGIO, regs, NULL);
|
||||
}
|
||||
}
|
||||
else if (vector >= IHK_TLB_FLUSH_IRQ_VECTOR_START &&
|
||||
vector < IHK_TLB_FLUSH_IRQ_VECTOR_END) {
|
||||
@@ -1494,7 +1550,8 @@ release_fp_regs(struct thread *thread)
|
||||
if (thread && !thread->fp_regs)
|
||||
return;
|
||||
|
||||
pages = (sizeof(fp_regs_struct) + 4095) >> 12;
|
||||
pages = (xsave_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||
dkprintf("release_fp_regs: pages=%d\n", pages);
|
||||
ihk_mc_free_pages(thread->fp_regs, pages);
|
||||
thread->fp_regs = NULL;
|
||||
}
|
||||
@@ -1508,7 +1565,8 @@ save_fp_regs(struct thread *thread)
|
||||
int pages;
|
||||
|
||||
if (!thread->fp_regs) {
|
||||
pages = (sizeof(fp_regs_struct) + 4095) >> 12;
|
||||
pages = (xsave_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||
dkprintf("save_fp_regs: pages=%d\n", pages);
|
||||
thread->fp_regs = ihk_mc_alloc_pages(pages, IHK_MC_AP_NOWAIT);
|
||||
|
||||
if (!thread->fp_regs) {
|
||||
@@ -1517,14 +1575,15 @@ save_fp_regs(struct thread *thread)
|
||||
}
|
||||
|
||||
memset(thread->fp_regs, 0, sizeof(fp_regs_struct));
|
||||
memset(thread->fp_regs, 0, pages * PAGE_SIZE);
|
||||
}
|
||||
|
||||
if (xsave_available) {
|
||||
unsigned int low, high;
|
||||
|
||||
/* Request full save of x87, SSE and AVX states */
|
||||
low = 0x7;
|
||||
high = 0;
|
||||
/* Request full save of x87, SSE, AVX and AVX-512 states */
|
||||
low = (unsigned int)xsave_mask;
|
||||
high = (unsigned int)(xsave_mask >> 32);
|
||||
|
||||
asm volatile("xsave %0" : : "m" (*thread->fp_regs), "a" (low), "d" (high)
|
||||
: "memory");
|
||||
@@ -1546,9 +1605,9 @@ restore_fp_regs(struct thread *thread)
|
||||
if (xsave_available) {
|
||||
unsigned int low, high;
|
||||
|
||||
/* Request full restore of x87, SSE and AVX states */
|
||||
low = 0x7;
|
||||
high = 0;
|
||||
/* Request full restore of x87, SSE, AVX and AVX-512 states */
|
||||
low = (unsigned int)xsave_mask;
|
||||
high = (unsigned int)(xsave_mask >> 32);
|
||||
|
||||
asm volatile("xrstor %0" : : "m" (*thread->fp_regs),
|
||||
"a" (low), "d" (high));
|
||||
|
||||
@@ -66,7 +66,7 @@ SYSCALL_DELEGATED(65, semop)
|
||||
SYSCALL_HANDLED(67, shmdt)
|
||||
SYSCALL_DELEGATED(69, msgsnd)
|
||||
SYSCALL_DELEGATED(70, msgrcv)
|
||||
SYSCALL_DELEGATED(72, fcntl)
|
||||
SYSCALL_HANDLED(72, fcntl)
|
||||
SYSCALL_DELEGATED(79, getcwd)
|
||||
SYSCALL_DELEGATED(89, readlink)
|
||||
SYSCALL_HANDLED(96, gettimeofday)
|
||||
|
||||
@@ -910,11 +910,17 @@ static int split_large_page(pte_t *ptep, size_t pgsize)
|
||||
|
||||
*ptep = (virt_to_phys(pt) & PT_PHYSMASK) | PFL2_PDIR_ATTR;
|
||||
|
||||
if (phys_base != NOPHYS) {
|
||||
page = phys_to_page(phys_base);
|
||||
if (page && page_unmap(page)) {
|
||||
kprintf("split_large_page:page_unmap:%p\n", page);
|
||||
panic("split_large_page:page_unmap\n");
|
||||
/* Do not do this check for large pages as they don't come from the zeroobj
|
||||
* and are not actually mapped.
|
||||
* TODO: clean up zeroobj as we don't really need it, anonymous mappings
|
||||
* should be allocated for real */
|
||||
if (pgsize != PTL2_SIZE) {
|
||||
if (phys_base != NOPHYS) {
|
||||
page = phys_to_page(phys_base);
|
||||
if (pgsize != PTL2_SIZE && page && page_unmap(page)) {
|
||||
kprintf("split_large_page:page_unmap:%p\n", page);
|
||||
panic("split_large_page:page_unmap\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@@ -2261,6 +2267,7 @@ int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t
|
||||
if ((ustart < vm->region.user_start)
|
||||
|| (vm->region.user_end <= ustart)
|
||||
|| ((vm->region.user_end - ustart) < siz)) {
|
||||
kprintf("%s: error: out of user range\n", __FUNCTION__);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
@@ -2268,6 +2275,7 @@ int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t
|
||||
for (addr = ustart & PAGE_MASK; addr < uend; addr += PAGE_SIZE) {
|
||||
error = page_fault_process_vm(vm, (void *)addr, reason);
|
||||
if (error) {
|
||||
kprintf("%s: error: PF for %p failed\n", __FUNCTION__, addr);
|
||||
return error;
|
||||
}
|
||||
}
|
||||
@@ -2283,11 +2291,22 @@ int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t
|
||||
|
||||
error = ihk_mc_pt_virt_to_phys(vm->address_space->page_table, from, &pa);
|
||||
if (error) {
|
||||
kprintf("%s: error: resolving physical address or %p\n", __FUNCTION__, from);
|
||||
return error;
|
||||
}
|
||||
|
||||
va = phys_to_virt(pa);
|
||||
memcpy(to, va, cpsize);
|
||||
if (pa < ihk_mc_get_memory_address(IHK_MC_GMA_MAP_START, 0) ||
|
||||
pa >= ihk_mc_get_memory_address(IHK_MC_GMA_MAP_END, 0)) {
|
||||
dkprintf("%s: pa is outside of LWK memory, to: %p, pa: %p,"
|
||||
"cpsize: %d\n", __FUNCTION__, to, pa, cpsize);
|
||||
va = ihk_mc_map_virtual(pa, 1, PTATTR_ACTIVE);
|
||||
memcpy(to, va, cpsize);
|
||||
ihk_mc_unmap_virtual(va, 1, 1);
|
||||
}
|
||||
else {
|
||||
va = phys_to_virt(pa);
|
||||
memcpy(to, va, cpsize);
|
||||
}
|
||||
|
||||
from += cpsize;
|
||||
to += cpsize;
|
||||
@@ -2413,8 +2432,18 @@ int patch_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
|
||||
return error;
|
||||
}
|
||||
|
||||
va = phys_to_virt(pa);
|
||||
memcpy(va, from, cpsize);
|
||||
if (pa < ihk_mc_get_memory_address(IHK_MC_GMA_MAP_START, 0) ||
|
||||
pa >= ihk_mc_get_memory_address(IHK_MC_GMA_MAP_END, 0)) {
|
||||
dkprintf("%s: pa is outside of LWK memory, from: %p,"
|
||||
"pa: %p, cpsize: %d\n", __FUNCTION__, from, pa, cpsize);
|
||||
va = ihk_mc_map_virtual(pa, 1, PTATTR_ACTIVE);
|
||||
memcpy(va, from, cpsize);
|
||||
ihk_mc_unmap_virtual(va, 1, 1);
|
||||
}
|
||||
else {
|
||||
va = phys_to_virt(pa);
|
||||
memcpy(va, from, cpsize);
|
||||
}
|
||||
|
||||
from += cpsize;
|
||||
to += cpsize;
|
||||
|
||||
@@ -12,16 +12,29 @@
|
||||
#include <errno.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <registers.h>
|
||||
#include <mc_perf_event.h>
|
||||
|
||||
extern unsigned int *x86_march_perfmap;
|
||||
extern int running_on_kvm(void);
|
||||
|
||||
#define X86_CR4_PCE 0x00000100
|
||||
|
||||
int perf_counters_discovered = 0;
|
||||
int X86_IA32_NUM_PERF_COUNTERS = 0;
|
||||
unsigned long X86_IA32_PERF_COUNTERS_MASK = 0;
|
||||
int X86_IA32_NUM_FIXED_PERF_COUNTERS = 0;
|
||||
unsigned long X86_IA32_FIXED_PERF_COUNTERS_MASK = 0;
|
||||
|
||||
void x86_init_perfctr(void)
|
||||
{
|
||||
int i = 0;
|
||||
unsigned long reg;
|
||||
unsigned long value = 0;
|
||||
uint64_t op;
|
||||
uint64_t eax;
|
||||
uint64_t ebx;
|
||||
uint64_t ecx;
|
||||
uint64_t edx;
|
||||
|
||||
/* Do not do it on KVM */
|
||||
if (running_on_kvm()) return;
|
||||
@@ -30,12 +43,41 @@ void x86_init_perfctr(void)
|
||||
asm volatile("movq %%cr4, %0" : "=r"(reg));
|
||||
reg |= X86_CR4_PCE;
|
||||
asm volatile("movq %0, %%cr4" : : "r"(reg));
|
||||
|
||||
/* Detect number of supported performance counters */
|
||||
if (!perf_counters_discovered) {
|
||||
/* See Table 35.2 - Architectural MSRs in Vol 3C */
|
||||
op = 0x0a;
|
||||
asm volatile("cpuid" : "=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx):"a"(op));
|
||||
|
||||
X86_IA32_NUM_PERF_COUNTERS = ((eax & 0xFF00) >> 8);
|
||||
X86_IA32_PERF_COUNTERS_MASK = (1 << X86_IA32_NUM_PERF_COUNTERS) - 1;
|
||||
|
||||
X86_IA32_NUM_FIXED_PERF_COUNTERS = (edx & 0x0F);
|
||||
X86_IA32_FIXED_PERF_COUNTERS_MASK =
|
||||
((1UL << X86_IA32_NUM_FIXED_PERF_COUNTERS) - 1) <<
|
||||
X86_IA32_BASE_FIXED_PERF_COUNTERS;
|
||||
|
||||
perf_counters_discovered = 1;
|
||||
kprintf("X86_IA32_NUM_PERF_COUNTERS: %d, X86_IA32_NUM_FIXED_PERF_COUNTERS: %d\n",
|
||||
X86_IA32_NUM_PERF_COUNTERS, X86_IA32_NUM_FIXED_PERF_COUNTERS);
|
||||
}
|
||||
|
||||
/* Clear Fixed Counter Control */
|
||||
value = rdmsr(MSR_PERF_FIXED_CTRL);
|
||||
value &= 0xfffffffffffff000L;
|
||||
wrmsr(MSR_PERF_FIXED_CTRL, value);
|
||||
|
||||
/* Clear Generic Counter Control */
|
||||
for(i = 0; i < X86_IA32_NUM_PERF_COUNTERS; i++) {
|
||||
wrmsr(MSR_IA32_PERFEVTSEL0 + i, 0);
|
||||
}
|
||||
|
||||
/* Enable PMC Control */
|
||||
value = rdmsr(MSR_PERF_GLOBAL_CTRL);
|
||||
value |= X86_IA32_PERF_COUNTERS_MASK;
|
||||
value |= X86_IA32_FIXED_PERF_COUNTERS_MASK;
|
||||
wrmsr(MSR_PERF_GLOBAL_CTRL, value);
|
||||
value = rdmsr(MSR_PERF_GLOBAL_CTRL);
|
||||
value |= X86_IA32_PERF_COUNTERS_MASK;
|
||||
value |= X86_IA32_FIXED_PERF_COUNTERS_MASK;
|
||||
wrmsr(MSR_PERF_GLOBAL_CTRL, value);
|
||||
}
|
||||
|
||||
static int set_perfctr_x86_direct(int counter, int mode, unsigned int value)
|
||||
@@ -68,7 +110,7 @@ static int set_perfctr_x86_direct(int counter, int mode, unsigned int value)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_pmc_x86_direct(int counter, unsigned long val)
|
||||
static int set_pmc_x86_direct(int counter, long val)
|
||||
{
|
||||
unsigned long cnt_bit = 0;
|
||||
|
||||
@@ -76,6 +118,8 @@ static int set_pmc_x86_direct(int counter, unsigned long val)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
val &= 0x000000ffffffffff; // 40bit Mask
|
||||
|
||||
cnt_bit = 1UL << counter;
|
||||
if ( cnt_bit & X86_IA32_PERF_COUNTERS_MASK ) {
|
||||
// set generic pmc
|
||||
@@ -102,7 +146,7 @@ static int set_perfctr_x86(int counter, int event, int mask, int inv, int count,
|
||||
static int set_fixed_counter(int counter, int mode)
|
||||
{
|
||||
unsigned long value = 0;
|
||||
unsigned int ctr_mask = 0x7;
|
||||
unsigned int ctr_mask = 0xf;
|
||||
int counter_idx = counter - X86_IA32_BASE_FIXED_PERF_COUNTERS ;
|
||||
unsigned int set_val = 0;
|
||||
|
||||
@@ -183,6 +227,24 @@ int ihk_mc_perfctr_stop(unsigned long counter_mask)
|
||||
value &= ~counter_mask;
|
||||
wrmsr(MSR_PERF_GLOBAL_CTRL, value);
|
||||
|
||||
if(counter_mask >> 32 & 0x1) {
|
||||
value = rdmsr(MSR_PERF_FIXED_CTRL);
|
||||
value &= ~(0xf);
|
||||
wrmsr(MSR_PERF_FIXED_CTRL, value);
|
||||
}
|
||||
|
||||
if(counter_mask >> 32 & 0x2) {
|
||||
value = rdmsr(MSR_PERF_FIXED_CTRL);
|
||||
value &= ~(0xf << 4);
|
||||
wrmsr(MSR_PERF_FIXED_CTRL, value);
|
||||
}
|
||||
|
||||
if(counter_mask >> 32 & 0x4) {
|
||||
value = rdmsr(MSR_PERF_FIXED_CTRL);
|
||||
value &= ~(0xf << 8);
|
||||
wrmsr(MSR_PERF_FIXED_CTRL, value);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -190,7 +252,7 @@ int ihk_mc_perfctr_stop(unsigned long counter_mask)
|
||||
int ihk_mc_perfctr_fixed_init(int counter, int mode)
|
||||
{
|
||||
unsigned long value = 0;
|
||||
unsigned int ctr_mask = 0x7;
|
||||
unsigned int ctr_mask = 0xf;
|
||||
int counter_idx = counter - X86_IA32_BASE_FIXED_PERF_COUNTERS ;
|
||||
unsigned int set_val = 0;
|
||||
|
||||
@@ -210,6 +272,9 @@ int ihk_mc_perfctr_fixed_init(int counter, int mode)
|
||||
set_val |= 1;
|
||||
}
|
||||
|
||||
// enable PMI on overflow
|
||||
set_val |= 1 << 3;
|
||||
|
||||
set_val <<= counter_idx * 4;
|
||||
value |= set_val;
|
||||
|
||||
@@ -223,7 +288,7 @@ int ihk_mc_perfctr_reset(int counter)
|
||||
return set_pmc_x86_direct(counter, 0);
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_set(int counter, unsigned long val)
|
||||
int ihk_mc_perfctr_set(int counter, long val)
|
||||
{
|
||||
return set_pmc_x86_direct(counter, val);
|
||||
}
|
||||
@@ -297,23 +362,33 @@ unsigned long ihk_mc_perfctr_read_msr(int counter)
|
||||
return retval;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_alloc_counter(unsigned long pmc_status)
|
||||
int ihk_mc_perfctr_alloc_counter(unsigned int *type, unsigned long *config, unsigned long pmc_status)
|
||||
{
|
||||
int ret = -1;
|
||||
int i = 0;
|
||||
int ret = -1;
|
||||
|
||||
// find avail generic counter
|
||||
for(i = 0; i < X86_IA32_NUM_PERF_COUNTERS; i++) {
|
||||
if(*type == PERF_TYPE_HARDWARE) {
|
||||
switch(*config){
|
||||
case PERF_COUNT_HW_INSTRUCTIONS :
|
||||
*type = PERF_TYPE_RAW;
|
||||
*config = 0x5300c0;
|
||||
break;
|
||||
default :
|
||||
// Unexpected config
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else if(*type != PERF_TYPE_RAW) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// find avail generic counter
|
||||
for(i = 0; i < X86_IA32_NUM_PERF_COUNTERS; i++) {
|
||||
if(!(pmc_status & (1 << i))) {
|
||||
ret = i;
|
||||
pmc_status |= (1 << i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(ret < 0){
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -38,6 +38,8 @@ void set_signal(int sig, void *regs0, siginfo_t *info);
|
||||
void check_signal(unsigned long rc, void *regs0, int num);
|
||||
extern unsigned long do_fork(int, unsigned long, unsigned long, unsigned long,
|
||||
unsigned long, unsigned long, unsigned long);
|
||||
extern int get_xsave_size();
|
||||
extern uint64_t get_xsave_mask();
|
||||
|
||||
//#define DEBUG_PRINT_SC
|
||||
|
||||
@@ -54,6 +56,7 @@ uintptr_t debug_constants[] = {
|
||||
offsetof(struct cpu_local_var, current),
|
||||
offsetof(struct cpu_local_var, runq),
|
||||
offsetof(struct cpu_local_var, status),
|
||||
offsetof(struct cpu_local_var, idle),
|
||||
offsetof(struct thread, ctx),
|
||||
offsetof(struct thread, sched_list),
|
||||
offsetof(struct thread, proc),
|
||||
@@ -219,6 +222,7 @@ SYSCALL_DECLARE(rt_sigreturn)
|
||||
struct x86_user_context *regs;
|
||||
struct sigsp ksigsp;
|
||||
struct sigsp *sigsp;
|
||||
int xsavesize = get_xsave_size();
|
||||
|
||||
asm ("movq %%gs:(%1),%0"
|
||||
: "=r"(regs)
|
||||
@@ -265,6 +269,25 @@ SYSCALL_DECLARE(rt_sigreturn)
|
||||
check_signal(0, regs, 0);
|
||||
check_need_resched();
|
||||
}
|
||||
|
||||
if(ksigsp.fpregs && xsavesize){
|
||||
void *fpregs = kmalloc(xsavesize + 64, IHK_MC_AP_NOWAIT);
|
||||
|
||||
if(fpregs){
|
||||
uint64_t xsave_mask = get_xsave_mask();
|
||||
unsigned int low = (unsigned int)xsave_mask;
|
||||
unsigned int high = (unsigned int)(xsave_mask >> 32);
|
||||
struct xsave_struct *kfpregs;
|
||||
|
||||
kfpregs = (void *)((((unsigned long)fpregs) + 63) & ~63);
|
||||
|
||||
if(copy_from_user(kfpregs, ksigsp.fpregs, xsavesize))
|
||||
return -EFAULT;
|
||||
asm volatile("xrstor %0" : : "m"(*kfpregs), "a"(low), "d"(high) : "memory");
|
||||
kfree(fpregs);
|
||||
}
|
||||
}
|
||||
|
||||
return sigsp->sigrc;
|
||||
}
|
||||
|
||||
@@ -707,6 +730,8 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
|
||||
unsigned long *usp; /* user stack */
|
||||
struct sigsp ksigsp;
|
||||
struct sigsp *sigsp;
|
||||
int xsavesize = get_xsave_size();
|
||||
unsigned long fpregs;
|
||||
|
||||
if((k->sa.sa_flags & SA_ONSTACK) &&
|
||||
!(thread->sigstack.ss_flags & SS_DISABLE) &&
|
||||
@@ -719,7 +744,8 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
|
||||
else{
|
||||
usp = (unsigned long *)regs->gpr.rsp;
|
||||
}
|
||||
sigsp = ((struct sigsp *)usp) - 1;
|
||||
fpregs = (unsigned long)usp - xsavesize;
|
||||
sigsp = ((struct sigsp *)fpregs) - 1;
|
||||
sigsp = (struct sigsp *)((unsigned long)sigsp & 0xfffffffffffffff0UL);
|
||||
memset(&ksigsp, '\0', sizeof ksigsp);
|
||||
|
||||
@@ -751,6 +777,33 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
|
||||
ksigsp.restart = isrestart(num, rc, sig, k->sa.sa_flags & SA_RESTART);
|
||||
if(num != 0 && rc == -EINTR && sig == SIGCHLD)
|
||||
ksigsp.restart = 1;
|
||||
if(xsavesize){
|
||||
uint64_t xsave_mask = get_xsave_mask();
|
||||
unsigned int low = (unsigned int)xsave_mask;
|
||||
unsigned int high = (unsigned int)(xsave_mask >> 32);
|
||||
void *_kfpregs = kmalloc(xsavesize + 64, IHK_MC_AP_NOWAIT);
|
||||
struct xsave_struct *kfpregs;
|
||||
|
||||
if(!_kfpregs){
|
||||
kfree(pending);
|
||||
kfree(_kfpregs);
|
||||
kprintf("do_signal,no space available\n");
|
||||
terminate(0, sig);
|
||||
return;
|
||||
}
|
||||
kfpregs = (void *)((((unsigned long)_kfpregs) + 63) & ~63);
|
||||
memset(kfpregs, '\0', xsavesize);
|
||||
asm volatile("xsave %0" : : "m"(*kfpregs), "a"(low), "d"(high) : "memory");
|
||||
if(copy_to_user((void *)fpregs, kfpregs, xsavesize)){
|
||||
kfree(pending);
|
||||
kfree(_kfpregs);
|
||||
kprintf("do_signal,write_process_vm failed\n");
|
||||
terminate(0, sig);
|
||||
return;
|
||||
}
|
||||
ksigsp.fpregs = (void *)fpregs;
|
||||
kfree(_kfpregs);
|
||||
}
|
||||
memcpy(&ksigsp.info, &pending->info, sizeof(siginfo_t));
|
||||
|
||||
if(copy_to_user(sigsp, &ksigsp, sizeof ksigsp)){
|
||||
@@ -761,9 +814,6 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
usp = (unsigned long *)sigsp;
|
||||
usp--;
|
||||
*usp = (unsigned long)k->sa.sa_restorer;
|
||||
@@ -1256,7 +1306,7 @@ done:
|
||||
if (status != PS_RUNNING) {
|
||||
if(sig == SIGKILL){
|
||||
/* Wake up the target only when stopped by ptrace-reporting */
|
||||
sched_wakeup_thread(tthread, PS_TRACED | PS_STOPPED);
|
||||
sched_wakeup_thread(tthread, PS_TRACED | PS_STOPPED | PS_INTERRUPTIBLE);
|
||||
}
|
||||
else if(sig == SIGCONT || ptracecont == 1){
|
||||
/* Wake up the target only when stopped by SIGSTOP */
|
||||
|
||||
@@ -13,27 +13,37 @@
|
||||
# Note that the script does not output anything unless an error occurs.
|
||||
|
||||
prefix="@prefix@"
|
||||
BINDIR="@BINDIR@"
|
||||
SBINDIR="@SBINDIR@"
|
||||
KMODDIR="@KMODDIR@"
|
||||
KERNDIR="@KERNDIR@"
|
||||
BINDIR="${prefix}/bin"
|
||||
SBINDIR="${prefix}/sbin"
|
||||
KMODDIR="${prefix}/kmod"
|
||||
KERNDIR="${prefix}/@TARGET@/kernel"
|
||||
ENABLE_MCOVERLAYFS="@ENABLE_MCOVERLAYFS@"
|
||||
|
||||
mem="512M@0"
|
||||
cpus=""
|
||||
|
||||
INTERVAL=1
|
||||
LOGMODE=0
|
||||
while getopts :i:k: OPT
|
||||
facility="LOG_LOCAL6"
|
||||
chown_option=`logname 2> /dev/null`
|
||||
|
||||
while getopts :i:k:c:m:o:f: OPT
|
||||
do
|
||||
case ${OPT} in
|
||||
f) facility=${OPTARG}
|
||||
;;
|
||||
o) chown_option=${OPTARG}
|
||||
;;
|
||||
i) INTERVAL=${OPTARG}
|
||||
expr "${INTERVAL}" + 1 > /dev/null 2>&1
|
||||
if [ $? -ge 2 ]
|
||||
then
|
||||
echo "invalid -i value"
|
||||
echo "invalid -i value" >&2
|
||||
exit 1
|
||||
fi
|
||||
if [ ${INTERVAL} -le 0 ]
|
||||
then
|
||||
echo "invalid -i value"
|
||||
echo "invalid -i value" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
@@ -41,22 +51,24 @@ do
|
||||
expr "${LOGMODE}" + 1 > /dev/null 2>&1
|
||||
if [ $? -ge 2 ]
|
||||
then
|
||||
echo "invalid -k value"
|
||||
echo "invalid -k value" >&2
|
||||
exit 1
|
||||
fi
|
||||
if [ ${LOGMODE} -lt 0 -o ${LOGMODE} -gt 2 ]
|
||||
then
|
||||
echo "invalid -k value"
|
||||
echo "invalid -k value" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
*) echo "invalid option -${OPT}"
|
||||
c) cpus=${OPTARG}
|
||||
;;
|
||||
m) mem=${OPTARG}
|
||||
;;
|
||||
*) echo "invalid option -${OPT}" >&2
|
||||
exit 1
|
||||
esac
|
||||
done
|
||||
|
||||
mem="512M@0"
|
||||
cpus=""
|
||||
ihk_ikc_irq_core=0
|
||||
|
||||
release=`uname -r`
|
||||
@@ -79,12 +91,7 @@ if [ "$cpus" == "" ]; then
|
||||
# Use the second half of the cores
|
||||
let nr_cpus="$nr_cpus / 2"
|
||||
cpus=`lscpu --parse | awk -F"," '{if ($4 == 0) print $1}' | tail -n $nr_cpus | xargs echo -n | sed 's/ /,/g'`
|
||||
if [ "$cpus" == "" ]; then echo "error: no available CPUs on NUMA node 0?"; exit; fi
|
||||
fi
|
||||
|
||||
# Remove delegator if loaded
|
||||
if [ "`lsmod | grep mcctrl`" != "" ]; then
|
||||
if ! rmmod mcctrl; then echo "error: removing mcctrl"; exit; fi
|
||||
if [ "$cpus" == "" ]; then echo "error: no available CPUs on NUMA node 0?" >&2; exit 1; fi
|
||||
fi
|
||||
|
||||
# Remove mcoverlay if loaded
|
||||
@@ -95,13 +102,13 @@ if [ "$enable_mcoverlay" == "yes" ]; then
|
||||
if [ "`cat /proc/mounts | grep /tmp/mcos/linux_proc`" != "" ]; then umount -l /tmp/mcos/linux_proc; fi
|
||||
if [ "`cat /proc/mounts | grep /tmp/mcos`" != "" ]; then umount -l /tmp/mcos; fi
|
||||
if [ -e /tmp/mcos ]; then rm -rf /tmp/mcos; fi
|
||||
if ! rmmod mcoverlay; then echo "error: removing mcoverlay"; exit; fi
|
||||
if ! rmmod mcoverlay; then echo "error: removing mcoverlay" >&2; exit 1; fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Load IHK if not loaded
|
||||
if [ "`lsmod | grep ihk`" == "" ]; then
|
||||
if ! insmod ${KMODDIR}/ihk.ko; then echo "error: loading ihk"; exit; fi;
|
||||
if ! insmod ${KMODDIR}/ihk.ko; then echo "error: loading ihk" >&2; exit 1; fi;
|
||||
fi
|
||||
|
||||
# Load IHK-SMP if not loaded and reserve CPUs and memory
|
||||
@@ -113,57 +120,61 @@ if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$ihk_irq" == "" ]; then echo "error: no IRQ available"; exit; fi
|
||||
if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core; then echo "error: loading ihk-smp-x86"; exit; fi;
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then echo "error: reserving CPUs"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve mem ${mem}; then echo "error: reserving memory"; exit; fi
|
||||
if [ "$ihk_irq" == "" ]; then echo "error: no IRQ available" >&2; exit 1; fi
|
||||
if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core; then echo "error: loading ihk-smp-x86" >&2; exit 1; fi;
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then echo "error: reserving CPUs" >&2; exit 1; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve mem ${mem}; then echo "error: reserving memory" >&2; exit 1; fi
|
||||
# If loaded, but no resources allocated, get CPUs and memory
|
||||
else
|
||||
if ! ${SBINDIR}/ihkconfig 0 query cpu > /dev/null; then echo "error: querying cpus"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 query cpu > /dev/null; then echo "error: querying cpus" >&2; exit 1; fi
|
||||
cpus_allocated=`${SBINDIR}/ihkosctl 0 query cpu`
|
||||
if [ "$cpus_allocated" == "" ]; then
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then echo "error: reserving CPUs"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then echo "error: reserving CPUs" >&2; exit 1; fi
|
||||
fi
|
||||
|
||||
if ! ${SBINDIR}/ihkosctl 0 query mem > /dev/null; then echo "error: querying memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 query mem > /dev/null; then echo "error: querying memory" >&2; exit 1; fi
|
||||
mem_allocated=`${SBINDIR}/ihkosctl 0 query mem`
|
||||
if [ "$mem_allocated" == "" ]; then
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve mem ${mem}; then echo "error: reserving memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 reserve mem ${mem}; then echo "error: reserving memory" >&2; exit 1; fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Load mcctrl if not loaded
|
||||
if [ "`lsmod | grep mcctrl`" == "" ]; then
|
||||
if ! insmod ${KMODDIR}/mcctrl.ko; then echo "error: inserting mcctrl.ko" >&2; exit 1; fi
|
||||
fi
|
||||
|
||||
# Check for existing OS instance and destroy
|
||||
if [ -c /dev/mcos0 ]; then
|
||||
# Query CPU cores and memory of OS instance so that the same values are used as previously
|
||||
if ! ${SBINDIR}/ihkosctl 0 query cpu > /dev/null; then echo "error: querying cpus"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 query cpu > /dev/null; then echo "error: querying cpus" >&2; exit 1; fi
|
||||
cpus=`${SBINDIR}/ihkosctl 0 query cpu`
|
||||
if ! ${SBINDIR}/ihkosctl 0 query mem > /dev/null; then echo "error: querying memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 query mem > /dev/null; then echo "error: querying memory" >&2; exit 1; fi
|
||||
mem=`${SBINDIR}/ihkosctl 0 query mem`
|
||||
|
||||
if ! ${SBINDIR}/ihkconfig 0 destroy 0; then echo "warning: destroy failed"; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 destroy 0; then echo "warning: destroy failed" >&2; fi
|
||||
else
|
||||
# Otherwise query IHK-SMP for resources
|
||||
if ! ${SBINDIR}/ihkconfig 0 query cpu > /dev/null; then echo "error: querying cpus"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 query cpu > /dev/null; then echo "error: querying cpus" >&2; exit 1; fi
|
||||
cpus=`${SBINDIR}/ihkconfig 0 query cpu`
|
||||
if ! ${SBINDIR}/ihkconfig 0 query mem > /dev/null; then echo "error: querying memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 query mem > /dev/null; then echo "error: querying memory" >&2; exit 1; fi
|
||||
mem=`${SBINDIR}/ihkconfig 0 query mem`
|
||||
fi
|
||||
|
||||
if ! ${SBINDIR}/ihkconfig 0 create; then echo "error: create"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 assign cpu ${cpus}; then echo "error: assign CPUs"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 assign mem ${mem}; then echo "error: assign memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 load ${KERNDIR}/mckernel.img; then echo "error: loading kernel image"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 kargs "hidos ksyslogd=${LOGMODE}"; then echo "error: setting kernel arguments"; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 boot; then echo "error: booting"; exit; fi
|
||||
if ! insmod ${KMODDIR}/mcctrl.ko; then echo "error: inserting mcctrl.ko"; exit; fi
|
||||
if ! chown `logname` /dev/mcd* /dev/mcos*; then echo "error: chowning device files"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 create; then echo "error: create" >&2; exit; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 assign cpu ${cpus}; then echo "error: assign CPUs" >&2; exit 1; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 assign mem ${mem}; then echo "error: assign memory" >&2; exit 1; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 load ${KERNDIR}/mckernel.img; then echo "error: loading kernel image" >&2; exit 1; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 kargs "hidos ksyslogd=${LOGMODE}"; then echo "error: setting kernel arguments" >&2; exit 1; fi
|
||||
if ! ${SBINDIR}/ihkosctl 0 boot; then echo "error: booting" >&2; exit 1; fi
|
||||
if ! chown ${chown_option} /dev/mcd* /dev/mcos*; then echo "error: chowning device files" >&2; exit 1; fi
|
||||
|
||||
if [ "$enable_mcoverlay" == "yes" ]; then
|
||||
if [ ! -e /tmp/mcos ]; then mkdir -p /tmp/mcos; fi
|
||||
if ! mount -t tmpfs tmpfs /tmp/mcos; then echo "error: mount /tmp/mcos"; exit; fi
|
||||
if ! mount -t tmpfs tmpfs /tmp/mcos; then echo "error: mount /tmp/mcos" >&2; exit 1; fi
|
||||
if [ ! -e /tmp/mcos/linux_proc ]; then mkdir -p /tmp/mcos/linux_proc; fi
|
||||
if ! mount --bind /proc /tmp/mcos/linux_proc; then echo "error: mount /tmp/mcos/linux_proc"; exit; fi
|
||||
if ! insmod ${KMODDIR}/mcoverlay.ko; then echo "error: inserting mcoverlay.ko"; exit; fi
|
||||
if ! mount --bind /proc /tmp/mcos/linux_proc; then echo "error: mount /tmp/mcos/linux_proc" >&2; exit 1; fi
|
||||
if ! insmod ${KMODDIR}/mcoverlay.ko; then echo "error: inserting mcoverlay.ko" >&2; exit 1; fi
|
||||
while [ ! -e /proc/mcos0 ]
|
||||
do
|
||||
sleep 1
|
||||
@@ -171,7 +182,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
|
||||
if [ ! -e /tmp/mcos/mcos0_proc ]; then mkdir -p /tmp/mcos/mcos0_proc; fi
|
||||
if [ ! -e /tmp/mcos/mcos0_proc_upper ]; then mkdir -p /tmp/mcos/mcos0_proc_upper; fi
|
||||
if [ ! -e /tmp/mcos/mcos0_proc_work ]; then mkdir -p /tmp/mcos/mcos0_proc_work; fi
|
||||
if ! mount -t mcoverlay mcoverlay -o lowerdir=/proc/mcos0:/proc,upperdir=/tmp/mcos/mcos0_proc_upper,workdir=/tmp/mcos/mcos0_proc_work,nocopyupw,nofscheck /tmp/mcos/mcos0_proc; then echo "error: mount /tmp/mcos/mcos0_proc"; exit; fi
|
||||
if ! mount -t mcoverlay mcoverlay -o lowerdir=/proc/mcos0:/proc,upperdir=/tmp/mcos/mcos0_proc_upper,workdir=/tmp/mcos/mcos0_proc_work,nocopyupw,nofscheck /tmp/mcos/mcos0_proc; then echo "error: mount /tmp/mcos/mcos0_proc" >&2; exit 1; fi
|
||||
mount --make-rprivate /proc
|
||||
while [ ! -e /sys/devices/virtual/mcos/mcos0/sys ]
|
||||
do
|
||||
@@ -180,7 +191,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
|
||||
if [ ! -e /tmp/mcos/mcos0_sys ]; then mkdir -p /tmp/mcos/mcos0_sys; fi
|
||||
if [ ! -e /tmp/mcos/mcos0_sys_upper ]; then mkdir -p /tmp/mcos/mcos0_sys_upper; fi
|
||||
if [ ! -e /tmp/mcos/mcos0_sys_work ]; then mkdir -p /tmp/mcos/mcos0_sys_work; fi
|
||||
if ! mount -t mcoverlay mcoverlay -o lowerdir=/sys/devices/virtual/mcos/mcos0/sys:/sys,upperdir=/tmp/mcos/mcos0_sys_upper,workdir=/tmp/mcos/mcos0_sys_work,nocopyupw,nofscheck /tmp/mcos/mcos0_sys; then echo "error: mount /tmp/mcos/mcos0_sys"; exit; fi
|
||||
if ! mount -t mcoverlay mcoverlay -o lowerdir=/sys/devices/virtual/mcos/mcos0/sys:/sys,upperdir=/tmp/mcos/mcos0_sys_upper,workdir=/tmp/mcos/mcos0_sys_work,nocopyupw,nofscheck /tmp/mcos/mcos0_sys; then echo "error: mount /tmp/mcos/mcos0_sys" >&2; exit 1; fi
|
||||
mount --make-rprivate /sys
|
||||
for cpuid in `find /sys/devices/system/cpu/* -maxdepth 0 -name "cpu[0123456789]*" -printf "%f "`; do
|
||||
if [ ! -e "/sys/devices/virtual/mcos/mcos0/sys/devices/system/cpu/$cpuid" ]; then
|
||||
@@ -195,5 +206,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
|
||||
fi
|
||||
if [ ${LOGMODE} -ne 0 ]
|
||||
then
|
||||
SBINDIR=${SBINDIR} ${SBINDIR}/mcklogd -i ${INTERVAL}
|
||||
# mcklogd survives when McKernel isn't shut down by mcstop+release.sh
|
||||
pkill mcklogd
|
||||
SBINDIR=${SBINDIR} ${SBINDIR}/mcklogd -i ${INTERVAL} -f ${facility}
|
||||
fi
|
||||
|
||||
@@ -17,31 +17,37 @@ mem=""
|
||||
cpus=""
|
||||
|
||||
# No SMP module? Exit.
|
||||
if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then exit; fi
|
||||
|
||||
# Remove delegator if loaded
|
||||
if [ "`lsmod | grep mcctrl`" != "" ]; then
|
||||
if ! rmmod mcctrl; then echo "error: removing mcctrl"; exit; fi
|
||||
fi
|
||||
if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then exit 0; fi
|
||||
|
||||
# Destroy all LWK instances
|
||||
for i in /dev/mcos*; do
|
||||
ind=`echo $i|cut -c10-`;
|
||||
if ! ${SBINDIR}/ihkconfig 0 destroy $ind; then echo "error: destroying LWK instance $ind failed"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 destroy $ind; then echo "error: destroying LWK instance $ind failed" >&2; exit 1; fi
|
||||
done
|
||||
|
||||
# Query IHK-SMP resources and release them
|
||||
if ! ${SBINDIR}/ihkconfig 0 query cpu > /dev/null; then echo "error: querying cpus"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 query cpu > /dev/null; then echo "error: querying cpus" >&2; exit 1; fi
|
||||
cpus=`${SBINDIR}/ihkconfig 0 query cpu`
|
||||
if ! ${SBINDIR}/ihkconfig 0 release cpu $cpus > /dev/null; then echo "error: releasing CPUs"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 release cpu $cpus > /dev/null; then echo "error: releasing CPUs" >&2; exit 1; fi
|
||||
|
||||
if ! ${SBINDIR}/ihkconfig 0 query mem > /dev/null; then echo "error: querying memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 query mem > /dev/null; then echo "error: querying memory" >&2; exit 1; fi
|
||||
mem=`${SBINDIR}/ihkconfig 0 query mem`
|
||||
if ! ${SBINDIR}/ihkconfig 0 release mem $mem > /dev/null; then echo "error: releasing memory"; exit; fi
|
||||
if ! ${SBINDIR}/ihkconfig 0 release mem $mem > /dev/null; then echo "error: releasing memory" >&2; exit 1; fi
|
||||
|
||||
# Remove delegator if loaded
|
||||
if [ "`lsmod | grep mcctrl`" != "" ]; then
|
||||
if ! rmmod mcctrl; then echo "error: removing mcctrl" >&2; exit 1; fi
|
||||
fi
|
||||
|
||||
# Remove SMP module
|
||||
if [ "`lsmod | grep ihk_smp_x86`" != "" ]; then
|
||||
if ! rmmod ihk_smp_x86; then echo "error: removing ihk_smp_x86"; exit; fi
|
||||
if ! rmmod ihk_smp_x86; then echo "error: removing ihk_smp_x86" >&2; exit 1; fi
|
||||
fi
|
||||
|
||||
# Remove core module
|
||||
if [ "`lsmod | grep -E 'ihk\s' | awk '{print $1}'`" != "" ]; then
|
||||
if ! rmmod ihk; then echo "error: removing ihk" >&2; exit 1; fi
|
||||
fi
|
||||
|
||||
# Stop mcklogd
|
||||
pkill mcklogd
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <linux/version.h>
|
||||
#include "../../../../config.h"
|
||||
#include "../../config.h"
|
||||
#include "../../mcctrl.h"
|
||||
|
||||
#ifdef MCCTRL_KSYM_vdso_image_64
|
||||
|
||||
@@ -255,7 +255,7 @@ void __init binfmt_mcexec_init(void)
|
||||
insert_binfmt(&mcexec_format);
|
||||
}
|
||||
|
||||
void __exit binfmt_mcexec_exit(void)
|
||||
void binfmt_mcexec_exit(void)
|
||||
{
|
||||
unregister_binfmt(&mcexec_format);
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include <linux/fs.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/io.h>
|
||||
@@ -802,7 +803,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
|
||||
}
|
||||
|
||||
LIST_HEAD(mckernel_exec_files);
|
||||
DEFINE_SPINLOCK(mckernel_exec_file_lock);
|
||||
DEFINE_SEMAPHORE(mckernel_exec_file_lock);
|
||||
|
||||
|
||||
struct mckernel_exec_file {
|
||||
@@ -889,7 +890,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
|
||||
goto out_put_file;
|
||||
}
|
||||
|
||||
spin_lock_irq(&mckernel_exec_file_lock);
|
||||
down(&mckernel_exec_file_lock);
|
||||
/* Find previous file (if exists) and drop it */
|
||||
list_for_each_entry(mcef_iter, &mckernel_exec_files, list) {
|
||||
if (mcef_iter->os == os && mcef_iter->pid == task_tgid_vnr(current)) {
|
||||
@@ -910,7 +911,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
|
||||
/* Create /proc/self/exe entry */
|
||||
add_pid_entry(os_ind, task_tgid_vnr(current));
|
||||
proc_exe_link(os_ind, task_tgid_vnr(current), fullpath);
|
||||
spin_unlock(&mckernel_exec_file_lock);
|
||||
up(&mckernel_exec_file_lock);
|
||||
|
||||
dprintk("%d open_exec and holding file: %s\n", (int)task_tgid_vnr(current), filename);
|
||||
|
||||
@@ -937,7 +938,7 @@ int mcexec_close_exec(ihk_os_t os)
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irq(&mckernel_exec_file_lock);
|
||||
down(&mckernel_exec_file_lock);
|
||||
list_for_each_entry(mcef, &mckernel_exec_files, list) {
|
||||
if (mcef->os == os && mcef->pid == task_tgid_vnr(current)) {
|
||||
allow_write_access(mcef->fp);
|
||||
@@ -950,7 +951,7 @@ int mcexec_close_exec(ihk_os_t os)
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&mckernel_exec_file_lock);
|
||||
up(&mckernel_exec_file_lock);
|
||||
|
||||
return (found ? 0 : EINVAL);
|
||||
}
|
||||
|
||||
@@ -82,79 +82,109 @@ static struct ihk_os_user_call mcctrl_uc[OS_MAX_MINOR];
|
||||
|
||||
static ihk_os_t os[OS_MAX_MINOR];
|
||||
|
||||
ihk_os_t
|
||||
osnum_to_os(int n)
|
||||
ihk_os_t osnum_to_os(int n)
|
||||
{
|
||||
return os[n];
|
||||
}
|
||||
|
||||
static int __init mcctrl_init(void)
|
||||
/* OS event notifier implementation */
|
||||
int mcctrl_os_boot_notifier(int os_index)
|
||||
{
|
||||
int i;
|
||||
int rc;
|
||||
|
||||
rc = -ENOENT;
|
||||
for(i = 0; i < OS_MAX_MINOR; i++){
|
||||
os[i] = ihk_host_find_os(i, NULL);
|
||||
if (os[i]) {
|
||||
printk("OS #%d found.\n", i);
|
||||
rc = 0;
|
||||
}
|
||||
}
|
||||
if(rc){
|
||||
printk("OS not found.\n");
|
||||
return rc;
|
||||
os[os_index] = ihk_host_find_os(os_index, NULL);
|
||||
if (!os[os_index]) {
|
||||
printk("mcctrl: error: OS ID %d couldn't be found\n", os_index);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for(i = 0; i < OS_MAX_MINOR; i++){
|
||||
if (os[i]) {
|
||||
if (prepare_ikc_channels(os[i]) != 0) {
|
||||
printk("Preparing syscall channels failed.\n");
|
||||
os[i] = NULL;
|
||||
}
|
||||
}
|
||||
if (prepare_ikc_channels(os[os_index]) != 0) {
|
||||
printk("mcctrl: error: preparing IKC channels for OS %d\n", os_index);
|
||||
|
||||
os[os_index] = NULL;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
memcpy(mcctrl_uc + os_index, &mcctrl_uc_proto, sizeof mcctrl_uc_proto);
|
||||
|
||||
rc = ihk_os_register_user_call_handlers(os[os_index], mcctrl_uc + os_index);
|
||||
if (rc < 0) {
|
||||
destroy_ikc_channels(os[os_index]);
|
||||
printk("mcctrl: error: registering callbacks for OS %d\n", os_index);
|
||||
|
||||
goto error_cleanup_channels;
|
||||
}
|
||||
|
||||
procfs_init(os_index);
|
||||
printk("mcctrl: OS ID %d boot event handled\n", os_index);
|
||||
|
||||
return 0;
|
||||
|
||||
error_cleanup_channels:
|
||||
destroy_ikc_channels(os[os_index]);
|
||||
|
||||
os[os_index] = NULL;
|
||||
return rc;
|
||||
}
|
||||
|
||||
int mcctrl_os_shutdown_notifier(int os_index)
|
||||
{
|
||||
sysfsm_cleanup(os[os_index]);
|
||||
free_topology_info(os[os_index]);
|
||||
ihk_os_unregister_user_call_handlers(os[os_index], mcctrl_uc + os_index);
|
||||
destroy_ikc_channels(os[os_index]);
|
||||
procfs_exit(os_index);
|
||||
|
||||
printk("mcctrl: OS ID %d shutdown event handled\n", os_index);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ihk_os_notifier_ops mcctrl_os_notifier_ops = {
|
||||
.boot = mcctrl_os_boot_notifier,
|
||||
.shutdown = mcctrl_os_shutdown_notifier,
|
||||
};
|
||||
|
||||
static struct ihk_os_notifier mcctrl_os_notifier = {
|
||||
.ops = &mcctrl_os_notifier_ops,
|
||||
};
|
||||
|
||||
static int __init mcctrl_init(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
#ifndef DO_USER_MODE
|
||||
mcctrl_syscall_init();
|
||||
#endif
|
||||
|
||||
rus_page_hash_init();
|
||||
|
||||
for(i = 0; i < OS_MAX_MINOR; i++){
|
||||
if (os[i]) {
|
||||
memcpy(mcctrl_uc + i, &mcctrl_uc_proto, sizeof mcctrl_uc_proto);
|
||||
rc = ihk_os_register_user_call_handlers(os[i], mcctrl_uc + i);
|
||||
if(rc < 0){
|
||||
destroy_ikc_channels(os[i]);
|
||||
os[i] = NULL;
|
||||
}
|
||||
procfs_init(i);
|
||||
}
|
||||
}
|
||||
|
||||
binfmt_mcexec_init();
|
||||
|
||||
return 0;
|
||||
if ((ret = ihk_host_register_os_notifier(&mcctrl_os_notifier)) != 0) {
|
||||
printk("mcctrl: error: registering OS notifier\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
printk("mcctrl: initialized successfully.\n");
|
||||
return ret;
|
||||
|
||||
error:
|
||||
binfmt_mcexec_exit();
|
||||
rus_page_hash_put_pages();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit mcctrl_exit(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
binfmt_mcexec_exit();
|
||||
printk("mcctrl: unregistered.\n");
|
||||
for(i = 0; i < OS_MAX_MINOR; i++){
|
||||
if(os[i]){
|
||||
sysfsm_cleanup(os[i]);
|
||||
free_topology_info(os[i]);
|
||||
ihk_os_unregister_user_call_handlers(os[i], mcctrl_uc + i);
|
||||
destroy_ikc_channels(os[i]);
|
||||
procfs_exit(i);
|
||||
}
|
||||
if (ihk_host_deregister_os_notifier(&mcctrl_os_notifier) != 0) {
|
||||
printk("mcctrl: warning: failed to deregister OS notifier??\n");
|
||||
}
|
||||
|
||||
binfmt_mcexec_exit();
|
||||
rus_page_hash_put_pages();
|
||||
|
||||
printk("mcctrl: unregistered.\n");
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <linux/resource.h>
|
||||
#include "mcctrl.h"
|
||||
#include <linux/version.h>
|
||||
#include <linux/semaphore.h>
|
||||
|
||||
//#define PROCFS_DEBUG
|
||||
|
||||
@@ -81,7 +82,7 @@ struct procfs_list_entry {
|
||||
* file.
|
||||
*/
|
||||
LIST_HEAD(procfs_file_list);
|
||||
static ihk_spinlock_t procfs_file_list_lock;
|
||||
DEFINE_SEMAPHORE(procfs_file_list_lock);
|
||||
|
||||
static char *
|
||||
getpath(struct procfs_list_entry *e, char *buf, int bufsize)
|
||||
@@ -375,67 +376,62 @@ _add_tid_entry(int osnum, int pid, int tid, const struct cred *cred)
|
||||
void
|
||||
add_tid_entry(int osnum, int pid, int tid)
|
||||
{
|
||||
unsigned long irqflag;
|
||||
const struct cred *cred = get_pid_cred(pid);
|
||||
|
||||
if(!cred)
|
||||
return;
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
_add_tid_entry(osnum, pid, tid, cred);
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
void
|
||||
add_pid_entry(int osnum, int pid)
|
||||
{
|
||||
struct procfs_list_entry *parent;
|
||||
unsigned long irqflag;
|
||||
const struct cred *cred = get_pid_cred(pid);
|
||||
|
||||
if(!cred)
|
||||
return;
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
parent = get_pid_entry(osnum, pid);
|
||||
add_procfs_entries(parent, pid_entry_stuff, cred->uid, cred->gid);
|
||||
_add_tid_entry(osnum, pid, pid, cred);
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
void
|
||||
delete_tid_entry(int osnum, int pid, int tid)
|
||||
{
|
||||
unsigned long irqflag;
|
||||
struct procfs_list_entry *e;
|
||||
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
e = find_tid_entry(osnum, pid, tid);
|
||||
if(e)
|
||||
delete_procfs_entries(e);
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
void
|
||||
delete_pid_entry(int osnum, int pid)
|
||||
{
|
||||
unsigned long irqflag;
|
||||
struct procfs_list_entry *e;
|
||||
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
e = find_pid_entry(osnum, pid);
|
||||
if(e)
|
||||
delete_procfs_entries(e);
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
void
|
||||
proc_exe_link(int osnum, int pid, const char *path)
|
||||
{
|
||||
struct procfs_list_entry *parent;
|
||||
unsigned long irqflag;
|
||||
kuid_t uid = KUIDT_INIT(0);
|
||||
kgid_t gid = KGIDT_INIT(0);
|
||||
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
parent = find_pid_entry(osnum, pid);
|
||||
if(parent){
|
||||
struct procfs_list_entry *task;
|
||||
@@ -451,7 +447,7 @@ proc_exe_link(int osnum, int pid, const char *path)
|
||||
uid, gid, path);
|
||||
}
|
||||
}
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -463,14 +459,13 @@ void
|
||||
procfs_init(int osnum)
|
||||
{
|
||||
struct procfs_list_entry *parent;
|
||||
unsigned long irqflag;
|
||||
kuid_t uid = KUIDT_INIT(0);
|
||||
kgid_t gid = KGIDT_INIT(0);
|
||||
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
parent = get_base_entry(osnum);
|
||||
add_procfs_entries(parent, base_entry_stuff, uid, gid);
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -481,14 +476,13 @@ procfs_init(int osnum)
|
||||
void
|
||||
procfs_exit(int osnum)
|
||||
{
|
||||
unsigned long irqflag;
|
||||
struct procfs_list_entry *e;
|
||||
|
||||
irqflag = ihk_ikc_spinlock_lock(&procfs_file_list_lock);
|
||||
down(&procfs_file_list_lock);
|
||||
e = find_base_entry(osnum);
|
||||
if(e)
|
||||
delete_procfs_entries(e);
|
||||
ihk_ikc_spinlock_unlock(&procfs_file_list_lock, irqflag);
|
||||
up(&procfs_file_list_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -482,7 +482,8 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
|
||||
|
||||
list_for_each_entry(ppd_iter, &usrdata->per_proc_list, list) {
|
||||
if (ppd_iter->pid == task_tgid_vnr(current)) {
|
||||
if (ppd_iter->pid == task_tgid_vnr(current) ||
|
||||
ppd_iter->pid == vma->vm_mm->owner->pid) {
|
||||
ppd = ppd_iter;
|
||||
break;
|
||||
}
|
||||
@@ -899,7 +900,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
|
||||
|
||||
error = vfs_fstat(fd, &st);
|
||||
if (error) {
|
||||
dprintk("pager_req_create(%d,%lx):vfs_stat failed. %d\n", fd, (long)result_pa, error);
|
||||
printk("pager_req_create(%d,%lx):vfs_stat failed. %d\n", fd, (long)result_pa, error);
|
||||
goto out;
|
||||
}
|
||||
if (S_ISCHR(st.mode) && (MAJOR(st.rdev) == 1)) {
|
||||
@@ -914,7 +915,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
|
||||
file = fget(fd);
|
||||
if (!file) {
|
||||
error = -EBADF;
|
||||
dprintk("pager_req_create(%d,%lx):file not found. %d\n", fd, (long)result_pa, error);
|
||||
printk("pager_req_create(%d,%lx):file not found. %d\n", fd, (long)result_pa, error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -937,7 +938,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
|
||||
}
|
||||
if (!(maxprot & PROT_READ)) {
|
||||
error = -EACCES;
|
||||
dprintk("pager_req_create(%d,%lx):cannot read file. %d\n", fd, (long)result_pa, error);
|
||||
printk("pager_req_create(%d,%lx):cannot read file. %d\n", fd, (long)result_pa, error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1208,7 +1209,8 @@ struct pager_map_result {
|
||||
int8_t padding[4];
|
||||
};
|
||||
|
||||
static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off, uintptr_t result_rpa)
|
||||
static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off,
|
||||
uintptr_t result_rpa, int prot_and_flags)
|
||||
{
|
||||
const ihk_device_t dev = ihk_os_to_dev(os);
|
||||
const off_t pgoff = off / PAGE_SIZE;
|
||||
@@ -1236,27 +1238,33 @@ static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off, uintptr_t r
|
||||
}
|
||||
|
||||
maxprot = 0;
|
||||
if (file->f_mode & FMODE_READ) {
|
||||
if ((file->f_mode & FMODE_READ) &&
|
||||
(prot_and_flags ? (prot_and_flags & PROT_READ) : 1)) {
|
||||
maxprot |= PROT_READ;
|
||||
}
|
||||
if (file->f_mode & FMODE_WRITE) {
|
||||
if ((file->f_mode & FMODE_WRITE) &&
|
||||
(prot_and_flags ? (prot_and_flags & PROT_WRITE) : 1)) {
|
||||
maxprot |= PROT_WRITE;
|
||||
}
|
||||
if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
|
||||
if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC) &&
|
||||
(prot_and_flags ? (prot_and_flags & PROT_EXEC) : 1)) {
|
||||
maxprot |= PROT_EXEC;
|
||||
}
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
#define ANY_WHERE 0
|
||||
if (prot_and_flags & MAP_LOCKED) prot_and_flags |= MAP_POPULATE;
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
|
||||
va = do_mmap_pgoff(file, ANY_WHERE, len, maxprot, MAP_SHARED, pgoff);
|
||||
va = do_mmap_pgoff(file, ANY_WHERE, len, maxprot,
|
||||
MAP_SHARED | (prot_and_flags & (MAP_POPULATE | MAP_LOCKED)), pgoff);
|
||||
#endif
|
||||
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
|
||||
va = vm_mmap(file, ANY_WHERE, len, maxprot, MAP_SHARED, pgoff << PAGE_SHIFT);
|
||||
va = vm_mmap(file, ANY_WHERE, len, maxprot, MAP_SHARED |
|
||||
(prot_and_flags & (MAP_POPULATE | MAP_LOCKED)), pgoff << PAGE_SHIFT);
|
||||
#endif
|
||||
|
||||
if (IS_ERR_VALUE(va)) {
|
||||
@@ -1270,8 +1278,8 @@ static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off, uintptr_t r
|
||||
pager->map_len = len;
|
||||
pager->map_off = off;
|
||||
|
||||
dprintk("pager_req_map(%s): 0x%lx - 0x%lx (len: %lu)\n",
|
||||
file->f_dentry->d_name.name, va, va + len, len);
|
||||
dprintk("pager_req_map(%s): 0x%lx - 0x%lx (len: %lu), map_off: %lu\n",
|
||||
file->f_dentry->d_name.name, va, va + len, len, off);
|
||||
|
||||
phys = ihk_device_map_memory(dev, result_rpa, sizeof(*resp));
|
||||
resp = ihk_device_map_virtual(dev, phys, sizeof(*resp), NULL, 0);
|
||||
@@ -1308,6 +1316,7 @@ static int pager_req_pfn(ihk_os_t os, uintptr_t handle, off_t off, uintptr_t ppf
|
||||
pte_t *pte;
|
||||
uintptr_t phys;
|
||||
uintptr_t *ppfn;
|
||||
int page_fault_attempted = 0;
|
||||
|
||||
dprintk("pager_req_pfn(%p,%lx,%lx)\n", os, handle, off);
|
||||
|
||||
@@ -1323,6 +1332,7 @@ static int pager_req_pfn(ihk_os_t os, uintptr_t handle, off_t off, uintptr_t ppf
|
||||
pfn = PFN_VALID; /* デフォルトは not present */
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
retry:
|
||||
pgd = pgd_offset(current->mm, va);
|
||||
if (!pgd_none(*pgd) && !pgd_bad(*pgd) && pgd_present(*pgd)) {
|
||||
pud = pud_offset(pgd, va);
|
||||
@@ -1345,6 +1355,35 @@ static int pager_req_pfn(ihk_os_t os, uintptr_t handle, off_t off, uintptr_t ppf
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If not present, try to fault it */
|
||||
if (!(pfn & PFN_PRESENT) && !page_fault_attempted) {
|
||||
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
struct vm_area_struct *vma;
|
||||
int fault;
|
||||
|
||||
#if defined(FAULT_FLAG_USER)
|
||||
flags |= FAULT_FLAG_USER;
|
||||
#endif
|
||||
|
||||
vma = find_vma(current->mm, va);
|
||||
if (!vma || (va < vma->vm_start)) {
|
||||
printk("%s: couldn't find VMA for va %lx\n", __FUNCTION__, va);
|
||||
error = -EINVAL;
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
fault = handle_mm_fault(current->mm, vma, va, flags);
|
||||
if (fault != 0) {
|
||||
printk("%s: error: faulting %lx at off: %lu\n",
|
||||
__FUNCTION__, va, off);
|
||||
}
|
||||
|
||||
page_fault_attempted = 1;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
out_release:
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
|
||||
phys = ihk_device_map_memory(dev, ppfn_rpa, sizeof(*ppfn));
|
||||
@@ -1414,7 +1453,8 @@ static long pager_call(ihk_os_t os, struct syscall_request *req)
|
||||
break;
|
||||
|
||||
case PAGER_REQ_MAP:
|
||||
ret = pager_req_map(os, req->args[1], req->args[2], req->args[3], req->args[4]);
|
||||
ret = pager_req_map(os, req->args[1], req->args[2], req->args[3], req->args[4],
|
||||
req->args[5]);
|
||||
break;
|
||||
|
||||
case PAGER_REQ_PFN:
|
||||
|
||||
@@ -167,6 +167,7 @@ enum {
|
||||
CURRENT_OFFSET,
|
||||
RUNQ_OFFSET,
|
||||
CPU_STATUS_OFFSET,
|
||||
IDLE_THREAD_OFFSET,
|
||||
|
||||
/* process */
|
||||
CTX_OFFSET,
|
||||
@@ -204,6 +205,7 @@ static int setup_constants(void) {
|
||||
printf("CURRENT_OFFSET: %ld\n", K(CURRENT_OFFSET));
|
||||
printf("RUNQ_OFFSET: %ld\n", K(RUNQ_OFFSET));
|
||||
printf("CPU_STATUS_OFFSET: %ld\n", K(CPU_STATUS_OFFSET));
|
||||
printf("IDLE_THREAD_OFFSET: %ld\n", K(IDLE_THREAD_OFFSET));
|
||||
printf("CTX_OFFSET: %ld\n", K(CTX_OFFSET));
|
||||
printf("SCHED_LIST_OFFSET: %ld\n", K(SCHED_LIST_OFFSET));
|
||||
printf("PROC_OFFSET: %ld\n", K(PROC_OFFSET));
|
||||
@@ -251,6 +253,64 @@ static int setup_threads(void) {
|
||||
ihk_mc_switch_context = lookup_symbol("ihk_mc_switch_context");
|
||||
if (0) printf("ihk_mc_switch_context: %lx\n", ihk_mc_switch_context);
|
||||
|
||||
/* Set up idle threads first */
|
||||
for (cpu = 0; cpu < num_processors; ++cpu) {
|
||||
uintptr_t v;
|
||||
uintptr_t thread;
|
||||
uintptr_t proc;
|
||||
int pid;
|
||||
int tid;
|
||||
struct thread_info *ti;
|
||||
int status;
|
||||
|
||||
v = clv + (cpu * K(CPU_LOCAL_VAR_SIZE));
|
||||
|
||||
ti = malloc(sizeof(*ti));
|
||||
if (!ti) {
|
||||
perror("malloc");
|
||||
return 1;
|
||||
}
|
||||
|
||||
thread = v+K(IDLE_THREAD_OFFSET);
|
||||
|
||||
error = read_64(thread+K(PROC_OFFSET), &proc);
|
||||
if (error) {
|
||||
perror("proc");
|
||||
return 1;
|
||||
}
|
||||
|
||||
error = read_32(thread+K(STATUS_OFFSET), &status);
|
||||
if (error) {
|
||||
perror("status");
|
||||
return 1;
|
||||
}
|
||||
|
||||
error = read_32(proc+K(PID_OFFSET), &pid);
|
||||
if (error) {
|
||||
perror("pid");
|
||||
return 1;
|
||||
}
|
||||
|
||||
error = read_32(thread+K(TID_OFFSET), &tid);
|
||||
if (error) {
|
||||
perror("tid");
|
||||
return 1;
|
||||
}
|
||||
|
||||
ti->next = NULL;
|
||||
ti->status = status;
|
||||
ti->pid = pid;
|
||||
ti->tid = tid;
|
||||
ti->cpu = cpu;
|
||||
ti->lcpu = cpu;
|
||||
ti->process = thread;
|
||||
ti->clv = v;
|
||||
ti->x86_clv = locals + locals_span*cpu;
|
||||
|
||||
*titailp = ti;
|
||||
titailp = &ti->next;
|
||||
}
|
||||
|
||||
for (cpu = 0; cpu < num_processors; ++cpu) {
|
||||
uintptr_t v;
|
||||
uintptr_t head;
|
||||
|
||||
@@ -654,7 +654,7 @@ int load_elf_desc(char *filename, struct program_load_desc **desc_p,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void transfer_image(int fd, struct program_load_desc *desc)
|
||||
int transfer_image(int fd, struct program_load_desc *desc)
|
||||
{
|
||||
struct remote_transfer pt;
|
||||
unsigned long s, e, flen, rpa;
|
||||
@@ -668,7 +668,10 @@ void transfer_image(int fd, struct program_load_desc *desc)
|
||||
+ PAGE_SIZE - 1) & PAGE_MASK;
|
||||
rpa = desc->sections[i].remote_pa;
|
||||
|
||||
fseek(fp, desc->sections[i].offset, SEEK_SET);
|
||||
if (fseek(fp, desc->sections[i].offset, SEEK_SET) != 0) {
|
||||
fprintf(stderr, "transfer_image(): error: seeking file position\n");
|
||||
return -1;
|
||||
}
|
||||
flen = desc->sections[i].filesz;
|
||||
|
||||
__dprintf("seeked to %lx | size %ld\n",
|
||||
@@ -690,7 +693,20 @@ void transfer_image(int fd, struct program_load_desc *desc)
|
||||
if (lr > flen) {
|
||||
lr = flen;
|
||||
}
|
||||
fread(dma_buf + l, 1, lr, fp);
|
||||
if (fread(dma_buf + l, 1, lr, fp) != lr) {
|
||||
if (ferror(fp) > 0) {
|
||||
fprintf(stderr, "transfer_image(): error: accessing file\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
else if (feof(fp) > 0) {
|
||||
fprintf(stderr, "transfer_image(): file too short?\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
else {
|
||||
/* TODO: handle smaller reads.. */
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
flen -= lr;
|
||||
}
|
||||
else if (flen > 0) {
|
||||
@@ -699,7 +715,20 @@ void transfer_image(int fd, struct program_load_desc *desc)
|
||||
} else {
|
||||
lr = flen;
|
||||
}
|
||||
fread(dma_buf, 1, lr, fp);
|
||||
if (fread(dma_buf, 1, lr, fp) != lr) {
|
||||
if (ferror(fp) > 0) {
|
||||
fprintf(stderr, "transfer_image(): error: accessing file\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
else if (feof(fp) > 0) {
|
||||
fprintf(stderr, "transfer_image(): file too short?\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
else {
|
||||
/* TODO: handle smaller reads.. */
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
flen -= lr;
|
||||
}
|
||||
s += PAGE_SIZE;
|
||||
@@ -715,6 +744,8 @@ void transfer_image(int fd, struct program_load_desc *desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void print_desc(struct program_load_desc *desc)
|
||||
@@ -931,7 +962,10 @@ act_signalfd4(struct syscall_wait_desc *w)
            flags |= O_NONBLOCK;
        if(tmp & SFD_CLOEXEC)
            flags |= O_CLOEXEC;
        pipe2(sfd->sigpipe, flags);
        if (pipe2(sfd->sigpipe, flags) < 0) {
            perror("pipe2 failed:");
            return -1;
        }
        sfd->next = sigfdtop;
        sigfdtop = sfd;
        rc = sfd->sigpipe[0];
@@ -962,7 +996,11 @@ act_signalfd4(struct syscall_wait_desc *w)
            rc = -EBADF;
        else{
            info = (struct signalfd_siginfo *)w->sr.args[2];
            write(sfd->sigpipe[1], info, sizeof(struct signalfd_siginfo));
            if (write(sfd->sigpipe[1], info, sizeof(struct signalfd_siginfo))
                != sizeof(struct signalfd_siginfo)) {
                fprintf(stderr, "error: writing sigpipe\n");
                rc = -EBADF;
            }
        }
        break;
    }
@@ -1522,7 +1560,10 @@ int main(int argc, char **argv)
    }

    print_desc(desc);
    transfer_image(fd, desc);
    if (transfer_image(fd, desc) < 0) {
        fprintf(stderr, "error: transferring image\n");
        return -1;
    }
    fflush(stdout);
    fflush(stderr);
@@ -1945,7 +1986,9 @@ int main_loop(int fd, int cpu, pthread_mutex_t *lock)
            close(pipefds[0]);
            pid = fork();
            if(pid != 0){
                write(pipefds[1], &pid, sizeof pid);
                if (write(pipefds[1], &pid, sizeof pid) != sizeof(pid)) {
                    fprintf(stderr, "error: writing pipefds\n");
                }
                exit(0);
            }
        }
@@ -1954,7 +1997,9 @@ int main_loop(int fd, int cpu, pthread_mutex_t *lock)
            int st;

            close(pipefds[1]);
            read(pipefds[0], &npid, sizeof npid);
            if (read(pipefds[0], &npid, sizeof npid) != sizeof(npid)) {
                fprintf(stderr, "error: reading pipefds\n");
            }
            close(pipefds[0]);
            waitpid(pid, &st, 0);
            pid = npid;
@@ -2210,7 +2255,10 @@ return_execve1:

        __dprintf("%s", "execve(): transfer ELF desc OK\n");

        transfer_image(fd, desc);
        if (transfer_image(fd, desc) != 0) {
            fprintf(stderr, "error: transferring image\n");
            return -1;
        }
        __dprintf("%s", "execve(): image transferred\n");

        if (close_cloexec_fds(fd) < 0) {
@@ -78,51 +78,52 @@ static struct memobj *to_memobj(struct devobj *devobj)
/***********************************************************************
 * devobj
 */
int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxprotp)
int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxprotp,
        int prot, int populate_flags)
{
    ihk_mc_user_context_t ctx;
    struct pager_map_result result; // XXX: assumes contiguous physical
    int error;
    struct devobj *obj = NULL;
    const size_t npages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
    const size_t pfn_npages = (npages / (PAGE_SIZE / sizeof(uintptr_t))) + 1;

    dkprintf("devobj_create(%d,%lx,%lx)\n", fd, len, off);
#define MAX_PAGES_IN_DEVOBJ (PAGE_SIZE / sizeof(uintptr_t))
    if (npages > MAX_PAGES_IN_DEVOBJ) {
        error = -EFBIG;
        kprintf("devobj_create(%d,%lx,%lx):too large len. %d\n", fd, len, off, error);
        goto out;
    }
    dkprintf("%s: fd: %d, len: %lu, off: %lu \n", __FUNCTION__, fd, len, off);

    obj = kmalloc(sizeof(*obj), IHK_MC_AP_NOWAIT);
    if (!obj) {
        error = -ENOMEM;
        kprintf("devobj_create(%d,%lx,%lx):kmalloc failed. %d\n", fd, len, off, error);
        kprintf("%s: error: fd: %d, len: %lu, off: %lu kmalloc failed.\n",
            __FUNCTION__, fd, len, off);
        goto out;
    }
    memset(obj, 0, sizeof(*obj));

    obj->pfn_table = allocate_pages(1, IHK_MC_AP_NOWAIT);
    obj->pfn_table = allocate_pages(pfn_npages, IHK_MC_AP_NOWAIT);
    if (!obj->pfn_table) {
        error = -ENOMEM;
        kprintf("devobj_create(%d,%lx,%lx):allocate_pages failed. %d\n", fd, len, off, error);
        kprintf("%s: error: fd: %d, len: %lu, off: %lu allocating PFN failed.\n",
            __FUNCTION__, fd, len, off);
        goto out;
    }
    memset(obj->pfn_table, 0, 1*PAGE_SIZE);
    memset(obj->pfn_table, 0, pfn_npages * PAGE_SIZE);

    ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_MAP;
    ihk_mc_syscall_arg1(&ctx) = fd;
    ihk_mc_syscall_arg2(&ctx) = len;
    ihk_mc_syscall_arg3(&ctx) = off;
    ihk_mc_syscall_arg4(&ctx) = virt_to_phys(&result);
    ihk_mc_syscall_arg5(&ctx) = prot | populate_flags;

    error = syscall_generic_forwarding(__NR_mmap, &ctx);
    if (error) {
        kprintf("devobj_create(%d,%lx,%lx):map failed. %d\n", fd, len, off, error);
        kprintf("%s: error: fd: %d, len: %lu, off: %lu map failed.\n",
            __FUNCTION__, fd, len, off);
        goto out;
    }
    dkprintf("devobj_create:handle: %lx\n", result.handle);
    dkprintf("devobj_create:maxprot: %x\n", result.maxprot);

    dkprintf("%s: fd: %d, len: %lu, off: %lu, handle: %p, maxprot: %x\n",
        __FUNCTION__, fd, len, off, result.handle, result.maxprot);

    obj->memobj.ops = &devobj_ops;
    obj->memobj.flags = MF_HAS_PAGER;
@@ -140,11 +141,12 @@ int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxp
out:
    if (obj) {
        if (obj->pfn_table) {
            free_pages(obj->pfn_table, 1);
            free_pages(obj->pfn_table, pfn_npages);
        }
        kfree(obj);
    }
    dkprintf("devobj_create(%d,%lx,%lx): %d %p %x%d\n", fd, len, off, error, *objp, *maxprotp);
    dkprintf("%s: ret: %d, fd: %d, len: %lu, off: %lu, handle: %p, maxprot: %x \n",
        __FUNCTION__, error, fd, len, off, result.handle, result.maxprot);
    return error;
}

@@ -204,7 +206,7 @@ static void devobj_release(struct memobj *memobj)

static int devobj_get_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *flag)
{
    const off_t pgoff = off >> PAGE_SHIFT;
    const off_t pgoff = off / PAGE_SIZE;
    struct devobj *obj = to_devobj(memobj);
    int error;
    uintptr_t pfn;
@@ -216,7 +218,7 @@ static int devobj_get_page(struct memobj *memobj, off_t off, int p2align, uintpt

    if ((pgoff < obj->pfn_pgoff) || ((obj->pfn_pgoff + obj->npages) <= pgoff)) {
        error = -EFBIG;
        kprintf("devobj_get_page(%p %lx,%lx,%d): out of range. %d\n", memobj, obj->handle, off, p2align, error);
        kprintf("%s: error: out of range: off: %lu, page off: %lu obj->npages: %d\n", __FUNCTION__, off, pgoff, obj->npages);
        goto out;
    }
    ix = pgoff - obj->pfn_pgoff;
@@ -182,7 +182,7 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)

    error = syscall_generic_forwarding(__NR_mmap, &ctx);
    if (error) {
        kprintf("fileobj_create(%d):create failed. %d\n", fd, error);
        dkprintf("fileobj_create(%d):create failed. %d\n", fd, error);
        goto out;
    }

@@ -79,8 +79,6 @@
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#endif

extern struct sigpending *hassigpending(struct thread *thread);

int futex_cmpxchg_enabled;

/**

@@ -141,6 +141,7 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp);
struct shmid_ds;
int shmobj_create(struct shmid_ds *ds, struct memobj **objp);
int zeroobj_create(struct memobj **objp);
int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxprotp);
int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxprotp,
        int prot, int populate_flags);

#endif /* HEADER_MEMOBJ_H */

@@ -29,6 +29,7 @@
#define VR_IO_NOCACHE 0x100
#define VR_REMOTE 0x200
#define VR_WRITE_COMBINED 0x400
#define VR_DONTFORK 0x800
#define VR_DEMAND_PAGING 0x1000
#define VR_PRIVATE 0x2000
#define VR_LOCKED 0x4000
@@ -319,12 +320,14 @@ struct process_vm;
struct mckfd {
    struct mckfd *next;
    int fd;
    int sig_no;
    long data;
    void *opt;
    long (*read_cb)(struct mckfd *, ihk_mc_user_context_t *);
    int (*ioctl_cb)(struct mckfd *, ihk_mc_user_context_t *);
    long (*mmap_cb)(struct mckfd *, ihk_mc_user_context_t *);
    int (*close_cb)(struct mckfd *, ihk_mc_user_context_t *);
    int (*fcntl_cb)(struct mckfd *, ihk_mc_user_context_t *);
};

#define SFD_CLOEXEC 02000000
@@ -675,5 +678,6 @@ void chain_process(struct process *);
void chain_thread(struct thread *);
void proc_init();
void set_timer();
struct sig_pending *hassigpending(struct thread *thread);

#endif
@@ -338,6 +338,10 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
        proc = org->proc;
        thread->vm = org->vm;
        thread->proc = proc;

        thread->sigstack.ss_sp = NULL;
        thread->sigstack.ss_flags = SS_DISABLE;
        thread->sigstack.ss_size = 0;
    }
    /* fork() */
    else {
@@ -386,6 +390,10 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
        thread->proc->maxrss = org->proc->maxrss;
        thread->vm->currss = org->vm->currss;

        thread->sigstack.ss_sp = org->sigstack.ss_sp;
        thread->sigstack.ss_flags = org->sigstack.ss_flags;
        thread->sigstack.ss_size = org->sigstack.ss_size;

        dkprintf("fork(): copy_user_ranges() OK\n");
    }

@@ -413,9 +421,6 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
        INIT_LIST_HEAD(&thread->sigcommon->sigpending);
        // TODO: copy signalfd
    }
    thread->sigstack.ss_sp = NULL;
    thread->sigstack.ss_flags = SS_DISABLE;
    thread->sigstack.ss_size = 0;
    ihk_mc_spinlock_init(&thread->sigpendinglock);
    INIT_LIST_HEAD(&thread->sigpending);
    thread->sigmask = org->sigmask;
@@ -566,6 +571,9 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
            break;
        }

        if(src_range->flag & VR_DONTFORK)
            continue;

        range = kmalloc(sizeof(struct vm_range), IHK_MC_AP_NOWAIT);
        if (!range) {
            goto err_rollback;
@@ -1641,6 +1649,18 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
            "access denied. %d\n",
            ihk_mc_get_processor_id(), vm,
            fault_addr0, reason, error);
        kprintf("%s: reason: %s%s%s%s%s%s%s%s\n", __FUNCTION__,
            (reason & PF_PROT) ? "PF_PROT " : "",
            (reason & PF_WRITE) ? "PF_WRITE " : "",
            (reason & PF_USER) ? "PF_USER " : "",
            (reason & PF_RSVD) ? "PF_RSVD " : "",
            (reason & PF_INSTR) ? "PF_INSTR " : "",
            (reason & PF_PATCH) ? "PF_PATCH " : "",
            (reason & PF_POPULATE) ? "PF_POPULATE " : "");
        kprintf("%s: range->flag & (%s%s%s)\n", __FUNCTION__,
            (range->flag & VR_PROT_READ) ? "VR_PROT_READ " : "",
            (range->flag & VR_PROT_WRITE) ? "VR_PROT_WRITE " : "",
            (range->flag & VR_PROT_EXEC) ? "VR_PROT_EXEC " : "");
        if (((range->flag & VR_PROT_MASK) == VR_PROT_NONE))
            kprintf("if (((range->flag & VR_PROT_MASK) == VR_PROT_NONE))\n");
        if (((reason & PF_WRITE) && !(reason & PF_PATCH)))
@@ -2491,7 +2511,6 @@ static void do_migrate(void)
        cur_v->runq_len -= 1;
        old_cpu_id = req->thread->cpu_id;
        req->thread->cpu_id = cpu_id;
        settid(req->thread, 2, cpu_id, old_cpu_id);
        list_add_tail(&req->thread->sched_list, &v->runq);
        v->runq_len += 1;

@@ -2506,6 +2525,7 @@ static void do_migrate(void)
        v->flags |= CPU_FLAG_NEED_RESCHED;
        ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
        double_rq_unlock(cur_v, v, irqstate);
        settid(req->thread, 2, cpu_id, old_cpu_id);

ack:
        waitq_wakeup(&req->wq);
@@ -2576,9 +2596,10 @@ redo:
    if (v->flags & CPU_FLAG_NEED_MIGRATE) {
        next = &cpu_local_var(idle);
    } else {
        /* Pick a new running process */
        /* Pick a new running process or one that has a pending signal */
        list_for_each_entry_safe(thread, tmp, &(v->runq), sched_list) {
            if (thread->status == PS_RUNNING) {
            if (thread->status == PS_RUNNING ||
                (thread->status == PS_INTERRUPTIBLE && hassigpending(thread))) {
                next = thread;
                break;
            }
@@ -2704,9 +2725,11 @@ sched_wakeup_thread(struct thread *thread, int valid_states)
    int spin_slept = 0;
    unsigned long irqstate;
    struct cpu_local_var *v = get_cpu_local_var(thread->cpu_id);
    struct process *proc = thread->proc;
    struct mcs_rwlock_node updatelock;

    dkprintf("sched_wakeup_process,proc->pid=%d,valid_states=%08x,proc->status=%08x,proc->cpu_id=%d,my cpu_id=%d\n",
        thread->proc->pid, valid_states, thread->status, thread->cpu_id, ihk_mc_get_processor_id());
        proc->pid, valid_states, thread->status, thread->cpu_id, ihk_mc_get_processor_id());

    irqstate = ihk_mc_spinlock_lock(&(thread->spin_sleep_lock));
    if (thread->spin_sleep > 0) {
@@ -2726,7 +2749,10 @@ sched_wakeup_thread(struct thread *thread, int valid_states)
    irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));

    if (thread->status & valid_states) {
        xchg4((int *)(&thread->proc->status), PS_RUNNING);
        mcs_rwlock_writer_lock_noirq(&proc->update_lock, &updatelock);
        if(proc->status != PS_EXITED)
            proc->status = PS_RUNNING;
        mcs_rwlock_writer_unlock_noirq(&proc->update_lock, &updatelock);
        xchg4((int *)(&thread->status), PS_RUNNING);
        status = 0;
    }

@@ -281,6 +281,13 @@ process_procfs_request(unsigned long rarg)
            ans = -EIO;
            goto end;
        }

        if (pa < ihk_mc_get_memory_address(IHK_MC_GMA_MAP_START, 0) ||
            pa >= ihk_mc_get_memory_address(IHK_MC_GMA_MAP_END, 0)) {
            ans = -EIO;
            goto end;
        }

        va = phys_to_virt(pa);
        if(readwrite)
            memcpy(va, buf + ans, size);
kernel/syscall.c
@@ -105,7 +105,6 @@ static void calculate_time_from_tsc(struct timespec *ts);
void check_signal(unsigned long, void *, int);
void do_signal(long rc, void *regs, struct thread *thread, struct sig_pending *pending, int num);
extern unsigned long do_kill(struct thread *thread, int pid, int tid, int sig, struct siginfo *info, int ptracecont);
extern struct sigpending *hassigpending(struct thread *thread);
extern long alloc_debugreg(struct thread *thread);
extern int num_processors;
extern unsigned long ihk_mc_get_ns_per_tsc(void);
@@ -231,16 +230,43 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)

    send_syscall(req, cpu, pid);

    dkprintf("SC(%d)[%3d] waiting for host.. \n",
        ihk_mc_get_processor_id(),
        req->number);
    dkprintf("%s: syscall num: %d waiting for Linux.. \n",
        __FUNCTION__, req->number);

#define STATUS_IN_PROGRESS 0
#define STATUS_COMPLETED 1
#define STATUS_PAGE_FAULT 3
    while (res->status != STATUS_COMPLETED) {
    while (res->status == STATUS_IN_PROGRESS) {
        struct cpu_local_var *v;
        int call_schedule = 0;
        long runq_irqstate;

        cpu_pause();

        /* XXX: Intel MPI + Intel OpenMP situation:
         * While the MPI helper thread waits in a poll() call the OpenMP master
         * thread is iterating through the CPU cores using setaffinity().
         * Unless we give a chance to it on this core the two threads seem to
         * hang in deadlock. If the new thread would make a system call on this
         * core we would be in trouble. For now, allow it, but in the future
         * we should have syscall channels for each thread instead of per core,
         * or we should multiplex syscall threads in mcexec */
        runq_irqstate =
            ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
        v = get_this_cpu_local_var();

        if (v->flags & CPU_FLAG_NEED_RESCHED) {
            call_schedule = 1;
            --thread->in_syscall_offload;
        }

        ihk_mc_spinlock_unlock(&v->runq_lock, runq_irqstate);

        if (call_schedule) {
            schedule();
            ++thread->in_syscall_offload;
        }
    }

    if (res->status == STATUS_PAGE_FAULT) {
@@ -260,9 +286,8 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
        }
    }

    dkprintf("SC(%d)[%3d] got host reply: %d \n",
        ihk_mc_get_processor_id(),
        req->number, res->ret);
    dkprintf("%s: syscall num: %d got host reply: %d \n",
        __FUNCTION__, req->number, res->ret);

    rc = res->ret;
    if(islock){
@@ -1068,6 +1093,11 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
        populated_mapping = 1;
    }

    /* XXX: Intel MPI 128MB mapping.. */
    if (len == 134217728) {
        populated_mapping = 0;
    }

    if (!(prot & PROT_WRITE)) {
        error = set_host_vma(addr, len, PROT_READ);
        if (error) {
@@ -1097,7 +1127,7 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
    }
#endif
        if (error == -ESRCH) {
            kprintf("do_mmap:hit non VREG\n");
            dkprintf("do_mmap:hit non VREG\n");
            /*
             * XXX: temporary:
             *
@@ -1108,10 +1138,17 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
                vrflags &= ~VR_MEMTYPE_MASK;
                vrflags |= VR_MEMTYPE_UC;
            }
            error = devobj_create(fd, len, off, &memobj, &maxprot);
            error = devobj_create(fd, len, off, &memobj, &maxprot,
                prot, (flags & (MAP_POPULATE | MAP_LOCKED)));

            if (!error) {
                dkprintf("%s: device fd: %d off: %lu mapping at %p - %p\n",
                    __FUNCTION__, fd, off, addr, addr + len);
            }
        }
        if (error) {
            ekprintf("do_mmap:fileobj_create failed. %d\n", error);
            kprintf("%s: error: file mapping failed, fd: %d, error: %d\n",
                __FUNCTION__, error);
            goto out;
        }
    }
@@ -1181,9 +1218,12 @@ out:
    if (!error && populated_mapping) {
        error = populate_process_memory(thread->vm, (void *)addr, len);
        if (error) {
            ekprintf("do_mmap:populate_process_memory"
                "(%p,%p,%lx) failed %d\n",
                thread->vm, (void *)addr, len, error);
            ekprintf("%s: error :populate_process_memory"
                "vm: %p, addr: %p, len: %d (flags: %s%s) failed %d\n", __FUNCTION__,
                thread->vm, (void *)addr, len,
                (flags & MAP_POPULATE) ? "MAP_POPULATE " : "",
                (flags & MAP_LOCKED) ? "MAP_LOCKED ": "",
                error);
            /*
             * In this case,
             * the mapping established by this call should be unmapped
@@ -2242,9 +2282,8 @@ SYSCALL_DECLARE(setfsgid)
    unsigned long newfsgid;
    struct syscall_request request IHK_DMA_ALIGN;

    request.number = __NR_setfsuid;
    request.number = __NR_setfsgid;
    request.args[0] = fsgid;
    request.args[1] = 0;
    newfsgid = do_syscall(&request, ihk_mc_get_processor_id(), 0);
    do_setresgid();
    return newfsgid;
@@ -2470,6 +2509,31 @@ SYSCALL_DECLARE(close)
    return rc;
}

SYSCALL_DECLARE(fcntl)
{
    int fd = ihk_mc_syscall_arg0(ctx);
    // int cmd = ihk_mc_syscall_arg1(ctx);
    long rc;
    struct thread *thread = cpu_local_var(current);
    struct process *proc = thread->proc;
    struct mckfd *fdp;
    long irqstate;

    irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);
    for(fdp = proc->mckfd; fdp; fdp = fdp->next)
        if(fdp->fd == fd)
            break;
    ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);

    if(fdp && fdp->fcntl_cb){
        rc = fdp->fcntl_cb(fdp, ctx);
    }
    else{
        rc = syscall_generic_forwarding(__NR_fcntl, ctx);
    }
    return rc;
}

SYSCALL_DECLARE(rt_sigprocmask)
{
    int how = ihk_mc_syscall_arg0(ctx);
@@ -2621,26 +2685,12 @@ perf_counter_alloc(struct mc_perf_event *event)
    struct perf_event_attr *attr = &event->attr;
    struct mc_perf_event *leader = event->group_leader;

    if(attr->type == PERF_TYPE_HARDWARE) {

        event->counter_id = ihk_mc_perfctr_alloc_counter(leader->pmc_status);

    } else if(attr->type == PERF_TYPE_RAW) {
        // PAPI_REF_CYC counted by fixed counter
        if((attr->config & 0x0000ffff) == 0x00000300) {
            event->counter_id = 2 + X86_IA32_BASE_FIXED_PERF_COUNTERS;
            return ret;
        }

        event->counter_id = ihk_mc_perfctr_alloc_counter(leader->pmc_status);
    } else {
        // Not supported type.
        ret = -1;
    }
    ret = ihk_mc_perfctr_alloc_counter(&attr->type, &attr->config, leader->pmc_status);

    if(ret >= 0) {
        leader->pmc_status |= 1UL << event->counter_id;
        leader->pmc_status |= 1UL << ret;
    }
    event->counter_id = ret;

    return ret;
}
@@ -2649,7 +2699,6 @@ int
perf_counter_start(struct mc_perf_event *event)
{
    int ret = 0;
    enum ihk_perfctr_type type;
    struct perf_event_attr *attr = &event->attr;
    int mode = 0x00;

@@ -2660,52 +2709,34 @@ perf_counter_start(struct mc_perf_event *event)
        mode |= PERFCTR_USER_MODE;
    }

    if(attr->type == PERF_TYPE_HARDWARE) {
        switch(attr->config){
        case PERF_COUNT_HW_CPU_CYCLES :
            type = APT_TYPE_CYCLE;
            break;
        case PERF_COUNT_HW_INSTRUCTIONS :
            type = APT_TYPE_INSTRUCTIONS;
            break;
        default :
            // Not supported config.
            type = PERFCTR_MAX_TYPE;
        }

        ret = ihk_mc_perfctr_init(event->counter_id, type, mode);
        ihk_mc_perfctr_set(event->counter_id, event->sample_freq * -1);
        ihk_mc_perfctr_start(1UL << event->counter_id);

    } else if(attr->type == PERF_TYPE_RAW) {
        // PAPI_REF_CYC counted by fixed counter
        if(event->counter_id >= X86_IA32_BASE_FIXED_PERF_COUNTERS) {
            ret = ihk_mc_perfctr_fixed_init(event->counter_id, mode);
            ihk_mc_perfctr_set(event->counter_id, event->sample_freq * -1);
            ihk_mc_perfctr_start(1UL << event->counter_id);
            return ret;
        }

        if(event->counter_id >= 0 && event->counter_id < X86_IA32_NUM_PERF_COUNTERS) {
            ret = ihk_mc_perfctr_init_raw(event->counter_id, attr->config, mode);
            ihk_mc_perfctr_set(event->counter_id, event->sample_freq * -1);
            ihk_mc_perfctr_start(1UL << event->counter_id);
        } else {
            // Not supported type.
        }
    else if(event->counter_id >= X86_IA32_BASE_FIXED_PERF_COUNTERS &&
        event->counter_id < X86_IA32_BASE_FIXED_PERF_COUNTERS + X86_IA32_NUM_FIXED_PERF_COUNTERS) {
        ret = ihk_mc_perfctr_fixed_init(event->counter_id, mode);
        ihk_mc_perfctr_start(1UL << event->counter_id);
    }
    else {
        ret = -1;
    }

    return ret;
}

unsigned long perf_event_read_value(struct mc_perf_event *event)
{
    unsigned long rtn_count = 0;
    unsigned long pmc_count = 0;
    int counter_id = event->counter_id;

    if(event->pid == 0)
        event->count = ihk_mc_perfctr_read(counter_id);
    if(event->pid == 0) {
        pmc_count = ihk_mc_perfctr_read(counter_id) + event->attr.sample_freq;
        pmc_count &= 0x000000ffffffffffL; // 40bit MASK
    }

    rtn_count += event->count;
    rtn_count += event->count + pmc_count;

    if(event->attr.inherit)
        rtn_count += event->child_count_total;
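
The read path above appears to assume the counter was programmed to sample_freq * -1 (as perf_counter_start() and the PERF_EVENT_IOC_RESET branch below do), so adding sample_freq back to the raw value yields the number of events since the last reset, and the mask presumably reflects a 40-bit wide PMC on this target. A small worked sketch of that arithmetic, with purely illustrative values:

/* Hypothetical standalone sketch of the 40-bit wraparound arithmetic. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t MASK40 = 0x000000ffffffffffULL; /* 40-bit counter width */
    uint64_t sample_freq = 100000;
    /* Counter preset to -sample_freq, then advanced by 250 events. */
    uint64_t raw = (0 - sample_freq + 250) & MASK40;
    /* Same recovery step as perf_event_read_value(): raw + sample_freq, masked. */
    uint64_t events = (raw + sample_freq) & MASK40;
    printf("%llu\n", (unsigned long long)events); /* prints 250 */
    return 0;
}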

@@ -2922,11 +2953,21 @@ perf_ioctl(struct mckfd *sfd, ihk_mc_user_context_t *ctx)
        break;
    case PERF_EVENT_IOC_RESET:
        // TODO: reset other process
        ihk_mc_perfctr_reset(counter_id);
        ihk_mc_perfctr_set(counter_id, event->attr.sample_freq * -1);
        event->count = 0L;
        break;
    case PERF_EVENT_IOC_REFRESH:
        // TODO: refresh other process
        ihk_mc_perfctr_set(counter_id, event->sample_freq * -1);

        // not supported on inherited events
        if(event->attr.inherit)
            return -EINVAL;

        event->count += event->attr.sample_freq;
        ihk_mc_perfctr_set(counter_id, event->attr.sample_freq * -1);

        perf_start(event);

        break;
    default :
        return -1;
@@ -2945,6 +2986,28 @@ perf_close(struct mckfd *sfd, ihk_mc_user_context_t *ctx)
    return 0;
}

static int
perf_fcntl(struct mckfd *sfd, ihk_mc_user_context_t *ctx)
{
    int cmd = ihk_mc_syscall_arg1(ctx);
    long arg = ihk_mc_syscall_arg2(ctx);
    int rc = 0;

    switch(cmd) {
    case 10: // F_SETSIG
        sfd->sig_no = arg;
        break;
    case 0xf: // F_SETOWN_EX
        break;
    default :
        break;
    }

    rc = syscall_generic_forwarding(__NR_fcntl, ctx);

    return rc;
}
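
perf_fcntl() records the signal number requested via F_SETSIG in sfd->sig_no before forwarding the call to Linux. A minimal userspace sketch of how a client might arm that path is shown below; it uses only the standard fcntl(2) commands handled above (F_SETSIG is 10, F_SETOWN_EX is 0xf), and the fd is assumed to come from perf_event_open(2):

/* Hypothetical usage sketch, not part of the change set above. */
#define _GNU_SOURCE     /* for F_SETSIG */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Request that counter overflow be reported to this process as signo. */
int arm_overflow_signal(int perf_fd, int signo)
{
    /* Direct the signal at the calling process. */
    if (fcntl(perf_fd, F_SETOWN, getpid()) < 0) {
        perror("fcntl(F_SETOWN)");
        return -1;
    }
    /* Recorded in mckfd->sig_no by perf_fcntl() before forwarding. */
    if (fcntl(perf_fd, F_SETSIG, signo) < 0) {
        perror("fcntl(F_SETSIG)");
        return -1;
    }
    /* On stock Linux one would typically also set O_ASYNC via F_SETFL. */
    return 0;
}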

static long
perf_mmap(struct mckfd *sfd, ihk_mc_user_context_t *ctx)
{
@@ -2963,6 +3026,7 @@ perf_mmap(struct mckfd *sfd, ihk_mc_user_context_t *ctx)

    // setup perf_event_mmap_page
    page = (struct perf_event_mmap_page *)rc;
    page->data_head = 16;
    page->cap_user_rdpmc = 1;

    return rc;
@@ -3014,7 +3078,7 @@ SYSCALL_DECLARE(perf_event_open)

    event->sample_freq = attr->sample_freq;
    event->nr_siblings = 0;
    event->count = 0;
    event->count = 0L;
    event->child_count_total = 0;
    event->parent = NULL;
    event->pid = pid;
@@ -3050,10 +3114,12 @@ SYSCALL_DECLARE(perf_event_open)
    if(!sfd)
        return -ENOMEM;
    sfd->fd = fd;
    sfd->sig_no = -1;
    sfd->read_cb = perf_read;
    sfd->ioctl_cb = perf_ioctl;
    sfd->close_cb = perf_close;
    sfd->mmap_cb = perf_mmap;
    sfd->fcntl_cb = perf_fcntl;
    sfd->data = (long)event;
    irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);

@@ -3413,6 +3479,90 @@ SYSCALL_DECLARE(mincore)
    return 0;
} /* sys_mincore() */

static int
set_memory_range_flag(struct vm_range *range, unsigned long arg)
{
    range->flag |= arg;
    return 0;
}

static int
clear_memory_range_flag(struct vm_range *range, unsigned long arg)
{
    range->flag &= ~arg;
    return 0;
}

static int
change_attr_process_memory_range(struct process_vm *vm,
        uintptr_t start, uintptr_t end,
        int (*change_proc)(struct vm_range *,
            unsigned long),
        unsigned long arg)
{
    uintptr_t addr;
    int error;
    struct vm_range *range;
    struct vm_range *prev;
    struct vm_range *next;
    int join_flag = 0;

    error = 0;
    range = lookup_process_memory_range(vm, start, start + PAGE_SIZE);
    if(!range){
        error = -ENOMEM;
        goto out;
    }

    prev = previous_process_memory_range(vm, range);
    if(!prev)
        prev = range;
    for (addr = start; addr < end; addr = range->end) {
        if (range->start < addr) {
            if((error = split_process_memory_range(vm, range, addr, &range))) {
                break;
            }
        }
        if (end < range->end) {
            if((error = split_process_memory_range(vm, range, end, NULL))) {
                break;
            }
        }

        if(!(error = change_proc(range, arg))){
            break;
        }
        range = next_process_memory_range(vm, range);
    }
    if(error){
        next = next_process_memory_range(vm, range);
        if(!next)
            next = range;
    }
    else{
        next = range;
    }

    while(prev != next){
        int wkerr;

        range = next_process_memory_range(vm, prev);
        if(!range)
            break;
        wkerr = join_process_memory_range(vm, prev, range);
        if(range == next)
            join_flag = 1;
        if (wkerr) {
            if(join_flag)
                break;
            prev = range;
        }
    }

out:
    return error;
}

SYSCALL_DECLARE(madvise)
{
    const uintptr_t start = (uintptr_t)ihk_mc_syscall_arg0(ctx);
@@ -3521,6 +3671,7 @@ SYSCALL_DECLARE(madvise)
            goto out;
        }
    }
    else if(advice == MADV_DONTFORK || advice == MADV_DOFORK);
    else if (!range->memobj || !memobj_has_pager(range->memobj)) {
        dkprintf("[%d]sys_madvise(%lx,%lx,%x):has not pager"
            "[%lx-%lx) %lx\n",
@@ -3565,6 +3716,27 @@ SYSCALL_DECLARE(madvise)
        }
    }

    if(advice == MADV_DONTFORK){
        error = change_attr_process_memory_range(thread->vm, start, end,
                set_memory_range_flag,
                VR_DONTFORK);
        if(error){
            goto out;
        }
    }
    if(advice == MADV_DOFORK){
        error = change_attr_process_memory_range(thread->vm, start, end,
                clear_memory_range_flag,
                VR_DONTFORK);
        if(error){
            goto out;
        }
    }
    if(advice == MADV_DONTFORK ||
        advice == MADV_DOFORK){
        error = syscall_generic_forwarding(__NR_madvise, ctx);
    }

    error = 0;
out:
    ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
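
The new MADV_DONTFORK / MADV_DOFORK branch above tags the affected ranges with VR_DONTFORK (which copy_user_ranges() now skips at fork time) and forwards the advice to Linux. A minimal userspace sketch of the calling pattern this enables is given below; it uses only the standard madvise(2) interface, and the buffer here is purely illustrative:

/* Hypothetical usage sketch: mark a region so children do not inherit it. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 2 * 1024 * 1024;
    void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* Sets VR_DONTFORK on the range and forwards the advice to Linux. */
    if (madvise(buf, len, MADV_DONTFORK) != 0) {
        perror("madvise(MADV_DONTFORK)");
        return 1;
    }
    /* ... a fork() here would not duplicate this mapping into the child ... */
    if (madvise(buf, len, MADV_DOFORK) != 0)
        perror("madvise(MADV_DOFORK)");
    munmap(buf, len);
    return 0;
}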

@@ -5547,7 +5719,7 @@ SYSCALL_DECLARE(sched_setaffinity)
    len = MIN2(len, sizeof(k_cpu_set));

    if (copy_from_user(&k_cpu_set, u_cpu_set, len)) {
        kprintf("%s:%d copy_from_user failed.\n", __FILE__, __LINE__);
        kprintf("%s: error: copy_from_user failed for %p:%d\n", __FUNCTION__, u_cpu_set, len);
        return -EFAULT;
    }

@@ -54,11 +54,11 @@ int ihk_mc_perfctr_start(unsigned long counter_mask);
int ihk_mc_perfctr_stop(unsigned long counter_mask);
int ihk_mc_perfctr_fixed_init(int counter, int mode);
int ihk_mc_perfctr_reset(int counter);
int ihk_mc_perfctr_set(int counter, unsigned long value);
int ihk_mc_perfctr_set(int counter, long value);
int ihk_mc_perfctr_read_mask(unsigned long counter_mask, unsigned long *value);
unsigned long ihk_mc_perfctr_read(int counter);
unsigned long ihk_mc_perfctr_read_msr(int counter);
int ihk_mc_perfctr_alloc_counter(unsigned long pmc_status);
int ihk_mc_perfctr_alloc_counter(unsigned int *type, unsigned long *config, unsigned long pmc_status);

#endif