Revert "memory_range_lock: Enable interrupt when trylock fails"

This reverts commit 0d3ef65092.

Reason for revert: This fix causes a circular lock dependency between memory_range manipulation and TLB flush. See #1394.

Change-Id: I4774e81ff300c199629e283e538c0a30ad0eeaae
This commit is contained in:
Masamichi Takagi
2019-09-20 07:18:15 +00:00
committed by Ken Sato
parent 41d37bcd30
commit edd3ea0103
12 changed files with 102 additions and 393 deletions

View File

@@ -2131,7 +2131,6 @@ int do_process_vm_read_writev(int pid,
struct vm_range *range;
struct mcs_rwlock_node_irqsave lock;
struct mcs_rwlock_node update_lock;
unsigned long irqflags;
/* Sanity checks */
if (flags) {
@@ -2143,7 +2142,7 @@ int do_process_vm_read_writev(int pid,
}
/* Check if parameters are okay */
memory_range_read_lock(lthread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&lthread->vm->memory_range_lock);
range = lookup_process_memory_range(lthread->vm,
(uintptr_t)local_iov,
@@ -2165,7 +2164,7 @@ int do_process_vm_read_writev(int pid,
ret = 0;
arg_out:
memory_range_read_unlock(lthread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&lthread->vm->memory_range_lock);
if (ret != 0) {
goto out;
@@ -2234,7 +2233,7 @@ arg_out:
if (pli != li) {
struct vm_range *range;
memory_range_read_lock(lthread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&lthread->vm->memory_range_lock);
/* Is base valid? */
range = lookup_process_memory_range(lthread->vm,
@@ -2264,7 +2263,7 @@ arg_out:
ret = 0;
pli_out:
memory_range_read_unlock(lthread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&lthread->vm->memory_range_lock);
if (ret != 0) {
goto out;
@@ -2277,7 +2276,7 @@ pli_out:
if (pri != ri) {
struct vm_range *range;
memory_range_read_lock(rvm, &irqflags);
ihk_mc_spinlock_lock_noirq(&rvm->memory_range_lock);
/* Is base valid? */
range = lookup_process_memory_range(rvm,
@@ -2307,7 +2306,7 @@ pli_out:
ret = 0;
pri_out:
memory_range_read_unlock(rvm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&rvm->memory_range_lock);
if (ret != 0) {
goto out;

View File

@@ -2267,7 +2267,6 @@ int do_process_vm_read_writev(int pid,
struct vm_range *range;
struct mcs_rwlock_node_irqsave lock;
struct mcs_rwlock_node update_lock;
unsigned long irqflags;
/* Sanity checks */
if (flags) {
@@ -2279,7 +2278,7 @@ int do_process_vm_read_writev(int pid,
}
/* Check if parameters are okay */
memory_range_read_lock(lthread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&lthread->vm->memory_range_lock);
range = lookup_process_memory_range(lthread->vm,
(uintptr_t)local_iov,
@@ -2301,7 +2300,7 @@ int do_process_vm_read_writev(int pid,
ret = 0;
arg_out:
memory_range_read_unlock(lthread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&lthread->vm->memory_range_lock);
if (ret != 0) {
goto out;
@@ -2370,7 +2369,7 @@ arg_out:
if (pli != li) {
struct vm_range *range;
memory_range_read_lock(lthread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&lthread->vm->memory_range_lock);
/* Is base valid? */
range = lookup_process_memory_range(lthread->vm,
@@ -2400,7 +2399,7 @@ arg_out:
ret = 0;
pli_out:
memory_range_read_unlock(lthread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&lthread->vm->memory_range_lock);
if (ret != 0) {
goto out;
@@ -2413,7 +2412,7 @@ pli_out:
if (pri != ri) {
struct vm_range *range;
memory_range_read_lock(rvm, &irqflags);
ihk_mc_spinlock_lock_noirq(&rvm->memory_range_lock);
/* Is base valid? */
range = lookup_process_memory_range(rvm,
@@ -2443,7 +2442,7 @@ pli_out:
ret = 0;
pri_out:
memory_range_read_unlock(rvm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&rvm->memory_range_lock);
if (ret != 0) {
goto out;

View File

@@ -744,7 +744,7 @@ struct process_vm {
void *vvar_addr;
ihk_spinlock_t page_table_lock;
struct ihk_rwlock memory_range_lock;
ihk_spinlock_t memory_range_lock;
// to protect the followings:
// 1. addition of process "memory range" (extend_process_region, add_process_memory_range)
// 2. addition of process page table (allocate_pages, update_process_page_table)
@@ -778,46 +778,6 @@ static inline int has_cap_sys_admin(struct thread *th)
return !(th->proc->euid);
}
static inline void memory_range_read_lock(struct process_vm *vm,
unsigned long *flags)
{
for (;;) {
*flags = cpu_disable_interrupt_save();
if (ihk_mc_read_trylock(&vm->memory_range_lock)) {
break;
}
cpu_restore_interrupt(*flags);
cpu_pause();
}
}
static inline void memory_range_write_lock(struct process_vm *vm,
unsigned long *flags)
{
for (;;) {
*flags = cpu_disable_interrupt_save();
if (ihk_mc_write_trylock(&vm->memory_range_lock)) {
break;
}
cpu_restore_interrupt(*flags);
cpu_pause();
}
}
static inline void memory_range_read_unlock(struct process_vm *vm,
unsigned long *flags)
{
ihk_mc_read_unlock(&vm->memory_range_lock);
cpu_restore_interrupt(*flags);
}
static inline void memory_range_write_unlock(struct process_vm *vm,
unsigned long *flags)
{
ihk_mc_write_unlock(&vm->memory_range_lock);
cpu_restore_interrupt(*flags);
}
void hold_address_space(struct address_space *);
void release_address_space(struct address_space *);
struct thread *create_thread(unsigned long user_pc,

View File

@@ -244,7 +244,7 @@ static int
init_process_vm(struct process *owner, struct address_space *asp, struct process_vm *vm)
{
int i;
ihk_mc_rwlock_init(&vm->memory_range_lock);
ihk_mc_spinlock_init(&vm->memory_range_lock);
ihk_mc_spinlock_init(&vm->page_table_lock);
ihk_atomic_set(&vm->refcount, 1);
@@ -785,9 +785,8 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
struct vm_range *range;
struct vm_range *last_insert;
struct copy_args args;
unsigned long irqflags;
memory_range_write_lock(orgvm, &irqflags);
ihk_mc_spinlock_lock_noirq(&orgvm->memory_range_lock);
/* Iterate original process' vm_range list and take a copy one-by-one */
last_insert = NULL;
@@ -849,7 +848,7 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
// memory_stat_rss_add() is called in child-node, i.e. copy_user_pte()
}
memory_range_write_unlock(orgvm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&orgvm->memory_range_lock);
return 0;
@@ -869,14 +868,14 @@ err_rollback:
if (dest_range) {
free_process_memory_range(vm, dest_range);
}
if (src_range == last_insert) {
break;
}
src_range = next_process_memory_range(orgvm, src_range);
}
}
memory_range_write_unlock(orgvm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&orgvm->memory_range_lock);
return -1;
}
@@ -2165,7 +2164,6 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
struct vm_range *range;
struct thread *thread = cpu_local_var(current);
int locked = 0;
unsigned long irqflags;
dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx)\n",
ihk_mc_get_processor_id(), vm, fault_addr0, reason);
@@ -2176,12 +2174,10 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
if (thread->vm->is_memory_range_lock_taken) {
goto skip;
}
irqflags = cpu_disable_interrupt_save();
if (ihk_mc_read_trylock(&vm->memory_range_lock)) {
if (ihk_mc_spinlock_trylock_noirq(&vm->memory_range_lock)) {
locked = 1;
break;
}
cpu_restore_interrupt(irqflags);
}
} else {
skip:;
@@ -2297,8 +2293,7 @@ skip:;
error = 0;
out:
if (locked) {
ihk_mc_read_unlock(&vm->memory_range_lock);
cpu_restore_interrupt(irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
}
dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx): %d\n",
ihk_mc_get_processor_id(), vm, fault_addr0,
@@ -2599,10 +2594,9 @@ void flush_process_memory(struct process_vm *vm)
struct vm_range *range;
struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
int error;
unsigned long irqflags;
dkprintf("flush_process_memory(%p)\n", vm);
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
/* Let concurrent page faults know the VM will be gone */
vm->exiting = 1;
while ((node = next)) {
@@ -2620,7 +2614,7 @@ void flush_process_memory(struct process_vm *vm)
}
}
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dkprintf("flush_process_memory(%p):\n", vm);
return;
}
@@ -2630,13 +2624,12 @@ void free_process_memory_ranges(struct process_vm *vm)
int error;
struct vm_range *range;
struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
unsigned long irqflags;
if (vm == NULL) {
return;
}
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
while ((node = next)) {
range = rb_entry(node, struct vm_range, vm_rb_node);
next = rb_next(node);
@@ -2649,7 +2642,7 @@ void free_process_memory_ranges(struct process_vm *vm)
/* through */
}
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
}
static void free_thread_pages(struct thread *thread)
@@ -2738,9 +2731,8 @@ free_all_process_memory_range(struct process_vm *vm)
struct vm_range *range;
struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
int error;
unsigned long irqflags;
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
while ((node = next)) {
range = rb_entry(node, struct vm_range, vm_rb_node);
next = rb_next(node);
@@ -2753,7 +2745,7 @@ free_all_process_memory_range(struct process_vm *vm)
/* through */
}
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
}
void
@@ -3173,7 +3165,7 @@ void sched_init(void)
&idle_thread->proc->children_list);
ihk_mc_init_context(&idle_thread->ctx, NULL, idle);
ihk_mc_rwlock_init(&idle_thread->vm->memory_range_lock);
ihk_mc_spinlock_init(&idle_thread->vm->memory_range_lock);
idle_thread->vm->vm_range_tree = RB_ROOT;
idle_thread->vm->vm_range_numa_policy_tree = RB_ROOT;
idle_thread->proc->pid = 0;

View File

@@ -174,7 +174,6 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
int err = -EIO;
struct mckernel_procfs_buffer *buf_top = NULL;
struct mckernel_procfs_buffer *buf_cur = NULL;
unsigned long irqflags;
dprintf("process_procfs_request: invoked.\n");
@@ -420,7 +419,7 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
if (strcmp(p, "maps") == 0) {
struct vm_range *range;
memory_range_read_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, 0, -1);
while (range) {
@@ -456,13 +455,13 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
if (ans < 0 || ans > count ||
buf_add(&buf_top, &buf_cur, buf, ans) < 0) {
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
goto err;
}
range = next_process_memory_range(vm, range);
}
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
ans = 0;
goto end;
@@ -485,7 +484,7 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
start = (offset / sizeof(uint64_t)) << PAGE_SHIFT;
end = start + ((count / sizeof(uint64_t)) << PAGE_SHIFT);
memory_range_read_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
while (start < end) {
*_buf = ihk_mc_pt_virt_to_pagemap(proc->vm->address_space->page_table, start);
@@ -495,7 +494,7 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
++_buf;
}
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dprintf("/proc/pagemap: 0x%lx - 0x%lx, count: %d\n",
start, end, count);
@@ -527,14 +526,14 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
goto err;
}
memory_range_read_lock(proc->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&proc->vm->memory_range_lock);
range = lookup_process_memory_range(vm, 0, -1);
while (range) {
if(range->flag & VR_LOCKED)
lockedsize += range->end - range->start;
range = next_process_memory_range(vm, range);
}
memory_range_read_unlock(proc->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&proc->vm->memory_range_lock);
cpu_bitmask = &bitmasks[bitmasks_offset];
bitmasks_offset += bitmap_scnprintf(cpu_bitmask,

View File

@@ -1597,7 +1597,6 @@ do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
struct mckfd *fdp = NULL;
int pgshift;
struct vm_range *range = NULL;
unsigned long irqflags;
dkprintf("do_mmap(%lx,%lx,%x,%x,%d,%lx)\n",
addr0, len0, prot, flags, fd, off0);
@@ -1629,8 +1628,6 @@ do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
flush_nfo_tlb();
memory_range_write_lock(thread->vm, &irqflags);
if (flags & MAP_HUGETLB) {
pgshift = (flags >> MAP_HUGE_SHIFT) & 0x3F;
p2align = pgshift - PAGE_SHIFT;
@@ -1653,6 +1650,8 @@ do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
p2align = PAGE_P2ALIGN;
}
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
if (flags & MAP_FIXED) {
/* clear specified address range */
error = do_munmap((void *)addr, len, 1/* holding memory_range_lock */);
@@ -1941,7 +1940,7 @@ out:
if (ro_vma_mapped) {
(void)set_host_vma(addr, len, PROT_READ | PROT_WRITE | PROT_EXEC, 1/* holding memory_range_lock */);
}
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
if (!error && populated_mapping && !((vrflags & VR_PROT_MASK) == VR_PROT_NONE)) {
error = populate_process_memory(thread->vm,
@@ -1994,7 +1993,6 @@ SYSCALL_DECLARE(munmap)
struct vm_regions *region = &thread->vm->region;
size_t len;
int error;
unsigned long irqflags;
dkprintf("[%d]sys_munmap(%lx,%lx)\n",
ihk_mc_get_processor_id(), addr, len0);
@@ -2010,9 +2008,9 @@ SYSCALL_DECLARE(munmap)
goto out;
}
memory_range_write_lock(thread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
error = do_munmap((void *)addr, len, 1/* holding memory_range_lock */);
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
out:
dkprintf("[%d]sys_munmap(%lx,%lx): %d\n",
@@ -2037,7 +2035,6 @@ SYSCALL_DECLARE(mprotect)
const unsigned long protflags = PROT_TO_VR_FLAG(prot);
unsigned long denied;
int ro_changed = 0;
unsigned long irqflags;
dkprintf("[%d]sys_mprotect(%lx,%lx,%x)\n",
ihk_mc_get_processor_id(), start, len0, prot);
@@ -2067,7 +2064,7 @@ SYSCALL_DECLARE(mprotect)
flush_nfo_tlb();
memory_range_write_lock(thread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
first = lookup_process_memory_range(thread->vm, start, start+PAGE_SIZE);
@@ -2157,7 +2154,7 @@ out:
/* through */
}
}
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
dkprintf("[%d]sys_mprotect(%lx,%lx,%x): %d\n",
ihk_mc_get_processor_id(), start, len0, prot, error);
return error;
@@ -2170,7 +2167,6 @@ SYSCALL_DECLARE(brk)
unsigned long r;
unsigned long vrflag;
unsigned long old_brk_end_allocated = 0;
unsigned long irqflags;
dkprintf("SC(%d)[sys_brk] brk_start=%lx,end=%lx\n",
ihk_mc_get_processor_id(), region->brk_start, region->brk_end);
@@ -2201,11 +2197,11 @@ SYSCALL_DECLARE(brk)
vrflag |= VR_PRIVATE;
vrflag |= VRFLAG_PROT_TO_MAXPROT(vrflag);
old_brk_end_allocated = region->brk_end_allocated;
memory_range_write_lock(cpu_local_var(current)->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&cpu_local_var(current)->vm->memory_range_lock);
region->brk_end_allocated =
extend_process_region(cpu_local_var(current)->vm,
region->brk_end_allocated, address, vrflag);
memory_range_write_unlock(cpu_local_var(current)->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&cpu_local_var(current)->vm->memory_range_lock);
if (old_brk_end_allocated == region->brk_end_allocated) {
r = old_brk_end_allocated;
@@ -2428,9 +2424,8 @@ static void munmap_all(void)
void *addr;
size_t size;
int error;
unsigned long irqflags;
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
next = lookup_process_memory_range(vm, 0, -1);
while ((range = next)) {
next = next_process_memory_range(vm, range);
@@ -2444,7 +2439,7 @@ static void munmap_all(void)
/* through */
}
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
/* free vm_ranges which do_munmap() failed to remove. */
free_process_memory_ranges(thread->vm);
@@ -2475,20 +2470,19 @@ SYSCALL_DECLARE(execve)
struct vm_range *range;
struct process *proc = thread->proc;
int i;
unsigned long irqflags;
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, (unsigned long)filename,
(unsigned long)filename+1);
if (range == NULL || !(range->flag & VR_PROT_READ)) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
kprintf("execve(): ERROR: filename is bad address\n");
return -EFAULT;
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
desc = ihk_mc_alloc_pages(4, IHK_MC_AP_NOWAIT);
if (!desc) {
@@ -4792,7 +4786,6 @@ SYSCALL_DECLARE(mincore)
uint8_t value;
int error;
pte_t *ptep;
unsigned long irqflags;
if (start & (PAGE_SIZE - 1)) {
dkprintf("mincore(0x%lx,0x%lx,%p): EINVAL\n", start, len, vec);
@@ -4809,10 +4802,10 @@ SYSCALL_DECLARE(mincore)
range = NULL;
up = vec;
for (addr = start; addr < end; addr += PAGE_SIZE) {
memory_range_read_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, addr, addr+1);
if (!range) {
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dkprintf("mincore(0x%lx,0x%lx,%p):lookup failed. ENOMEM\n",
start, len, vec);
return -ENOMEM;
@@ -4834,7 +4827,7 @@ SYSCALL_DECLARE(mincore)
value = 0;
}
ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
error = copy_to_user(up, &value, sizeof(value));
if (error) {
@@ -4949,7 +4942,6 @@ SYSCALL_DECLARE(madvise)
int error;
uintptr_t s;
uintptr_t e;
unsigned long irqflags;
dkprintf("[%d]sys_madvise(%lx,%lx,%x)\n",
ihk_mc_get_processor_id(), start, len0, advice);
@@ -5009,7 +5001,7 @@ SYSCALL_DECLARE(madvise)
goto out2;
}
memory_range_write_lock(thread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
/* check contiguous map */
first = NULL;
range = NULL; /* for avoidance of warning */
@@ -5134,7 +5126,7 @@ SYSCALL_DECLARE(madvise)
error = 0;
out:
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
out2:
dkprintf("[%d]sys_madvise(%lx,%lx,%x): %d\n",
@@ -5397,7 +5389,6 @@ SYSCALL_DECLARE(shmat)
int req;
struct shmobj *obj;
size_t pgsize;
unsigned long irqflags;
dkprintf("shmat(%#x,%p,%#x)\n", shmid, shmaddr, shmflg);
@@ -5447,11 +5438,11 @@ SYSCALL_DECLARE(shmat)
return -EACCES;
}
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
if (addr) {
if (lookup_process_memory_range(vm, addr, addr+len)) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
shmobj_list_unlock();
memobj_unref(&obj->memobj);
dkprintf("shmat(%#x,%p,%#x):lookup_process_memory_range succeeded. -ENOMEM\n", shmid, shmaddr, shmflg);
@@ -5461,7 +5452,7 @@ SYSCALL_DECLARE(shmat)
else {
error = search_free_space(len, obj->pgshift, &addr);
if (error) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
shmobj_list_unlock();
memobj_unref(&obj->memobj);
dkprintf("shmat(%#x,%p,%#x):search_free_space failed. %d\n", shmid, shmaddr, shmflg, error);
@@ -5477,7 +5468,7 @@ SYSCALL_DECLARE(shmat)
if (!(prot & PROT_WRITE)) {
error = set_host_vma(addr, len, PROT_READ | PROT_EXEC, 1/* holding memory_range_lock */);
if (error) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
shmobj_list_unlock();
memobj_unref(&obj->memobj);
dkprintf("shmat(%#x,%p,%#x):set_host_vma failed. %d\n", shmid, shmaddr, shmflg, error);
@@ -5492,13 +5483,13 @@ SYSCALL_DECLARE(shmat)
(void)set_host_vma(addr, len, PROT_READ | PROT_WRITE | PROT_EXEC, 1/* holding memory_range_lock */);
}
memobj_unref(&obj->memobj);
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
shmobj_list_unlock();
dkprintf("shmat(%#x,%p,%#x):add_process_memory_range failed. %d\n", shmid, shmaddr, shmflg, error);
return error;
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
shmobj_list_unlock();
dkprintf("shmat(%#x,%p,%#x): 0x%lx. %d\n", shmid, shmaddr, shmflg, addr);
@@ -5783,26 +5774,25 @@ SYSCALL_DECLARE(shmdt)
struct process_vm *vm = thread->vm;
struct vm_range *range;
int error;
unsigned long irqflags;
dkprintf("shmdt(%p)\n", shmaddr);
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, (uintptr_t)shmaddr, (uintptr_t)shmaddr+1);
if (!range || (range->start != (uintptr_t)shmaddr) || !range->memobj
|| !(range->memobj->flags & MF_SHMDT_OK)) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dkprintf("shmdt(%p): -EINVAL\n", shmaddr);
return -EINVAL;
}
error = do_munmap((void *)range->start, (range->end - range->start), 1/* holding memory_range_lock */);
if (error) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dkprintf("shmdt(%p): %d\n", shmaddr, error);
return error;
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dkprintf("shmdt(%p): 0\n", shmaddr);
return 0;
} /* sys_shmdt() */
@@ -7665,7 +7655,6 @@ SYSCALL_DECLARE(mlock)
struct vm_range *range;
int error;
struct vm_range *changed;
unsigned long irqflags;
dkprintf("[%d]sys_mlock(%lx,%lx)\n",
ihk_mc_get_processor_id(), start0, len0);
@@ -7693,7 +7682,7 @@ SYSCALL_DECLARE(mlock)
goto out2;
}
memory_range_write_lock(thread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
/* check contiguous map */
first = NULL;
@@ -7798,7 +7787,7 @@ SYSCALL_DECLARE(mlock)
error = 0;
out:
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
if (!error) {
error = populate_process_memory(thread->vm, (void *)start, len);
@@ -7841,7 +7830,6 @@ SYSCALL_DECLARE(munlock)
struct vm_range *range;
int error;
struct vm_range *changed;
unsigned long irqflags;
dkprintf("[%d]sys_munlock(%lx,%lx)\n",
ihk_mc_get_processor_id(), start0, len0);
@@ -7869,7 +7857,7 @@ SYSCALL_DECLARE(munlock)
goto out2;
}
memory_range_write_lock(thread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
/* check contiguous map */
first = NULL;
@@ -7974,7 +7962,7 @@ SYSCALL_DECLARE(munlock)
error = 0;
out:
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
out2:
dkprintf("[%d]sys_munlock(%lx,%lx): %d\n",
ihk_mc_get_processor_id(), start0, len0, error);
@@ -8027,11 +8015,10 @@ SYSCALL_DECLARE(remap_file_pages)
struct vm_range *range;
int er;
int need_populate = 0;
unsigned long irqflags;
dkprintf("sys_remap_file_pages(%#lx,%#lx,%#x,%#lx,%#x)\n",
start0, size, prot, pgoff, flags);
memory_range_write_lock(thread->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&thread->vm->memory_range_lock);
#define PGOFF_LIMIT ((off_t)1 << ((8*sizeof(off_t) - 1) - PAGE_SHIFT))
if ((size <= 0) || (size & (PAGE_SIZE - 1)) || (prot != 0)
|| (PGOFF_LIMIT <= pgoff)
@@ -8075,7 +8062,7 @@ SYSCALL_DECLARE(remap_file_pages)
}
error = 0;
out:
memory_range_write_unlock(thread->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
if (need_populate
&& (er = populate_process_memory(
@@ -8113,11 +8100,10 @@ SYSCALL_DECLARE(mremap)
uintptr_t ret;
uintptr_t lckstart = -1;
uintptr_t lckend = -1;
unsigned long irqflags;
dkprintf("sys_mremap(%#lx,%#lx,%#lx,%#x,%#lx)\n",
oldaddr, oldsize0, newsize0, flags, newaddr);
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
/* check arguments */
if ((oldaddr & ~PAGE_MASK)
@@ -8313,7 +8299,7 @@ SYSCALL_DECLARE(mremap)
error = 0;
out:
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
if (!error && (lckstart < lckend)) {
error = populate_process_memory(thread->vm, (void *)lckstart, (lckend - lckstart));
if (error) {
@@ -8346,10 +8332,9 @@ SYSCALL_DECLARE(msync)
struct vm_range *range;
uintptr_t s;
uintptr_t e;
unsigned long irqflags;
dkprintf("sys_msync(%#lx,%#lx,%#x)\n", start0, len0, flags);
memory_range_read_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
if ((start0 & ~PAGE_MASK)
|| (flags & ~(MS_ASYNC|MS_INVALIDATE|MS_SYNC))
@@ -8443,7 +8428,7 @@ SYSCALL_DECLARE(msync)
error = 0;
out:
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
dkprintf("sys_msync(%#lx,%#lx,%#x):%d\n", start0, len0, flags, error);
return error;
} /* sys_msync() */
@@ -8492,7 +8477,6 @@ SYSCALL_DECLARE(mbind)
struct vm_range *range;
struct vm_range_numa_policy *range_policy, *range_policy_iter = NULL;
DECLARE_BITMAP(numa_mask, PROCESS_NUMA_MASK_BITS);
unsigned long irqflags;
dkprintf("%s: addr: 0x%lx, len: %lu, mode: 0x%x, "
"nodemask: 0x%lx, flags: %lx\n",
@@ -8625,7 +8609,7 @@ SYSCALL_DECLARE(mbind)
}
/* Validate address range */
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, addr, addr + len);
if (!range) {
@@ -8792,7 +8776,7 @@ mbind_update_only:
error = 0;
unlock_out:
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
out:
return error;
} /* sys_mbind() */
@@ -9021,20 +9005,19 @@ SYSCALL_DECLARE(get_mempolicy)
/* Address range specific? */
if (flags & MPOL_F_ADDR) {
struct vm_range *range;
unsigned long irqflags;
memory_range_read_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, addr, addr + 1);
if (!range) {
dkprintf("%s: ERROR: range is invalid\n", __FUNCTION__);
error = -EFAULT;
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
goto out;
}
range_policy = vm_range_policy_search(vm, addr);
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
}
/* Return policy */

View File

@@ -1011,7 +1011,6 @@ static int xpmem_attach(
struct mcs_rwlock_node_irqsave at_lock;
struct vm_range *vmr;
struct process_vm *vm = cpu_local_var(current)->vm;
unsigned long irqflags;
XPMEM_DEBUG("call: apid=0x%lx, offset=0x%lx, size=0x%lx, vaddr=0x%lx, "
"fd=%d, att_flags=%d",
@@ -1104,12 +1103,12 @@ static int xpmem_attach(
if (flags & MAP_FIXED) {
struct vm_range *existing_vmr;
memory_range_read_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
existing_vmr = lookup_process_memory_range(vm, vaddr,
vaddr + size);
memory_range_read_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
for (; existing_vmr && existing_vmr->start < vaddr + size;
existing_vmr = next_process_memory_range(vm,
@@ -1135,7 +1134,7 @@ static int xpmem_attach(
XPMEM_DEBUG("at_vaddr=0x%lx", at_vaddr);
att->at_vaddr = at_vaddr;
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
vmr = lookup_process_memory_range(vm, at_vaddr, at_vaddr + 1);
@@ -1146,7 +1145,7 @@ static int xpmem_attach(
ekprintf("%s: vmr->memobj equals to NULL\n", __FUNCTION__);
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
if (!vmr) {
ret = -ENOENT;
@@ -1191,22 +1190,21 @@ static int xpmem_detach(
struct mcs_rwlock_node_irqsave at_lock;
struct vm_range *range;
struct process_vm *vm = cpu_local_var(current)->vm;
unsigned long irqflags;
XPMEM_DEBUG("call: at_vaddr=0x%lx", at_vaddr);
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
range = lookup_process_memory_range(vm, at_vaddr, at_vaddr + 1);
if (!range || range->start > at_vaddr) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
return 0;
}
att = (struct xpmem_attachment *)range->private_data;
if (att == NULL) {
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
return -EINVAL;
}
@@ -1216,7 +1214,7 @@ static int xpmem_detach(
if (att->flags & XPMEM_FLAG_DESTROYING) {
mcs_rwlock_writer_unlock(&att->at_lock, &at_lock);
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
xpmem_att_deref(att);
return 0;
}
@@ -1229,7 +1227,7 @@ static int xpmem_detach(
att->flags &= ~XPMEM_FLAG_DESTROYING;
xpmem_ap_deref(ap);
mcs_rwlock_writer_unlock(&att->at_lock, &at_lock);
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
xpmem_att_deref(att);
return -EACCES;
}
@@ -1249,7 +1247,7 @@ static int xpmem_detach(
ekprintf("%s: ERROR: xpmem_vm_munmap() failed %d\n",
__FUNCTION__, ret);
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
DBUG_ON(ret != 0);
att->flags &= ~XPMEM_FLAG_VALIDPTEs;
@@ -1409,7 +1407,6 @@ static void xpmem_detach_att(
struct vm_range *range;
struct process_vm *vm;
struct mcs_rwlock_node_irqsave at_lock;
unsigned long irqflags;
XPMEM_DEBUG("call: apid=0x%lx, att=0x%p", ap->apid, att);
@@ -1418,13 +1415,13 @@ static void xpmem_detach_att(
vm = cpu_local_var(current)->vm ? cpu_local_var(current)->vm : att->vm;
memory_range_write_lock(vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
mcs_rwlock_writer_lock(&att->at_lock, &at_lock);
if (att->flags & XPMEM_FLAG_DESTROYING) {
mcs_rwlock_writer_unlock(&att->at_lock, &at_lock);
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
XPMEM_DEBUG("return: XPMEM_FLAG_DESTROYING");
return;
}
@@ -1438,7 +1435,7 @@ static void xpmem_detach_att(
list_del_init(&att->att_list);
ihk_mc_spinlock_unlock_noirq(&ap->lock);
mcs_rwlock_writer_unlock(&att->at_lock, &at_lock);
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
xpmem_att_destroyable(att);
XPMEM_DEBUG("return: range=%p");
return;
@@ -1474,7 +1471,7 @@ static void xpmem_detach_att(
__FUNCTION__, ret);
}
memory_range_write_unlock(vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
xpmem_att_destroyable(att);
@@ -1573,12 +1570,11 @@ static void xpmem_clear_PTEs_of_att(
{
int ret;
struct mcs_rwlock_node_irqsave at_lock;
unsigned long irqflags;
XPMEM_DEBUG("call: att=0x%p, start=0x%lx, end=0x%lx",
att, start, end);
memory_range_write_lock(att->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&att->vm->memory_range_lock);
mcs_rwlock_writer_lock(&att->at_lock, &at_lock);
if (att->flags & XPMEM_FLAG_VALIDPTEs) {
@@ -1638,7 +1634,7 @@ static void xpmem_clear_PTEs_of_att(
}
out:
mcs_rwlock_writer_unlock(&att->at_lock, &at_lock);
memory_range_write_unlock(att->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&att->vm->memory_range_lock);
XPMEM_DEBUG("return: ");
}
@@ -1653,7 +1649,6 @@ int xpmem_remove_process_memory_range(
struct xpmem_access_permit *ap;
struct xpmem_attachment *att;
struct mcs_rwlock_node_irqsave at_lock;
unsigned long irqflags;
XPMEM_DEBUG("call: vmr=0x%p, att=0x%p", vmr, vmr->private_data);
@@ -1667,7 +1662,8 @@ int xpmem_remove_process_memory_range(
xpmem_att_ref(att);
memory_range_write_lock(cpu_local_var(current)->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(
&cpu_local_var(current)->vm->memory_range_lock);
mcs_rwlock_writer_lock(&att->at_lock, &at_lock);
@@ -1749,7 +1745,8 @@ int xpmem_remove_process_memory_range(
out:
mcs_rwlock_writer_unlock(&att->at_lock, &at_lock);
memory_range_write_unlock(cpu_local_var(current)->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(
&cpu_local_var(current)->vm->memory_range_lock);
xpmem_att_deref(att);
@@ -1901,18 +1898,17 @@ static int xpmem_remap_pte(
size_t att_pgsize;
int att_p2align;
enum ihk_mc_pt_attribute att_attr;
unsigned long irqflags;
XPMEM_DEBUG("call: vmr=0x%p, vaddr=0x%lx, reason=0x%lx, segid=0x%lx, "
"seg_vaddr=0x%lx",
vmr, vaddr, reason, seg->segid, seg_vaddr);
memory_range_read_lock(seg_tg->vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&seg_tg->vm->memory_range_lock);
seg_vmr = lookup_process_memory_range(seg_tg->vm, seg_vaddr,
seg_vaddr + 1);
memory_range_read_unlock(seg_tg->vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&seg_tg->vm->memory_range_lock);
if (!seg_vmr) {
ret = -EFAULT;
@@ -2041,15 +2037,14 @@ static int xpmem_pin_page(
{
int ret;
struct vm_range *range;
unsigned long irqflags;
XPMEM_DEBUG("call: tgid=%d, vaddr=0x%lx", tg->tgid, vaddr);
memory_range_read_lock(src_vm, &irqflags);
ihk_mc_spinlock_lock_noirq(&src_vm->memory_range_lock);
range = lookup_process_memory_range(src_vm, vaddr, vaddr + 1);
memory_range_read_unlock(src_vm, &irqflags);
ihk_mc_spinlock_unlock_noirq(&src_vm->memory_range_lock);
if (!range || range->start > vaddr) {
return -ENOENT;

View File

@@ -1,107 +0,0 @@
diff --git a/arch/arm64/kernel/include/syscall_list.h b/arch/arm64/kernel/include/syscall_list.h
index f911674..52c164f 100644
--- a/arch/arm64/kernel/include/syscall_list.h
+++ b/arch/arm64/kernel/include/syscall_list.h
@@ -134,6 +134,8 @@ SYSCALL_HANDLED(731, util_indicate_clone)
SYSCALL_HANDLED(732, get_system)
SYSCALL_HANDLED(733, util_register_desc)
+SYSCALL_HANDLED(740, setkdebug)
+
/* McKernel Specific */
SYSCALL_HANDLED(801, swapout)
SYSCALL_HANDLED(802, linux_mlock)
diff --git a/arch/x86_64/kernel/include/syscall_list.h b/arch/x86_64/kernel/include/syscall_list.h
index 79eda7f..1f81b0a 100644
--- a/arch/x86_64/kernel/include/syscall_list.h
+++ b/arch/x86_64/kernel/include/syscall_list.h
@@ -174,6 +174,8 @@ SYSCALL_HANDLED(731, util_indicate_clone)
SYSCALL_HANDLED(732, get_system)
SYSCALL_HANDLED(733, util_register_desc)
+SYSCALL_HANDLED(740, setkdebug)
+
/* McKernel Specific */
SYSCALL_HANDLED(801, swapout)
SYSCALL_HANDLED(802, linux_mlock)
diff --git a/kernel/include/process.h b/kernel/include/process.h
index 0a9ff47..ecb464f 100644
--- a/kernel/include/process.h
+++ b/kernel/include/process.h
@@ -573,6 +573,7 @@ struct process {
#endif // PROFILE_ENABLE
int nr_processes; /* For partitioned execution */
int process_rank; /* Rank in partition */
+ int debug_flags;
};
/*
diff --git a/kernel/procfs.c b/kernel/procfs.c
index 5f9675c..a1b6d22 100644
--- a/kernel/procfs.c
+++ b/kernel/procfs.c
@@ -420,6 +420,7 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
if (strcmp(p, "maps") == 0) {
struct vm_range *range;
+ kprintf("read /proc/*/maps\n");
memory_range_read_lock(vm, &irqflags);
range = lookup_process_memory_range(vm, 0, -1);
@@ -485,6 +486,7 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
start = (offset / sizeof(uint64_t)) << PAGE_SHIFT;
end = start + ((count / sizeof(uint64_t)) << PAGE_SHIFT);
+ kprintf("read /proc/*/pagemap\n");
memory_range_read_lock(vm, &irqflags);
while (start < end) {
@@ -527,6 +529,7 @@ int process_procfs_request(struct ikc_scd_packet *rpacket)
goto err;
}
+ kprintf("read /proc/*/status\n");
memory_range_read_lock(proc->vm, &irqflags);
range = lookup_process_memory_range(vm, 0, -1);
while (range) {
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 012ef13..9a34984 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -1635,6 +1635,18 @@ do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
flush_nfo_tlb();
memory_range_write_lock(thread->vm, &irqflags);
+ if(thread->proc->debug_flags) {
+ // sleep 5 sec
+ unsigned long ts = rdtsc();
+ unsigned long nanosecs = 5000000000L;
+ unsigned long tscs = nanosecs * 1000 / ihk_mc_get_ns_per_tsc();
+
+ kprintf("kernel debug sleep 5sec...\n");
+ while (rdtsc() - ts < tscs) {
+ cpu_pause();
+ }
+ kprintf("kernel debug wake up\n");
+ }
if (flags & MAP_HUGETLB) {
pgshift = (flags >> MAP_HUGE_SHIFT) & 0x3F;
@@ -9482,6 +9494,17 @@ SYSCALL_DECLARE(util_register_desc)
return 0;
}
+SYSCALL_DECLARE(setkdebug)
+{
+ int flags = ihk_mc_syscall_arg0(ctx);
+ struct thread *mythread = cpu_local_var(current);
+ struct process *proc = mythread->proc;
+
+ kprintf("set kernel debug flag pid=%d val=%d\n", proc->pid, flags);
+ proc->debug_flags = flags;
+ return 0;
+}
+
void
reset_cputime()
{

View File

@@ -1,22 +0,0 @@
#!/bin/sh
# C452 regression driver: run the dedicated reproducer (C452T01) plus a
# selection of LTP memory-management tests to confirm that the
# memory_range_lock change does not regress mmap/brk/fork behavior.
USELTP=1
USEOSTEST=0

. ../../common.sh
################################################################################

# Test 01: reproducer that reads /proc/*/maps while mmap holds
# memory_range_lock (requires the C452.patch debug kernel).
$MCEXEC ./C452T01

# Remaining tests are LTP cases, encoded as "<ltp-test>:<local-test-id>".
for i in mmap01:02 mmap02:03 mmap03:04 mmap04:05 mmap12:06 brk01:07 fork01:08 \
	 fork02:09 fork03:10; do
	tp=`echo $i | sed 's/:.*//'`
	id=`echo $i | sed 's/.*://'`
	$MCEXEC $LTPBIN/$tp 2>&1 | tee $tp.txt
	ok=`grep TPASS $tp.txt | wc -l`
	ng=`grep TFAIL $tp.txt | wc -l`
	if [ $ng = 0 ]; then
		echo "*** C452T$id: $tp PASS ($ok)"
	else
		# BUG FIX: original printed the literal "%ng"; expand the
		# actual failure count with "$ng".
		echo "*** C452T$id: $tp FAIL (ok=$ok ng=$ng)"
	fi
done

View File

@@ -1,40 +0,0 @@
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
/*
 * C452T01: reproduce contention on vm->memory_range_lock between a
 * /proc/<pid>/maps reader and an in-flight mmap().
 *
 * Requires a kernel patched with C452.patch: syscall 740 (setkdebug)
 * sets a per-process debug flag that makes mmap() sleep ~5s while
 * holding memory_range_lock.  The child cats this process's maps file
 * during that window; the test passes if both sides complete.
 *
 * Exit status: 0 on PASS, 1 if the debug syscall is unavailable
 * (unpatched kernel) or if the child's exec setup fails.
 */
int
main(int argc, char **argv)
{
	pid_t pid;
	int st;

	fprintf(stderr, "*** C452T01 test start\n");
	fflush(stderr);
	pid = fork();
	if (pid == 0) {
		char file[32];

		/* Give the parent time to enter mmap() and take the lock,
		 * then read its /proc/<ppid>/maps via cat. */
		sleep(1);
		sprintf(file, "/proc/%d/maps", getppid());
		execlp("cat", "cat", file, NULL);
		exit(1);	/* only reached if execlp() failed */
	}
	/* Flush before the long mmap() so no buffered output is duplicated
	 * or delayed across the debug sleep. */
	fflush(stdout);
	/* syscall 740 == setkdebug (see C452.patch); arg 1 enables the
	 * 5-second sleep inside mmap() while memory_range_lock is held. */
	if (syscall(740, 1) == -1) {
		fprintf(stderr, "*** C452T01 FAIL no patched kernel\n");
		exit(1);
	}
	/* This mmap() holds memory_range_lock during the debug sleep,
	 * overlapping the child's /proc/*/maps read. */
	mmap(NULL, 4096, PROT_READ|PROT_WRITE,
		MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	syscall(740, 0);	/* disable the debug sleep again */
	/* Reap the child, retrying if interrupted by a signal. */
	while (waitpid(pid, &st, 0) == -1 && errno == EINTR)
		;
	fprintf(stderr, "*** C452T01 PASS\n");
	exit(0);
}

View File

@@ -1,13 +0,0 @@
# Build and run harness for the C452 memory_range_lock reproducer.
CC = gcc
TARGET = C452T01

# Default target: build the reproducer binary.
all:: $(TARGET)

C452T01: C452T01.c
	$(CC) -g -Wall -o $@ $^

# Run the full C452 test suite (reproducer + LTP cases).
test:: all
	sh ./C452.sh

clean::
	rm -f $(TARGET) *.o

View File

@@ -1,36 +0,0 @@
【Issue#452 動作確認】
□ テスト内容
1. Issue 指摘事項の再現確認
以下のパッチ (C452.patch) を McKernel に適用し、意図的に memory_range_lock の
競合を起こすことでテストを容易にする。
- mmap処理のmemory_range_lock取得中にsleep可能とする
- mmap処理のmemory_range_lock取得中にsleepするかどうかを制御する
システムコールを追加する
このパッチ適用カーネルを使ってテストする。
C452T01 memory_range_lock 取得中に /proc/*/maps を参照し、PASS すること
2. LTP を用いて既存処理に影響しないことを確認
メモリ関連処理を変更したため、関連するシステムコールのテストを選定した。
C452T02 mmap01: mmap の基本機能の確認
C452T03 mmap02: mmap の基本機能の確認
C452T04 mmap03: mmap の基本機能の確認
C452T05 mmap04: mmap の基本機能の確認
C452T06 mmap12: mmap の基本機能の確認
C452T07 brk01: brk の基本機能の確認
C452T08 fork01: fork の基本機能の確認 (fork時メモリがコピーされる)
C452T09 fork02: fork の基本機能の確認
C452T10 fork03: fork の基本機能の確認
□ 実行手順
$ make test
McKernelのインストール先や LTP の配置場所は、$HOME/.mck_test_config を
参照する。.mck_test_config は、McKernel をビルドした際に生成される
mck_test_config.sample ファイルを $HOME にコピーし、適宜編集すること。
尚、テスト実行には C452.patch を適用した McKernel を使用すること。
□ 実行結果
C452_x86_64.txt(x86_64実行結果)、C452_arm64.txt(arm64実行結果) 参照。
全ての項目が PASS していることを確認。