add shared mapped file (in progress)

implemented:
- Pages can be shared between mappings.
- Changes made through a mapping are written back to the file at munmap()
  (see the user-space sketch below).

not yet implemented:
- VM operations during page I/O, because page I/O is currently executed
  while the VM's lock is held.
- Page I/O that is guaranteed never to change the file size: when munmap()
  races with truncate(), the file size may be changed incorrectly.
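
A minimal user-space illustration of the path this commit targets (plain
POSIX code, nothing McKernel-specific; the file name and length are
arbitrary):

/* map a file MAP_SHARED, dirty a page, and let munmap() write it back */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("shared.dat", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("open/ftruncate");
		return 1;
	}

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memcpy(p, "hello", 5);	/* dirty the shared page */
	munmap(p, 4096);	/* under this design, the page is written back here */
	close(fd);
	return 0;
}

(On stock Linux an msync(MS_SYNC) before the munmap() would make the
write-back explicit; here the flush at munmap() is exactly what this
commit implements.)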
NAKAMURA Gou
2013-11-06 17:17:56 +09:00
parent d35140ab0b
commit bbbc6e1570
8 changed files with 153 additions and 24 deletions


@@ -33,12 +33,14 @@ static memobj_release_func_t fileobj_release;
static memobj_ref_func_t fileobj_ref;
static memobj_get_page_func_t fileobj_get_page;
static memobj_copy_page_func_t fileobj_copy_page;
static memobj_flush_page_func_t fileobj_flush_page;
static struct memobj_ops fileobj_ops = {
.release = &fileobj_release,
.ref = &fileobj_ref,
.get_page = &fileobj_get_page,
.copy_page = &fileobj_copy_page,
.flush_page = &fileobj_flush_page,
};
static struct fileobj *to_fileobj(struct memobj *memobj)
@@ -528,3 +530,31 @@ out:
memobj, orgpa, p2align, newpa);
return newpa;
}
static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
size_t pgsize)
{
struct fileobj *obj = to_fileobj(memobj);
struct page *page;
ihk_mc_user_context_t ctx;
ssize_t ss;
page = phys_to_page(phys);
memobj_unlock(&obj->memobj);
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_WRITE;
ihk_mc_syscall_arg1(&ctx) = obj->handle;
ihk_mc_syscall_arg2(&ctx) = page->offset;
ihk_mc_syscall_arg3(&ctx) = pgsize;
ihk_mc_syscall_arg4(&ctx) = phys;
ss = syscall_generic_forwarding(__NR_mmap, &ctx);
if (ss != pgsize) {
dkprintf("fileobj_flush_page(%p,%lx,%lx): %ld (%lx)\n",
memobj, phys, pgsize, ss, ss);
/* through */
}
memobj_lock(&obj->memobj);
return 0;
}
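
The forwarded request above carries the pager handle, the file offset, the
length, and the physical address of the page.  The host-side handling is not
part of this diff; as a rough sketch, and assuming the handle resolves to an
open file descriptor and the page contents are visible to the host as a
buffer (the function and parameter names below are illustrative, not from
this commit), servicing PAGER_REQ_WRITE amounts to:

#include <sys/types.h>
#include <unistd.h>

/* hypothetical host-side sketch: write one page image back to the file */
static ssize_t pager_write_page(int fd, const void *page_image,
				size_t pgsize, off_t off)
{
	/* pwrite() does not move the file offset, matching the explicit
	 * offset carried in the request */
	ssize_t ss = pwrite(fd, page_image, pgsize, off);

	/* fileobj_flush_page() only logs when the result differs from
	 * pgsize, so a short or failed write is simply passed back */
	return ss;
}

Note that fileobj_flush_page() drops the memobj lock around the forwarded
call, presumably because the request blocks until the host has finished
writing.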


@@ -15,12 +15,14 @@ typedef void memobj_release_func_t(struct memobj *obj);
typedef void memobj_ref_func_t(struct memobj *obj);
typedef int memobj_get_page_func_t(struct memobj *obj, off_t off, int p2align, uintptr_t *physp);
typedef uintptr_t memobj_copy_page_func_t(struct memobj *obj, uintptr_t orgphys, int p2align);
typedef int memobj_flush_page_func_t(struct memobj *obj, uintptr_t phys, size_t pgsize);
struct memobj_ops {
memobj_release_func_t * release;
memobj_ref_func_t * ref;
memobj_get_page_func_t * get_page;
memobj_copy_page_func_t * copy_page;
memobj_flush_page_func_t * flush_page;
};
static inline void memobj_release(struct memobj *obj)
@@ -45,6 +47,11 @@ static inline uintptr_t memobj_copy_page(struct memobj *obj,
return (*obj->ops->copy_page)(obj, orgphys, p2align);
}
static inline int memobj_flush_page(struct memobj *obj, uintptr_t phys, size_t pgsize)
{
return (*obj->ops->flush_page)(obj, phys, pgsize);
}
static inline void memobj_lock(struct memobj *obj)
{
ihk_mc_spinlock_lock_noirq(&obj->lock);


@@ -7,6 +7,7 @@ enum pager_op {
PAGER_REQ_CREATE = 0x0001,
PAGER_REQ_RELEASE = 0x0002,
PAGER_REQ_READ = 0x0003,
PAGER_REQ_WRITE = 0x0004,
};
/*


@@ -324,15 +324,15 @@ int free_process_memory_range(struct process_vm *vm, struct vm_range *range)
memobj_lock(range->memobj);
}
error = ihk_mc_pt_free_range(vm->page_table,
-(void *)start, (void *)end);
+(void *)start, (void *)end, range->memobj);
if (range->memobj) {
memobj_unlock(range->memobj);
}
ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
if (error && (error != -ENOENT)) {
ekprintf("free_process_memory_range(%p,%lx-%lx):"
"ihk_mc_pt_free_range(%lx-%lx) failed. %d\n",
vm, start0, end0, start, end, error);
"ihk_mc_pt_free_range(%lx-%lx,%p) failed. %d\n",
vm, start0, end0, start, end, range->memobj, error);
/* through */
}
}
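
Passing range->memobj down means the page-table teardown can write each
file-backed page out before releasing it.  Conceptually, the per-page step
inside ihk_mc_pt_free_range() looks like the sketch below (the helper and
the page-release call are hypothetical placeholders, not code from this
commit):

/* conceptual per-page step of the range teardown (names illustrative) */
static void free_one_page(struct memobj *memobj, uintptr_t phys, size_t pgsize)
{
	if (memobj) {
		/* the caller (free_process_memory_range) already holds
		 * the memobj lock, as shown above */
		memobj_flush_page(memobj, phys, pgsize);
	}

	/* placeholder for the actual physical page release */
	release_physical_page(phys, pgsize);
}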


@@ -464,14 +464,6 @@ SYSCALL_DECLARE(mmap)
goto out2;
}
-if ((flags & MAP_SHARED) && !(flags & MAP_ANONYMOUS)) {
-ekprintf("sys_mmap(%lx,%lx,%x,%x,%x,%lx):NYI:shared mapped file%lx\n",
-addr0, len0, prot, flags, fd, off,
-(flags & ~(supported_flags | ignored_flags)));
-error = -EINVAL;
-goto out2;
-}
ihk_mc_spinlock_lock_noirq(&proc->vm->memory_range_lock);
if (flags & MAP_FIXED) {