xpmem: support large page
1. Try to use as large a page size as possible on attach.
2. Pre-map resident remote pages on attach.

Change-Id: I5580682a4199e94085a9bad9ce3958a0f14cdcea
Committed by: Masamichi Takagi
Parent: 3aaa5350f0
Commit: d2db639853
@@ -1465,7 +1465,7 @@ int add_process_memory_range(struct process_vm *vm,
 		unsigned long start, unsigned long end,
 		unsigned long phys, unsigned long flag,
 		struct memobj *memobj, off_t offset,
-		int pgshift, struct vm_range **rp)
+		int pgshift, void *private_data, struct vm_range **rp)
 {
 	dkprintf("%s: start=%lx,end=%lx,phys=%lx,flag=%lx\n", __FUNCTION__, start, end, phys, flag);
 	struct vm_range *range;
@@ -1493,7 +1493,7 @@ int add_process_memory_range(struct process_vm *vm,
 	range->memobj = memobj;
 	range->objoff = offset;
 	range->pgshift = pgshift;
-	range->private_data = NULL;
+	range->private_data = private_data;
 	range->straight_start = 0;
 #ifdef ENABLE_TOFU
 	INIT_LIST_HEAD(&range->tofu_stag_list);
@@ -1517,6 +1517,10 @@ int add_process_memory_range(struct process_vm *vm,
 	else if (flag & VR_IO_NOCACHE) {
 		rc = update_process_page_table(vm, range, phys, PTATTR_UNCACHABLE);
 	}
+	else if (flag & VR_XPMEM) {
+		range->memobj->flags |= MF_XPMEM;
+		// xpmem_update_process_page_table() is called in do_mmap()
+	}
 	else if (flag & VR_DEMAND_PAGING) {
 		dkprintf("%s: range: 0x%lx - 0x%lx is demand paging\n",
 			__FUNCTION__, range->start, range->end);
@@ -1539,7 +1543,8 @@ int add_process_memory_range(struct process_vm *vm,
 	}
 
 	/* Clear content! */
-	if (phys != NOPHYS && !(flag & (VR_REMOTE | VR_DEMAND_PAGING))
+	if (phys != NOPHYS
+	    && !(flag & (VR_REMOTE | VR_DEMAND_PAGING | VR_XPMEM))
 	    && ((flag & VR_PROT_MASK) != VR_PROT_NONE)) {
 
 		if (!zero_at_free) {
@@ -2074,7 +2079,9 @@ out:
 	return error;
 }
 
-static int page_fault_process_memory_range(struct process_vm *vm, struct vm_range *range, uintptr_t fault_addr, uint64_t reason)
+int page_fault_process_memory_range(struct process_vm *vm,
+		struct vm_range *range,
+		uintptr_t fault_addr, uint64_t reason)
 {
 	int error;
 	pte_t *ptep;
@@ -2621,7 +2628,8 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
 	vrflag |= VR_MAXPROT_READ | VR_MAXPROT_WRITE | VR_MAXPROT_EXEC;
 #define NOPHYS ((uintptr_t)-1)
 	if ((rc = add_process_memory_range(thread->vm, start, end, NOPHYS,
-			vrflag, NULL, 0, USER_STACK_PAGE_SHIFT, &range)) != 0) {
+			vrflag, NULL, 0, USER_STACK_PAGE_SHIFT,
+			NULL, &range)) != 0) {
 		ihk_mc_free_pages_user(stack, minsz >> PAGE_SHIFT);
 		kprintf("%s: error addding process memory range: %d\n", rc);
 		return rc;
@@ -2795,7 +2803,7 @@ unsigned long extend_process_region(struct process_vm *vm,
 
 	if ((rc = add_process_memory_range(vm, end_allocated, new_end_allocated,
			(p == 0 ? 0 : virt_to_phys(p)), flag, NULL, 0,
-			align_shift, NULL)) != 0) {
+			align_shift, NULL, NULL)) != 0) {
 		ihk_mc_free_pages_user(p, npages);
 		return end_allocated;
 	}
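
For orientation, below is a minimal, self-contained C sketch of the plumbing pattern the diff introduces: an opaque private_data pointer is threaded through range registration, stored on the range, and consulted later (e.g. by a fault or attach path). The toy_* names and the attach-descriptor stand-in are hypothetical illustrations only, not McKernel code; the only facts taken from the commit are the new add_process_memory_range() private_data parameter, the comment that xpmem_update_process_page_table() is called in do_mmap(), and that existing callers pass NULL.

/* Illustrative sketch only -- none of these names exist in McKernel. */
#include <stdio.h>
#include <stdlib.h>

struct toy_range {
	unsigned long start;
	unsigned long end;
	void *private_data;	/* opaque, owned by the registering caller */
};

/* Analogous to add_process_memory_range() gaining a private_data argument. */
static int toy_add_range(struct toy_range **rp, unsigned long start,
			 unsigned long end, void *private_data)
{
	struct toy_range *range = calloc(1, sizeof(*range));

	if (!range)
		return -1;
	range->start = start;
	range->end = end;
	range->private_data = private_data;	/* was unconditionally NULL before */
	*rp = range;
	return 0;
}

/* Analogous to a fault handler that can now consult per-range data. */
static void toy_handle_fault(struct toy_range *range, unsigned long addr)
{
	if (range->private_data)
		printf("fault at %#lx: range carries attach data %p\n",
		       addr, range->private_data);
	else
		printf("fault at %#lx: ordinary range\n", addr);
}

int main(void)
{
	struct toy_range *plain, *attached;
	int attach_desc = 42;	/* stands in for per-attach state */

	/* Callers without per-range state pass NULL, as init_process_stack()
	 * and extend_process_region() do in the hunks above. */
	if (toy_add_range(&plain, 0x1000, 0x2000, NULL) != 0)
		return 1;
	/* An attach path hands its state to the later fault handling. */
	if (toy_add_range(&attached, 0x2000, 0x4000, (void *)&attach_desc) != 0)
		return 1;

	toy_handle_fault(plain, 0x1100);
	toy_handle_fault(attached, 0x2100);
	free(plain);
	free(attached);
	return 0;
}

The design choice the sketch mirrors is that the registration function stays generic: callers that have nothing to attach pass NULL and see no behavior change, while the xpmem path can associate per-range state without a new lookup table.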