use ihk_mc_alloc/free_pages() and eliminate direct calls to low level routines
This commit is contained in:
@@ -32,7 +32,7 @@ void cpu_local_var_init(void)
|
||||
z = sizeof(struct cpu_local_var) * num_processors;
|
||||
z = (z + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
clv = allocate_pages(z, IHK_MC_AP_CRITICAL);
|
||||
clv = ihk_mc_alloc_pages(z, IHK_MC_AP_CRITICAL);
|
||||
memset(clv, 0, z * PAGE_SIZE);
|
||||
cpu_local_var_initialized = 1;
|
||||
}
|
||||
|
||||
@@ -281,7 +281,7 @@ static void fileobj_release(struct memobj *memobj)
|
||||
}
|
||||
|
||||
page->mode = PM_NONE;
|
||||
free_pages(phys_to_virt(page_to_phys(page)), 1);
|
||||
ihk_mc_free_pages(phys_to_virt(page_to_phys(page)), 1);
|
||||
}
|
||||
obj_list_remove(free_obj);
|
||||
ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
|
||||
|
||||
@@ -39,8 +39,6 @@ struct page *phys_to_page(uintptr_t phys);
|
||||
uintptr_t page_to_phys(struct page *page);
|
||||
int page_unmap(struct page *page);
|
||||
|
||||
void *allocate_pages(int npages, enum ihk_mc_ap_flag flag);
|
||||
void free_pages(void *va, int npages);
|
||||
void begin_free_pages_pending(void);
|
||||
void finish_free_pages_pending(void);
|
||||
|
||||
|
||||
@@ -75,7 +75,8 @@ static void reserve_pages(unsigned long start, unsigned long end, int type)
|
||||
ihk_pagealloc_reserve(pa_allocator, start, end);
|
||||
}
|
||||
|
||||
void *allocate_aligned_pages(int npages, int p2align, enum ihk_mc_ap_flag flag)
|
||||
static void *allocate_aligned_pages(int npages, int p2align,
|
||||
enum ihk_mc_ap_flag flag)
|
||||
{
|
||||
unsigned long pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
|
||||
/* ihk_pagealloc_alloc returns zero when an error occurred,
|
||||
@@ -87,12 +88,12 @@ void *allocate_aligned_pages(int npages, int p2align, enum ihk_mc_ap_flag flag)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *allocate_pages(int npages, enum ihk_mc_ap_flag flag)
|
||||
static void *allocate_pages(int npages, enum ihk_mc_ap_flag flag)
|
||||
{
|
||||
return allocate_aligned_pages(npages, PAGE_P2ALIGN, flag);
|
||||
}
|
||||
|
||||
void free_pages(void *va, int npages)
|
||||
static void free_pages(void *va, int npages)
|
||||
{
|
||||
struct list_head *pendings = &cpu_local_var(pending_free_pages);
|
||||
struct page *page;
|
||||
@@ -511,7 +512,7 @@ static void page_init(void)
|
||||
allocsize = sizeof(struct page) * npages;
|
||||
allocpages = (allocsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
pa_pages = allocate_pages(allocpages, IHK_MC_AP_CRITICAL);
|
||||
pa_pages = ihk_mc_alloc_pages(allocpages, IHK_MC_AP_CRITICAL);
|
||||
memset(pa_pages, 0, allocsize);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -265,7 +265,7 @@ void shmobj_destroy(struct shmobj *obj)
|
||||
}
|
||||
|
||||
page->mode = PM_NONE;
|
||||
free_pages(phys_to_virt(page_to_phys(page)), npages);
|
||||
ihk_mc_free_pages(phys_to_virt(page_to_phys(page)), npages);
|
||||
}
|
||||
if (obj->index < 0) {
|
||||
kfree(obj);
|
||||
|
||||
Reference in New Issue
Block a user