move rusage into kernel ELF image (avoid dynamic alloc before NUMA init)

Change-Id: I7fe86244c8707694b379e567b31de65ee2c56887
Author: Balazs Gerofi
Date: 2018-12-24 01:32:55 +09:00
Committed by: Dominique Martinet
parent 4d215de641
commit 60dcd0e798
8 changed files with 56 additions and 58 deletions
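
In essence, the change turns the global rusage bookkeeping object from a dynamically allocated pointer into a statically defined object, so its storage is reserved in the kernel ELF image at link time and nothing has to be allocated before the NUMA subsystem is initialized. A minimal sketch of the idea follows; the struct layout is abbreviated, and the early_alloc() call and the *_sketch helper are made-up names for illustration, not McKernel APIs.

/* Abbreviated stand-in for struct rusage_global; only fields that appear
 * in the diff below are listed. */
struct rusage_global {
	unsigned long total_memory;
	unsigned long rss_current;
	/* ... remaining counters ... */
};

/*
 * Before: rusage was a pointer, so early boot had to obtain its backing
 * storage from a dynamic allocator (hypothetical early_alloc()) before
 * NUMA init was complete:
 *
 *	struct rusage_global *rusage;
 *	rusage = early_alloc(sizeof(*rusage));
 *
 * After: rusage is a plain global object; its storage lives in the kernel
 * ELF image (zero-initialized .bss), so no early allocation is needed.
 */
struct rusage_global rusage;

/* Accessors switch from pointer to direct member access, exactly as in the
 * hunks below (illustrative helper, not the real rusage_rss_add()): */
static inline void rusage_rss_add_sketch(unsigned long size)
{
	__sync_add_and_fetch(&rusage.rss_current, size);	/* was &rusage->rss_current */
}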


@@ -22,11 +22,11 @@ static inline void
 rusage_total_memory_add(unsigned long size)
 {
 #ifdef RUSAGE_DEBUG
-	kprintf("%s: total_memory=%ld,size=%ld\n", __FUNCTION__, rusage->total_memory, size);
+	kprintf("%s: total_memory=%ld,size=%ld\n", __FUNCTION__, rusage.total_memory, size);
 #endif
-	rusage->total_memory += size;
+	rusage.total_memory += size;
 #ifdef RUSAGE_DEBUG
-	kprintf("%s: total_memory=%ld\n", __FUNCTION__, rusage->total_memory);
+	kprintf("%s: total_memory=%ld\n", __FUNCTION__, rusage.total_memory);
 #endif
 }
@@ -38,10 +38,10 @@ rusage_rss_add(unsigned long size)
 	unsigned long retval;
 	struct process_vm *vm;
-	newval = __sync_add_and_fetch(&rusage->rss_current, size);
-	oldval = rusage->memory_max_usage;
+	newval = __sync_add_and_fetch(&rusage.rss_current, size);
+	oldval = rusage.memory_max_usage;
 	while (newval > oldval) {
-		retval = __sync_val_compare_and_swap(&rusage->memory_max_usage,
+		retval = __sync_val_compare_and_swap(&rusage.memory_max_usage,
 				oldval, newval);
 		if (retval == oldval) {
 			break;
@@ -66,7 +66,7 @@ rusage_rss_sub(unsigned long size)
 {
 	struct process_vm *vm = cpu_local_var(current)->vm;
-	__sync_sub_and_fetch(&rusage->rss_current, size);
+	__sync_sub_and_fetch(&rusage.rss_current, size);
 	/* process rss */
 	vm->currss -= size;
@@ -74,22 +74,22 @@ rusage_rss_sub(unsigned long size)
 static inline void memory_stat_rss_add(unsigned long size, int pgsize)
 {
-	ihk_atomic_add_long(size, &rusage->memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
+	ihk_atomic_add_long(size, &rusage.memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
 }
 static inline void memory_stat_rss_sub(unsigned long size, int pgsize)
 {
-	ihk_atomic_add_long(-size, &rusage->memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
+	ihk_atomic_add_long(-size, &rusage.memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
 }
 static inline void rusage_memory_stat_mapped_file_add(unsigned long size, int pgsize)
 {
-	ihk_atomic_add_long(size, &rusage->memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
+	ihk_atomic_add_long(size, &rusage.memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
 }
 static inline void rusage_memory_stat_mapped_file_sub(unsigned long size, int pgsize)
 {
-	ihk_atomic_add_long(-size, &rusage->memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
+	ihk_atomic_add_long(-size, &rusage.memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
 }
 static inline int rusage_memory_stat_add(struct vm_range *range, uintptr_t phys, unsigned long size, int pgsize)
@@ -213,11 +213,11 @@ rusage_kmem_add(unsigned long size)
 	unsigned long oldval;
 	unsigned long retval;
-	newval = __sync_add_and_fetch(&rusage->memory_kmem_usage, size);
-	oldval = rusage->memory_kmem_max_usage;
+	newval = __sync_add_and_fetch(&rusage.memory_kmem_usage, size);
+	oldval = rusage.memory_kmem_max_usage;
 	while (newval > oldval) {
 		retval = __sync_val_compare_and_swap(
-			&rusage->memory_kmem_max_usage,
+			&rusage.memory_kmem_max_usage,
 			oldval, newval);
 		if (retval == oldval) {
 			break;
@@ -229,13 +229,13 @@ rusage_kmem_add(unsigned long size)
 static inline void
 rusage_kmem_sub(unsigned long size)
 {
-	__sync_sub_and_fetch(&rusage->memory_kmem_usage, size);
+	__sync_sub_and_fetch(&rusage.memory_kmem_usage, size);
 }
 static inline void
 rusage_numa_add(int numa_id, unsigned long size)
 {
-	__sync_add_and_fetch(rusage->memory_numa_stat + numa_id, size);
+	__sync_add_and_fetch(rusage.memory_numa_stat + numa_id, size);
 	rusage_rss_add(size);
 }
@@ -243,7 +243,7 @@ static inline void
 rusage_numa_sub(int numa_id, unsigned long size)
 {
 	rusage_rss_sub(size);
-	__sync_sub_and_fetch(rusage->memory_numa_stat + numa_id, size);
+	__sync_sub_and_fetch(rusage.memory_numa_stat + numa_id, size);
 }
 static inline int
@@ -251,8 +251,8 @@ rusage_check_oom(int numa_id, unsigned long pages, int is_user)
 {
 	unsigned long size = pages * PAGE_SIZE;
-	if (rusage->total_memory_usage + size > rusage->total_memory - RUSAGE_OOM_MARGIN) {
-		kprintf("%s: memory used:%ld available:%ld\n", __FUNCTION__, rusage->total_memory_usage, rusage->total_memory);
+	if (rusage.total_memory_usage + size > rusage.total_memory - RUSAGE_OOM_MARGIN) {
+		kprintf("%s: memory used:%ld available:%ld\n", __FUNCTION__, rusage.total_memory_usage, rusage.total_memory);
 		eventfd(IHK_OS_EVENTFD_TYPE_OOM);
 		if (is_user) {
 			return -ENOMEM;
@@ -271,7 +271,7 @@ rusage_page_add(int numa_id, unsigned long pages, int is_user)
 	unsigned long retval;
 #ifdef RUSAGE_DEBUG
-	if (numa_id < 0 || numa_id >= rusage->num_numa_nodes) {
+	if (numa_id < 0 || numa_id >= rusage.num_numa_nodes) {
 		kprintf("%s: Error: invalid numa_id=%d\n", __FUNCTION__, numa_id);
 		return;
 	}
@@ -281,16 +281,16 @@ rusage_page_add(int numa_id, unsigned long pages, int is_user)
 	else
 		rusage_kmem_add(size);
-	newval = __sync_add_and_fetch(&rusage->total_memory_usage, size);
-	oldval = rusage->total_memory_max_usage;
+	newval = __sync_add_and_fetch(&rusage.total_memory_usage, size);
+	oldval = rusage.total_memory_max_usage;
 	while (newval > oldval) {
-		retval = __sync_val_compare_and_swap(&rusage->total_memory_max_usage,
+		retval = __sync_val_compare_and_swap(&rusage.total_memory_max_usage,
 				oldval, newval);
 		if (retval == oldval) {
 #ifdef RUSAGE_DEBUG
-			if (rusage->total_memory_max_usage > rusage->total_memory_max_usage_old + (1 * (1ULL << 30))) {
-				kprintf("%s: max(%ld) > old + 1GB,numa_id=%d\n", __FUNCTION__, rusage->total_memory_max_usage, numa_id);
-				rusage->total_memory_max_usage_old = rusage->total_memory_max_usage;
+			if (rusage.total_memory_max_usage > rusage.total_memory_max_usage_old + (1 * (1ULL << 30))) {
+				kprintf("%s: max(%ld) > old + 1GB,numa_id=%d\n", __FUNCTION__, rusage.total_memory_max_usage, numa_id);
+				rusage.total_memory_max_usage_old = rusage.total_memory_max_usage;
 			}
 #endif
 			break;
@@ -304,15 +304,15 @@ rusage_page_sub(int numa_id, unsigned long pages, int is_user)
 {
 	unsigned long size = pages * PAGE_SIZE;
 #ifdef RUSAGE_DEBUG
-	if (numa_id < 0 || numa_id >= rusage->num_numa_nodes) {
+	if (numa_id < 0 || numa_id >= rusage.num_numa_nodes) {
 		kprintf("%s: Error: invalid numa_id=%d\n", __FUNCTION__, numa_id);
 		return;
 	}
-	if (rusage->total_memory_usage < size) {
-		kprintf("%s: Error, total_memory_usage=%ld,size=%ld\n", __FUNCTION__, rusage->total_memory_max_usage, size);
+	if (rusage.total_memory_usage < size) {
+		kprintf("%s: Error, total_memory_usage=%ld,size=%ld\n", __FUNCTION__, rusage.total_memory_max_usage, size);
 	}
 #endif
-	__sync_sub_and_fetch(&rusage->total_memory_usage, size);
+	__sync_sub_and_fetch(&rusage.total_memory_usage, size);
 	if (is_user)
 		rusage_numa_sub(numa_id, size);
@@ -327,10 +327,10 @@ rusage_num_threads_inc()
 	unsigned long oldval;
 	unsigned long retval;
-	newval = __sync_add_and_fetch(&rusage->num_threads, 1);
-	oldval = rusage->max_num_threads;
+	newval = __sync_add_and_fetch(&rusage.num_threads, 1);
+	oldval = rusage.max_num_threads;
 	while (newval > oldval) {
-		retval = __sync_val_compare_and_swap(&rusage->
+		retval = __sync_val_compare_and_swap(&rusage.
 				max_num_threads,
 				oldval, newval);
 		if (retval == oldval) {
@@ -343,7 +343,7 @@ rusage_num_threads_inc()
 static inline void
 rusage_num_threads_dec()
 {
-	__sync_sub_and_fetch(&rusage->num_threads, 1);
+	__sync_sub_and_fetch(&rusage.num_threads, 1);
 }
 #else
 static inline void
@@ -428,6 +428,6 @@ rusage_num_threads_dec()
 }
 #endif // ENABLE_RUSAGE
-extern struct rusage_global *rusage;
+extern struct rusage_global rusage;
 #endif /* !defined(RUSAGE_PRIVATE_H_INCLUDED) */