Fix rusage

This commit is contained in:
Masamichi Takagi
2017-06-02 14:12:55 +09:00
parent 12e7ed644f
commit 6bdafbd33b
7 changed files with 119 additions and 77 deletions

View File

@@ -1,17 +1,12 @@
ENABLE_RUSAGE=@ENABLE_RUSAGE@
VPATH=@abs_srcdir@
SRC=$(VPATH)
IHKDIR=$(IHKBASE)/$(TARGETDIR)
OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o profile.o rusage.o freeze.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o rusage.o profile.o freeze.o
DEPSRCS=$(wildcard $(SRC)/*.c)
DOPT=
ifeq ($(ENABLE_RUSAGE),yes)
DOPT=-DENABLE_RUSAGE
endif
CFLAGS += -I$(SRC)/include -D__KERNEL__ $(DOPT) -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
CFLAGS += -I$(SRC)/include -I@abs_builddir@/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
LDFLAGS += -e arch_start
IHKOBJ = ihk/ihk.o

View File

@@ -1,6 +1,8 @@
#ifndef __RUSAGE_H
#define __RUSAGE_H
#include "config.h"
#define RUSAGE_DEFAULT_SIZE 10
enum RUSAGE_MEMBER {
@@ -64,7 +66,6 @@ enum sys_delegate_state_enum {
};
enum sys_delegate_state_enum sys_delegate_state;
unsigned long rusage_rss[sizeof(cpu_set_t)/8];
unsigned long rusage_rss_max;
long rusage_rss_current;
unsigned long rusage_kmem_usage;
@@ -76,4 +77,7 @@ unsigned long rusage_max_memory;
#define RUSAGE_MEM_LIMIT 2000000
void rusage_init();
void rusage_inc_num_threads(int count);
#endif

View File

@@ -334,23 +334,7 @@ struct thread *create_thread(unsigned long user_pc,
ihk_mc_spinlock_init(&thread->spin_sleep_lock);
thread->spin_sleep = 0;
#ifdef ENABLE_RUSAGE
{
int processor_id;
unsigned long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += KERNEL_STACK_NR_PAGES * PAGE_SIZE;
curr = ihk_atomic_add_long_return ( KERNEL_STACK_NR_PAGES * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
}
if (rusage_max_memory - curr < RUSAGE_MEM_LIMIT) {
event_signal();
}
ihk_atomic_add_ulong ( 1, &rusage_num_threads);
if (rusage_max_num_threads < rusage_num_threads) {
atomic_cmpxchg8(&rusage_max_num_threads, rusage_max_num_threads, rusage_num_threads);
}
}
rusage_inc_num_threads(1);
#endif
return thread;
@@ -503,25 +487,7 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
thread->spin_sleep = 0;
#ifdef ENABLE_RUSAGE
{
int processor_id;
long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += KERNEL_STACK_NR_PAGES * PAGE_SIZE;
curr = ihk_atomic_add_long_return (KERNEL_STACK_NR_PAGES * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
}
if (rusage_max_memory - curr < RUSAGE_MEM_LIMIT) {
event_signal();
}
ihk_atomic_add_ulong ( 1, &rusage_num_threads);
if (rusage_max_num_threads < rusage_num_threads) {
atomic_cmpxchg8(&rusage_max_num_threads, rusage_max_num_threads, rusage_num_threads);
}
}
rusage_inc_num_threads(1);
#endif
#ifdef PROFILE_ENABLE
@@ -2040,11 +2006,7 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
#ifdef ENABLE_RUSAGE
{
int processor_id;
long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += (minsz >> PAGE_SHIFT) * PAGE_SIZE;
curr = ihk_atomic_add_long_return ((minsz >> PAGE_SHIFT) * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
@@ -2054,7 +2016,6 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
}
}
#endif
return 0;
}
@@ -2102,10 +2063,7 @@ unsigned long extend_process_region(struct process_vm *vm,
#ifdef ENABLE_RUSAGE
{
int processor_id;
long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += ((new_end_allocated - end_allocated) >> PAGE_SHIFT) * PAGE_SIZE;
curr = ihk_atomic_add_long_return (((new_end_allocated - end_allocated) >> PAGE_SHIFT) * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
@@ -2419,10 +2377,6 @@ void destroy_thread(struct thread *thread)
#ifdef ENABLE_RUSAGE
{
int processor_id;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] -= KERNEL_STACK_NR_PAGES * PAGE_SIZE;
ihk_atomic_add_long_return(KERNEL_STACK_NR_PAGES * PAGE_SIZE * (-1) , &rusage_rss_current);
ihk_atomic_add_ulong ( -1, &rusage_num_threads);
}
#endif

View File

@@ -34,14 +34,40 @@ extern struct ihk_os_monitor *monitor;
#ifdef ENABLE_RUSAGE
/* initialize rusage */
/* Reset every rusage accounting counter to its boot-time default.
 * Called once during kernel bring-up before any threads are tracked. */
void rusage_init() {
	int node;

	/* OS lifecycle state starts out inactive. */
	os_status = IHK_STATUS_INACTIVE;

	/* Thread counters. */
	rusage_num_threads = 0;
	rusage_max_num_threads = 0;

	/* Resident-set and hugetlb counters. */
	rusage_rss_current = 0;
	rusage_rss_max = 0;
	rusage_hugetlb_usage = 0;
	rusage_hugetlb_max_usage = 0;

	/* Per-NUMA-node statistics.
	 * NOTE(review): 1024 is assumed to match the capacity of
	 * rusage_numa_stat -- confirm against its definition. */
	node = 0;
	while (node < 1024) {
		rusage_numa_stat[node] = 0;
		node++;
	}
}
/* Atomically add 'count' to the global thread counter and, if the new
 * total exceeds the recorded high-water mark, raise the mark with a
 * lock-free compare-and-swap retry loop.  Safe against concurrent
 * callers racing to update rusage_max_num_threads. */
void rusage_inc_num_threads(int count) {
volatile unsigned long max_obs1, max_obs2;
/* Bump the live thread count first. */
ihk_atomic_add_ulong(count, &rusage_num_threads);
/* Snapshot the current high-water mark. */
max_obs1 = rusage_max_num_threads;
if (max_obs1 < rusage_num_threads) {
retry:
/* CAS returns the value observed in rusage_max_num_threads:
 * equal to max_obs1 on success, otherwise another CPU won the race. */
max_obs2 = atomic_cmpxchg8(&rusage_max_num_threads, max_obs1, rusage_num_threads);
/* Retry only if we lost the race AND the winner's value is still
 * below the count we are trying to publish; otherwise the mark is
 * already at least as large as ours and we are done. */
if(max_obs2 != max_obs1 &&
max_obs2 < rusage_num_threads) {
max_obs1 = max_obs2;
goto retry;
}
}
}
/* count total rss
 *
 * Returns the process-wide resident set size in bytes.  The old
 * implementation summed the per-CPU rusage_rss[] array; this commit
 * replaces that accounting with the single atomic counter
 * rusage_rss_current, which made the summation loop dead code (it sat
 * in front of an unreachable second return statement).  The counter is
 * now the sole authoritative value. */
unsigned long count_rss () {
	/* NOTE(review): rusage_rss_current is declared long elsewhere;
	 * the implicit conversion to unsigned long assumes it is never
	 * negative -- confirm against the accounting sites. */
	return rusage_rss_current;
}
/* count total cache */