profile: rewrite syscall tracker for generic profiling code

This commit is contained in:
Balazs Gerofi
2017-02-25 10:58:48 +09:00
parent 989af7e045
commit e2f424846c
7 changed files with 401 additions and 288 deletions

View File

@@ -4,7 +4,7 @@ SRC=$(VPATH)
IHKDIR=$(IHKBASE)/$(TARGETDIR)
OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o rusage.o freeze.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o profile.o rusage.o freeze.o
DEPSRCS=$(wildcard $(SRC)/*.c)
DOPT=
ifeq ($(ENABLE_RUSAGE),yes)

View File

@@ -23,6 +23,7 @@
#include <affinity.h>
#include <syscall.h>
#include <bitops.h>
#include <profile.h>
#define VR_NONE 0x0
#define VR_STACK 0x1
@@ -243,27 +244,6 @@ struct process_vm;
struct vm_regions;
struct vm_range;
//#define TRACK_SYSCALLS
#ifdef TRACK_SYSCALLS
#define TRACK_SYSCALLS_MAX 300
#define __NR_track_syscalls 701
#define TRACK_SYSCALLS_CLEAR 0x01
#define TRACK_SYSCALLS_ON 0x02
#define TRACK_SYSCALLS_OFF 0x04
#define TRACK_SYSCALLS_PRINT 0x08
#define TRACK_SYSCALLS_PRINT_PROC 0x10
void track_syscalls_print_thread_stats(struct thread *thread);
void track_syscalls_print_proc_stats(struct process *proc);
void track_syscalls_accumulate_counters(struct thread *thread,
struct process *proc);
void track_syscalls_alloc_counters(struct thread *thread);
void track_syscalls_dealloc_thread_counters(struct thread *thread);
void track_syscalls_dealloc_proc_counters(struct process *proc);
#endif // TRACK_SYSCALLS
#define HASH_SIZE 73
@@ -565,13 +545,10 @@ struct process {
#define PP_COUNT 2
#define PP_STOP 3
struct mc_perf_event *monitoring_event;
#ifdef TRACK_SYSCALLS
mcs_lock_node_t st_lock;
uint64_t *syscall_times;
uint32_t *syscall_cnts;
uint64_t *offload_times;
uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
mcs_lock_node_t profile_lock;
struct profile_event *profile_events;
#endif // PROFILE_ENABLE
};
void hold_thread(struct thread *ftn);
@@ -644,13 +621,10 @@ struct thread {
fp_regs_struct *fp_regs;
int in_syscall_offload;
#ifdef TRACK_SYSCALLS
int track_syscalls;
uint64_t *syscall_times;
uint32_t *syscall_cnts;
uint64_t *offload_times;
uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
int profile;
struct profile_event *profile_events;
#endif // PROFILE_ENABLE
// signal
struct sig_common *sigcommon;

54
kernel/include/profile.h Normal file
View File

@@ -0,0 +1,54 @@
/* NOTE(review): guard name starts with double underscore, which is
 * reserved for the implementation — consider PROCESS_PROFILE_H_ */
#ifndef __PROCESS_PROFILE_H_
#define __PROCESS_PROFILE_H_

/* Uncomment this to enable profiling */
#define PROFILE_ENABLE

#ifdef PROFILE_ENABLE

/* Number of tracked syscall slots (one per syscall number) */
#define PROFILE_SYSCALL_MAX 300
/* Offloaded syscalls occupy a second run of PROFILE_SYSCALL_MAX slots */
#define PROFILE_OFFLOAD_MAX (PROFILE_SYSCALL_MAX << 1)
/* Generic (non-syscall) events start right after the offload range */
#define PROFILE_EVENT_MIN PROFILE_OFFLOAD_MAX
/* Custom syscall number used to control profiling from userspace */
#define __NR_profile 701

/* Flag bits accepted by do_profile() / the profile syscall.
 * NOTE(review): PROF_PROC occupies the sign bit of an int flag —
 * presumably always tested with '&' and never compared as a signed
 * value; confirm at call sites. */
#define PROF_PROC 0x80000000
#define PROF_CLEAR 0x01
#define PROF_ON 0x02
#define PROF_OFF 0x04
#define PROF_PRINT 0x08

/* One accumulated counter: occurrence count and total TSC cycles.
 * NOTE(review): relies on the includer providing uint32_t/uint64_t. */
struct profile_event {
	uint32_t cnt;
	uint64_t tsc;
};

/*
 * The layout of profile events is as follows:
 * [0,PROFILE_SYSCALL_MAX) - syscalls
 * [PROFILE_SYSCALL_MAX,PROFILE_OFFLOAD_MAX) - syscall offloads
 * [PROFILE_OFFLOAD_MAX,PROFILE_EVENT_MAX) - general events
 *
 * XXX: Make sure to fill in profile_event_names in profile.c
 * for each added profiled event.
 */
enum profile_event_type {
	PROFILE_page_fault = PROFILE_EVENT_MIN,
	PROFILE_mpol_alloc_missed,
	PROFILE_EVENT_MAX /* Should be the last event type */
};

struct thread;
struct process;

/* Map a syscall number to its offload-event slot */
enum profile_event_type profile_syscall2offload(enum profile_event_type sc);
/* Record one occurrence of 'type' taking 'tsc' cycles on the current thread */
void profile_event_add(enum profile_event_type type, uint64_t tsc);
void profile_print_thread_stats(struct thread *thread);
void profile_print_proc_stats(struct process *proc);
/* Fold a dying thread's counters into its process' counters */
void profile_accumulate_events(struct thread *thread, struct process *proc);
/* Returns 0 on success, -ENOMEM on allocation failure */
int profile_alloc_events(struct thread *thread);
void profile_dealloc_thread_events(struct thread *thread);
void profile_dealloc_proc_events(struct process *proc);

#endif // PROFILE_ENABLE
#endif // __PROCESS_PROFILE_H_

View File

@@ -38,6 +38,8 @@
#include <init.h>
#include <cas.h>
#include <rusage.h>
#include <syscall.h>
#include <profile.h>
//#define DEBUG_PRINT_MEM
@@ -906,6 +908,10 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
{
struct thread *thread = cpu_local_var(current);
int error;
#ifdef PROFILE_ENABLE
uint64_t t_s;
t_s = rdtsc();
#endif // PROFILE_ENABLE
set_cputime(interrupt_from_user(regs)? 1: 2);
dkprintf("%s: addr: %p, reason: %lx, regs: %p\n",
@@ -968,6 +974,9 @@ out:
__FUNCTION__, fault_addr, reason, regs, error);
check_need_resched();
set_cputime(0);
#ifdef PROFILE_ENABLE
profile_event_add(PROFILE_page_fault, (rdtsc() - t_s));
#endif // PROFILE_ENABLE
return;
}

View File

@@ -107,12 +107,9 @@ init_process(struct process *proc, struct process *parent)
waitq_init(&proc->waitpid_q);
ihk_atomic_set(&proc->refcount, 2);
proc->monitoring_event = NULL;
#ifdef TRACK_SYSCALLS
mcs_lock_init(&proc->st_lock);
proc->syscall_times = NULL;
proc->syscall_cnts = NULL;
proc->offload_times = NULL;
proc->offload_cnts = NULL;
#ifdef PROFILE_ENABLE
mcs_lock_init(&proc->profile_lock);
proc->profile_events = NULL;
#endif
}
@@ -524,8 +521,8 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
}
#endif
#ifdef TRACK_SYSCALLS
thread->track_syscalls = org->track_syscalls;
#ifdef PROFILE_ENABLE
thread->profile = org->profile;
#endif
return thread;
@@ -2220,10 +2217,10 @@ release_process(struct process *proc)
}
if (proc->tids) kfree(proc->tids);
#ifdef TRACK_SYSCALLS
track_syscalls_print_proc_stats(proc);
track_syscalls_dealloc_proc_counters(proc);
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
profile_print_proc_stats(proc);
profile_dealloc_proc_events(proc);
#endif // PROFILE_ENABLE
kfree(proc);
}
@@ -2436,11 +2433,11 @@ void release_thread(struct thread *thread)
vm = thread->vm;
#ifdef TRACK_SYSCALLS
track_syscalls_accumulate_counters(thread, thread->proc);
//track_syscalls_print_thread_stats(thread);
track_syscalls_dealloc_thread_counters(thread);
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
profile_accumulate_events(thread, thread->proc);
//profile_print_thread_stats(thread);
profile_dealloc_thread_events(thread);
#endif // PROFILE_ENABLE
procfs_delete_thread(thread);
destroy_thread(thread);

293
kernel/profile.c Normal file
View File

@@ -0,0 +1,293 @@
/**
* \file profile.c
* License details are found in the file LICENSE.
*
* \brief
* Profiler code for various process statistics
* \author Balazs Gerofi <bgerofi@riken.jp>
* Copyright (C) 2017 RIKEN AICS
*/
/*
* HISTORY:
*/
#include <types.h>
#include <kmsg.h>
#include <ihk/cpu.h>
#include <cpulocal.h>
#include <ihk/mm.h>
#include <ihk/debug.h>
#include <ihk/ikc.h>
#include <errno.h>
#include <cls.h>
#include <syscall.h>
#include <page.h>
#include <ihk/lock.h>
#include <ctype.h>
#include <waitq.h>
#include <rlimit.h>
#include <affinity.h>
#include <time.h>
#include <ihk/perfctr.h>
#include <mman.h>
#include <kmalloc.h>
#include <memobj.h>
#include <shm.h>
#include <prio.h>
#include <arch/cpu.h>
#include <limits.h>
#include <march.h>
#include <process.h>
extern char *syscall_name[];
#ifdef PROFILE_ENABLE
/* Human-readable names for the generic (non-syscall) profile events,
 * indexed by (type - PROFILE_EVENT_MIN).  Must be kept in sync with
 * enum profile_event_type in profile.h; the trailing "" is a sentinel. */
char *profile_event_names[] =
{
	"page_fault",
	"mpol_alloc_missed",
	""
};
/*
 * Translate a syscall number into the corresponding offload-event
 * slot: offload counters live PROFILE_SYSCALL_MAX entries above the
 * plain syscall counters (see the layout comment in profile.h).
 */
enum profile_event_type profile_syscall2offload(enum profile_event_type sc)
{
	return sc + PROFILE_SYSCALL_MAX;
}
/*
 * Account one occurrence of 'type' costing 'tsc' cycles to the current
 * thread.  No-op unless the thread has profiling switched on; the event
 * array is allocated lazily on first use.
 */
void profile_event_add(enum profile_event_type type, uint64_t tsc)
{
	struct thread *cur = cpu_local_var(current);
	struct profile_event *ev;

	if (!cur->profile)
		return;

	/* Lazy allocation of the per-thread (and process) arrays */
	if (!cur->profile_events &&
			profile_alloc_events(cur) < 0)
		return;

	if (type >= PROFILE_EVENT_MAX) {
		kprintf("%s: WARNING: unknown event type %d\n",
			__FUNCTION__, type);
		return;
	}

	ev = &cur->profile_events[type];
	ev->cnt++;
	ev->tsc += tsc;
}
/*
 * Dump the calling thread's per-syscall (regular + offload) and generic
 * event statistics to the kernel log.  Times are reported as average
 * kilo-cycles per occurrence.
 */
void profile_print_thread_stats(struct thread *thread)
{
	int i;
	unsigned long flags;

	/* Bug fix: the event array is allocated lazily and may still be
	 * NULL here (e.g., PROF_PRINT before any event was recorded) */
	if (!thread->profile_events)
		return;

	flags = kprintf_lock();

	/* Syscall slots: [i] regular, [i + PROFILE_SYSCALL_MAX] offloaded */
	for (i = 0; i < PROFILE_SYSCALL_MAX; ++i) {
		if (!thread->profile_events[i].cnt &&
				!thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt)
			continue;

		__kprintf("TID: %4d (%3d,%20s): %6u %6lukC offl: %6u %6lukC\n",
			thread->tid,
			i,
			syscall_name[i],
			thread->profile_events[i].cnt,
			(thread->profile_events[i].tsc /
			 (thread->profile_events[i].cnt ?
			  thread->profile_events[i].cnt : 1))
			/ 1000,
			thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt,
			(thread->profile_events[i + PROFILE_SYSCALL_MAX].tsc /
			 (thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt ?
			  thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt : 1))
			/ 1000
			);
	}

	/* Generic (non-syscall) events */
	for (i = PROFILE_EVENT_MIN; i < PROFILE_EVENT_MAX; ++i) {
		if (!thread->profile_events[i].cnt)
			continue;

		__kprintf("TID: %4d (%3d,%20s): %6u %6lukC \n",
			thread->tid,
			i,
			profile_event_names[i - PROFILE_EVENT_MIN],
			thread->profile_events[i].cnt,
			(thread->profile_events[i].tsc /
			 (thread->profile_events[i].cnt ?
			  thread->profile_events[i].cnt : 1))
			/ 1000);
	}

	kprintf_unlock(flags);
}
/*
 * Dump a process' accumulated per-syscall (regular + offload) and
 * generic event statistics to the kernel log.  Times are reported as
 * average kilo-cycles per occurrence.
 */
void profile_print_proc_stats(struct process *proc)
{
	int i;
	unsigned long flags;

	/* Bug fix: release_process() calls this unconditionally; the
	 * process array is NULL if no thread ever allocated events */
	if (!proc->profile_events)
		return;

	flags = kprintf_lock();

	/* Syscall slots: [i] regular, [i + PROFILE_SYSCALL_MAX] offloaded */
	for (i = 0; i < PROFILE_SYSCALL_MAX; ++i) {
		if (!proc->profile_events[i].cnt &&
				!proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt)
			continue;

		__kprintf("PID: %4d (%3d,%20s): %6u %6lukC offl: %6u %6lukC\n",
			proc->pid,
			i,
			syscall_name[i],
			proc->profile_events[i].cnt,
			(proc->profile_events[i].tsc /
			 (proc->profile_events[i].cnt ?
			  proc->profile_events[i].cnt : 1))
			/ 1000,
			proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt,
			(proc->profile_events[i + PROFILE_SYSCALL_MAX].tsc /
			 (proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt ?
			  proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt : 1))
			/ 1000
			);
	}

	/* Generic (non-syscall) events */
	for (i = PROFILE_EVENT_MIN; i < PROFILE_EVENT_MAX; ++i) {
		if (!proc->profile_events[i].cnt)
			continue;

		__kprintf("PID: %4d (%3d,%20s): %6u %6lukC \n",
			proc->pid,
			i,
			profile_event_names[i - PROFILE_EVENT_MIN],
			proc->profile_events[i].cnt,
			(proc->profile_events[i].tsc /
			 (proc->profile_events[i].cnt ?
			  proc->profile_events[i].cnt : 1))
			/ 1000);
	}

	kprintf_unlock(flags);
}
/*
 * Fold a dying thread's event counters into its process' counters,
 * holding the process profile lock.  Called from release_thread().
 */
void profile_accumulate_events(struct thread *thread,
		struct process *proc)
{
	int i;
	struct mcs_lock_node mcs_node;

	/* Bug fix: either array may be NULL — events are allocated
	 * lazily, so a thread that never profiled has nothing to fold */
	if (!thread->profile_events || !proc->profile_events)
		return;

	mcs_lock_lock(&proc->profile_lock, &mcs_node);
	for (i = 0; i < PROFILE_EVENT_MAX; ++i) {
		proc->profile_events[i].tsc += thread->profile_events[i].tsc;
		proc->profile_events[i].cnt += thread->profile_events[i].cnt;
	}
	mcs_lock_unlock(&proc->profile_lock, &mcs_node);
}
int profile_alloc_events(struct thread *thread)
{
struct process *proc = thread->proc;
struct mcs_lock_node mcs_node;
thread->profile_events = kmalloc(sizeof(*thread->profile_events) *
PROFILE_EVENT_MAX, IHK_MC_AP_NOWAIT);
if (!thread->profile_events) {
kprintf("%s: ERROR: allocating thread private profile counters\n",
__FUNCTION__);
return -ENOMEM;
}
memset(thread->profile_events, 0,
sizeof(*thread->profile_events) * PROFILE_EVENT_MAX);
mcs_lock_lock(&proc->profile_lock, &mcs_node);
if (!proc->profile_events) {
proc->profile_events = kmalloc(sizeof(*proc->profile_events) *
PROFILE_EVENT_MAX, IHK_MC_AP_NOWAIT);
if (!proc->profile_events) {
kprintf("%s: ERROR: allocating proc private profile counters\n",
__FUNCTION__);
return -ENOMEM;
}
memset(proc->profile_events, 0,
sizeof(*thread->profile_events) * PROFILE_EVENT_MAX);
}
mcs_lock_unlock(&proc->profile_lock, &mcs_node);
return 0;
}
void profile_dealloc_thread_events(struct thread *thread)
{
kfree(thread->profile_events);
}
void profile_dealloc_proc_events(struct process *proc)
{
kfree(proc->profile_events);
}
/*
 * Zero the calling thread's event counters (PROF_CLEAR).
 * Fixes: declarator order was the nonstandard "void static"; also
 * guards against the lazily-allocated array still being NULL.
 */
static void profile_clear_thread(struct thread *thread)
{
	if (!thread->profile_events)
		return;

	memset(thread->profile_events, 0,
			sizeof(*thread->profile_events) * PROFILE_EVENT_MAX);
}
/*
 * Dispatch a PROF_* flag mask from the profile syscall.  PROF_PROC
 * selects process-level printing; otherwise print/clear act on the
 * calling thread.  PROF_ON/PROF_OFF toggle the calling thread's
 * profiling flag in both modes.  Always returns 0.
 */
int do_profile(int flag)
{
	struct thread *thread = cpu_local_var(current);

	if (flag & PROF_PROC) {
		/* Process level */
		if (flag & PROF_PRINT)
			profile_print_proc_stats(thread->proc);
	}
	else {
		/* Thread level */
		if (flag & PROF_PRINT)
			profile_print_thread_stats(thread);
		if (flag & PROF_CLEAR)
			profile_clear_thread(thread);
	}

	/* ON/OFF behaves identically in both modes */
	if (flag & PROF_ON)
		thread->profile = 1;
	else if (flag & PROF_OFF)
		thread->profile = 0;

	return 0;
}
/*
 * Kernel entry point for the custom profiling syscall (__NR_profile,
 * 701).  arg0 carries a bitmask of PROF_* flags; see profile.h.
 */
SYSCALL_DECLARE(profile)
{
	int flag = (int)ihk_mc_syscall_arg0(ctx);
	return do_profile(flag);
}
#endif // PROFILE_ENABLE

View File

@@ -56,6 +56,7 @@
#include <bitmap.h>
#include <xpmem.h>
#include <rusage.h>
#include <profile.h>
/* Headers taken from kitten LWK */
#include <lwk/stddef.h>
@@ -93,7 +94,7 @@ static long (*syscall_table[])(int, ihk_mc_user_context_t *) = {
/* generate syscall_name[] */
#define MCKERNEL_UNUSED __attribute__ ((unused))
static char *syscall_name[] MCKERNEL_UNUSED = {
char *syscall_name[] MCKERNEL_UNUSED = {
#define DECLARATOR(number,name) [number] = #name,
#define SYSCALL_HANDLED(number,name) DECLARATOR(number,#name)
#define SYSCALL_DELEGATED(number,name) DECLARATOR(number,#name)
@@ -131,209 +132,6 @@ int prepare_process_ranges_args_envs(struct thread *thread,
static void do_mod_exit(int status);
#endif
#ifdef TRACK_SYSCALLS
void track_syscalls_print_thread_stats(struct thread *thread)
{
int i;
unsigned long flags;
flags = kprintf_lock();
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (!thread->syscall_cnts[i] &&
!thread->offload_cnts[i]) continue;
//__kprintf("(%20s): sys.cnt: %3lu (%15lukC)\n",
__kprintf("TID: %4d (%3d,%20s): sys: %6u %6lukC offl: %6u %6lukC\n",
thread->tid,
i,
syscall_name[i],
thread->syscall_cnts[i],
(thread->syscall_times[i] /
(thread->syscall_cnts[i] ? thread->syscall_cnts[i] : 1))
/ 1000,
thread->offload_cnts[i],
(thread->offload_times[i] /
(thread->offload_cnts[i] ? thread->offload_cnts[i] : 1))
/ 1000
);
}
kprintf_unlock(flags);
}
void track_syscalls_print_proc_stats(struct process *proc)
{
int i;
unsigned long flags;
flags = kprintf_lock();
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (!proc->syscall_cnts[i] &&
!proc->offload_cnts[i]) continue;
//__kprintf("(%20s): sys.cnt: %3lu (%15lukC)\n",
__kprintf("PID: %4d (%3d,%20s): sys: %6u %6lukC offl: %6u %6lukC\n",
proc->pid,
i,
syscall_name[i],
proc->syscall_cnts[i],
(proc->syscall_times[i] /
(proc->syscall_cnts[i] ? proc->syscall_cnts[i] : 1))
/ 1000,
proc->offload_cnts[i],
(proc->offload_times[i] /
(proc->offload_cnts[i] ? proc->offload_cnts[i] : 1))
/ 1000
);
}
kprintf_unlock(flags);
}
void track_syscalls_accumulate_counters(struct thread *thread,
struct process *proc)
{
int i;
struct mcs_lock_node mcs_node;
mcs_lock_lock(&proc->st_lock, &mcs_node);
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (thread->syscall_cnts[i]) {
proc->syscall_times[i] += thread->syscall_times[i];
proc->syscall_cnts[i] += thread->syscall_cnts[i];
}
if (thread->offload_cnts[i]) {
proc->offload_times[i] += thread->offload_times[i];
proc->offload_cnts[i] += thread->offload_cnts[i];
}
}
mcs_lock_unlock(&proc->st_lock, &mcs_node);
}
void track_syscalls_alloc_counters(struct thread *thread)
{
struct process *proc = thread->proc;
struct mcs_lock_node mcs_node;
thread->syscall_times = kmalloc(sizeof(*thread->syscall_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
thread->syscall_cnts = kmalloc(sizeof(*thread->syscall_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
thread->offload_times = kmalloc(sizeof(*thread->offload_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
thread->offload_cnts = kmalloc(sizeof(*thread->offload_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
if (!thread->syscall_times ||
!thread->syscall_cnts ||
!thread->offload_times ||
!thread->offload_cnts) {
kprintf("%s: ERROR: allocating thread private counters\n",
__FUNCTION__);
panic("");
}
memset(thread->syscall_times, 0, sizeof(*thread->syscall_times) *
TRACK_SYSCALLS_MAX);
memset(thread->syscall_cnts, 0, sizeof(*thread->syscall_cnts) *
TRACK_SYSCALLS_MAX);
memset(thread->offload_times, 0, sizeof(*thread->offload_times) *
TRACK_SYSCALLS_MAX);
memset(thread->offload_cnts, 0, sizeof(*thread->offload_cnts) *
TRACK_SYSCALLS_MAX);
mcs_lock_lock(&proc->st_lock, &mcs_node);
if (!proc->syscall_times) {
proc->syscall_times = kmalloc(sizeof(*proc->syscall_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
proc->syscall_cnts = kmalloc(sizeof(*proc->syscall_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
proc->offload_times = kmalloc(sizeof(*proc->offload_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
proc->offload_cnts = kmalloc(sizeof(*proc->offload_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
if (!proc->syscall_times ||
!proc->syscall_cnts ||
!proc->offload_times ||
!proc->offload_cnts) {
kprintf("%s: ERROR: allocating process private counters\n",
__FUNCTION__);
panic("");
}
memset(proc->syscall_times, 0, sizeof(*proc->syscall_times) *
TRACK_SYSCALLS_MAX);
memset(proc->syscall_cnts, 0, sizeof(*proc->syscall_cnts) *
TRACK_SYSCALLS_MAX);
memset(proc->offload_times, 0, sizeof(*proc->offload_times) *
TRACK_SYSCALLS_MAX);
memset(proc->offload_cnts, 0, sizeof(*proc->offload_cnts) *
TRACK_SYSCALLS_MAX);
}
mcs_lock_unlock(&proc->st_lock, &mcs_node);
}
void track_syscalls_dealloc_thread_counters(struct thread *thread)
{
kfree(thread->syscall_times);
kfree(thread->syscall_cnts);
kfree(thread->offload_times);
kfree(thread->offload_cnts);
}
void track_syscalls_dealloc_proc_counters(struct process *proc)
{
kfree(proc->syscall_times);
kfree(proc->syscall_cnts);
kfree(proc->offload_times);
kfree(proc->offload_cnts);
}
int do_track_syscalls(int flag)
{
struct thread *thread = cpu_local_var(current);
int i;
if (flag & TRACK_SYSCALLS_PRINT)
track_syscalls_print_thread_stats(thread);
if (flag & TRACK_SYSCALLS_PRINT_PROC)
track_syscalls_print_proc_stats(thread->proc);
if (flag & TRACK_SYSCALLS_CLEAR) {
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (!thread->syscall_cnts[i] &&
!thread->offload_cnts[i]) continue;
thread->syscall_cnts[i] = 0;
thread->syscall_times[i] = 0;
thread->offload_cnts[i] = 0;
thread->offload_times[i] = 0;
}
}
if (flag & TRACK_SYSCALLS_ON) {
thread->track_syscalls = 1;
}
else if (flag & TRACK_SYSCALLS_OFF) {
thread->track_syscalls = 0;
}
return 0;
}
SYSCALL_DECLARE(track_syscalls)
{
int flag = (int)ihk_mc_syscall_arg0(ctx);
return do_track_syscalls(flag);
}
#endif // TRACK_SYSCALLS
static void send_syscall(struct syscall_request *req, int cpu, int pid, struct syscall_response *res)
{
struct ikc_scd_packet packet IHK_DMA_ALIGN;
@@ -386,10 +184,10 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
int mstatus = 0;
#ifdef TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
uint64_t t_s;
t_s = rdtsc();
#endif // TRACK_SYSCALLS
#endif // PROFILE_ENABLE
dkprintf("SC(%d)[%3d] sending syscall\n",
ihk_mc_get_processor_id(),
@@ -503,22 +301,16 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
--thread->in_syscall_offload;
}
#ifdef TRACK_SYSCALLS
if (req->number < TRACK_SYSCALLS_MAX) {
if (!cpu_local_var(current)->offload_cnts) {
track_syscalls_alloc_counters(cpu_local_var(current));
}
if (cpu_local_var(current)->track_syscalls) {
cpu_local_var(current)->offload_times[req->number] +=
(rdtsc() - t_s);
cpu_local_var(current)->offload_cnts[req->number] += 1;
}
#ifdef PROFILE_ENABLE
if (req->number < PROFILE_SYSCALL_MAX) {
profile_event_add(profile_syscall2offload(req->number),
(rdtsc() - t_s));
}
else {
dkprintf("%s: offload syscall > %d ?? : %d\n",
__FUNCTION__, TRACK_SYSCALLS_MAX, req->number);
__FUNCTION__, PROFILE_SYSCALL_MAX, req->number);
}
#endif // TRACK_SYSCALLS
#endif // PROFILE_ENABLE
monitor->status = mstatus;
monitor->counter++;
@@ -8668,9 +8460,9 @@ set_cputime(int mode)
long syscall(int num, ihk_mc_user_context_t *ctx)
{
long l;
#ifdef TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
uint64_t t_s;
#endif // TRACK_SYSCALLS
#endif // PROFILE_ENABLE
struct thread *thread = cpu_local_var(current);
#ifdef DISABLE_SCHED_YIELD
@@ -8717,10 +8509,10 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
#endif
dkprintf("\n");
#ifdef TRACK_SYSCALLS
if (num == __NR_clone) cpu_local_var(current)->track_syscalls = 1;
#ifdef PROFILE_ENABLE
if (num == __NR_clone) cpu_local_var(current)->profile = 1;
t_s = rdtsc();
#endif // TRACK_SYSCALLS
#endif // PROFILE_ENABLE
if ((0 <= num) && (num < (sizeof(syscall_table) / sizeof(syscall_table[0])))
&& (syscall_table[num] != NULL)) {
@@ -8742,23 +8534,17 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
check_signal(l, NULL, num);
}
#ifdef TRACK_SYSCALLS
if (num < TRACK_SYSCALLS_MAX) {
if (!cpu_local_var(current)->syscall_cnts) {
track_syscalls_alloc_counters(cpu_local_var(current));
}
if (cpu_local_var(current)->track_syscalls) {
cpu_local_var(current)->syscall_times[num] += (rdtsc() - t_s);
cpu_local_var(current)->syscall_cnts[num]++;
}
#ifdef PROFILE_ENABLE
if (num < PROFILE_SYSCALL_MAX) {
profile_event_add(num, (rdtsc() - t_s));
}
else {
if (num != __NR_track_syscalls) {
if (num != __NR_profile) {
dkprintf("%s: syscall > %d ?? : %d\n",
__FUNCTION__, TRACK_SYSCALLS_MAX, num);
__FUNCTION__, PROFILE_SYSCALL_MAX, num);
}
}
#endif // TRACK_SYSCALLS
#endif // PROFILE_ENABLE
if (num != __NR_sched_yield &&
num != __NR_futex) {