Compare commits


22 Commits
1.2.4 ... 1.2.5

Author SHA1 Message Date
Ken Sato
1e442cce10 mcklogd: fixed termination method of mcklogd 2017-05-09 16:28:21 +09:00
Ken Sato
3f870b69a6 mcklogd: change the timing of start/stop. 2017-05-09 16:06:07 +09:00
Balazs Gerofi
0fef80cb19 SCD_MSG_CPU_RW_REG: use syscall channel for reply packet in CPU MSR read/write operation 2017-05-05 00:16:02 +09:00
Balazs Gerofi
9992fe0d72 mcctrl: support remote CPU MSR read/write operations 2017-05-05 00:01:43 +09:00
Balazs Gerofi
2d19ed9391 configure.ac: check NUMA development library 2017-04-29 05:30:27 +09:00
Balazs Gerofi
2f2f04d5a1 mcexec: ENABLE_MCOVERLAYFS on CentOS for up to version 7.3 2017-04-29 05:10:21 +09:00
Ken Sato
1541b26086 ihklib: add pa_info functions. 2017-04-27 17:13:49 +09:00
Ken Sato
e6c4d7731d Merge remote-tracking branch 'origin/rusage'
Conflicts:
	configure
	kernel/process.c
2017-04-27 15:10:38 +09:00
Katsukura
94b527e027 modified: lib/include/ihk/rusage.h 2017-04-27 14:47:21 +09:00
Katsukura
8c9b207557 configure : add option --enable-rusage 2017-04-27 14:00:59 +09:00
Balazs Gerofi
dacb05844b mcoverlayfs: support compile up to 3.10.0-514 2017-04-20 00:48:56 +09:00
Balazs Gerofi
c3ec5d20ca configure: --with-uname_r: optionally specify target kernel version string 2017-04-20 00:48:56 +09:00
Balazs Gerofi
92a40f92dd mcctrl_put_per_proc_data(): do not use task_pid_vnr() in IRQ context 2017-03-30 15:02:57 +09:00
Balazs Gerofi
45bddf3caa mcexec_syscall(): do not use task_pid_vnr() in IRQ context 2017-03-30 14:56:57 +09:00
Balazs Gerofi
b7671fedd3 mcctrl_per_proc_data: comments 2017-03-30 14:51:24 +09:00
Yoichi Umezawa
c38d536aaa xpmem: porting xpmem v2.6.3
implement xpmem_get, xpmem_release, xpmem_attach, xpmem_detach
2017-03-29 18:20:53 +09:00
Yoichi Umezawa
4ee0c05e08 mcoverlayfs: fix NULL pointer dereference on ovl_dentry_release() 2017-03-28 21:52:41 +09:00
Tomoki Shirasawa
f2ab0193e5 fix panic when thread termination and signal delivery overlap. 2017-03-28 11:31:27 +09:00
Tomoki Shirasawa
ef910fdf0e Discard outstanding system calls at the end of mcexec. 2017-03-28 11:23:54 +09:00
Balazs Gerofi
b97a8c5138 mcexec_open_exec(): use strncpy_from_user() before accessing file name 2017-03-21 20:13:12 +09:00
Tomoki Shirasawa
034d10b185 Fix a case where a signal received during futex processing was not processed. 2017-03-21 20:37:17 +09:00
Katsukura
3fe2257929 create rusage branch. 2017-03-15 23:22:51 +09:00
43 changed files with 4862 additions and 828 deletions

View File

@@ -1813,4 +1813,43 @@ int running_on_kvm(void) {
return 0;
}
void
mod_nmi_ctx(void *nmi_ctx, void (*func)())
{
unsigned long *l = nmi_ctx;
int i;
unsigned long flags;
//struct x86_cpu_local_variables *v;
//if(!ihk_mc_get_processor_id()) {
//v = get_x86_this_cpu_local();
//}
asm volatile("pushf; pop %0" : "=r"(flags) : : "memory", "cc");
for (i = 0; i < 22; i++)
l[i] = l[i + 5];
l[i++] = (unsigned long)func; // return address
l[i++] = 0x20; // KERNEL CS
l[i++] = flags & ~RFLAGS_IF; // rflags (disable interrupt)
l[i++] = (unsigned long)(l + 27); // old rsp
l[i++] = 0x28; // KERNEL DS
}
int arch_cpu_read_write_register(
struct mcctrl_os_cpu_register *desc,
enum mcctrl_os_cpu_operation op)
{
if (op == MCCTRL_OS_CPU_READ_REGISTER) {
desc->val = rdmsr(desc->addr);
}
else if (op == MCCTRL_OS_CPU_WRITE_REGISTER) {
wrmsr(desc->addr, desc->val);
}
else {
return -1;
}
return 0;
}
/*** end of file ***/
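
For reference, the read/write dispatch above can be exercised stand-alone; a minimal user-space sketch with stubbed rdmsr/wrmsr (the struct, enum, and stub names here are shorthand for illustration, not the mcctrl identifiers):

#include <stdio.h>

/* stand-ins for the mcctrl descriptor and for rdmsr/wrmsr */
struct cpu_reg { unsigned long addr, val; };
enum cpu_op { OP_READ, OP_WRITE };

static unsigned long fake_msr;
static unsigned long stub_rdmsr(unsigned long addr) { (void)addr; return fake_msr; }
static void stub_wrmsr(unsigned long addr, unsigned long val) { (void)addr; fake_msr = val; }

/* same dispatch shape as arch_cpu_read_write_register() above */
static int rw_register(struct cpu_reg *d, enum cpu_op op)
{
	if (op == OP_READ)
		d->val = stub_rdmsr(d->addr);
	else if (op == OP_WRITE)
		stub_wrmsr(d->addr, d->val);
	else
		return -1;
	return 0;
}

int main(void)
{
	struct cpu_reg d = { .addr = 0x10 /* e.g. IA32_TIME_STAMP_COUNTER */, .val = 42 };
	rw_register(&d, OP_WRITE);
	rw_register(&d, OP_READ);
	printf("val = 0x%lx\n", d.val);	/* prints 0x2a */
	return 0;
}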

View File

@@ -215,4 +215,25 @@ static inline unsigned long atomic_cmpxchg4(unsigned int *addr,
return oldval;
}
static inline void ihk_atomic_add_long(long i, long *v) {
asm volatile("lock addq %1,%0"
: "+m" (*v)
: "ir" (i));
}
static inline void ihk_atomic_add_ulong(long i, unsigned long *v) {
asm volatile("lock addq %1,%0"
: "+m" (*v)
: "ir" (i));
}
static inline unsigned long ihk_atomic_add_long_return(long i, long *v) {
long __i;
__i = i;
asm volatile("lock xaddq %0, %1"
: "+r" (i), "+m" (*v)
: : "memory");
return i + __i;
}
#endif
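
As a side note, the lock xadd idiom above returns the pre-add value in the source operand, so adding the increment back yields the post-add value; a stand-alone x86-64 check (GCC inline asm, same constraints as the diff):

#include <stdio.h>

/* same technique as ihk_atomic_add_long_return(): xaddq leaves the
 * old value of *v in "old", so old + i is the value after the add */
static inline long atomic_add_long_return(long i, long *v)
{
	long old = i;
	__asm__ __volatile__("lock xaddq %0, %1"
			     : "+r" (old), "+m" (*v)
			     : : "memory");
	return old + i;
}

int main(void)
{
	long counter = 40;
	printf("%ld\n", atomic_add_long_return(2, &counter));	/* prints 42 */
	printf("%ld\n", counter);				/* prints 42 */
	return 0;
}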

View File

@@ -130,11 +130,40 @@ general_protection_exception:
addq $8, %rsp
iretq
.global __freeze
__freeze:
PUSH_ALL_REGS
callq freeze
POP_ALL_REGS
iretq
.globl nmi
nmi:
#define PANICED 232
#define PANIC_REGS 240
movq %rax,%gs:PANIC_REGS+0x00
movq %rsp,%gs:PANIC_REGS+0x08
movl nmi_mode(%rip),%eax
cmp $1,%rax
je 1f
cmp $2,%rax
jne 3f
1:
cld
movq %gs:PANIC_REGS+0x00,%rax
PUSH_ALL_REGS
subq $40, %rsp
movq %rsp,%gs:PANIC_REGS+0x10
movq %rsp, %rdi
call freeze_thaw
cmpq $0, %rax
jnz 2f
addq $40, %rsp
2:
POP_ALL_REGS
iretq
3:
movq %rbx,%gs:PANIC_REGS+0x08
movq %rcx,%gs:PANIC_REGS+0x10
movq %rdx,%gs:PANIC_REGS+0x18

View File

@@ -1101,7 +1101,7 @@ static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
page = phys_to_page(phys);
}
if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY) &&
if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY) && (args->memobj) &&
!(args->memobj->flags & MF_ZEROFILL)) {
memobj_flush_page(args->memobj, phys, PTL1_SIZE);
}

View File

@@ -1179,7 +1179,8 @@ done:
if(pid != -1 && tthread->proc->pid != pid){
continue;
}
if(tthread->tid == tid){
if (tthread->tid == tid &&
tthread->status != PS_EXITED) {
found = 1;
break;
}
@@ -1297,6 +1298,9 @@ done:
sched_wakeup_thread(tthread, PS_STOPPED);
tthread->proc->status = PS_RUNNING;
}
else {
sched_wakeup_thread(tthread, PS_INTERRUPTIBLE);
}
}
}
release_thread(tthread);

View File

@@ -17,6 +17,7 @@
* make sure that these are position-independent codes.
*/
#include <cls.h>
#include <syscall.h>
#include <ihk/atomic.h>
#include <arch/cpu.h>

View File

@@ -260,16 +260,6 @@ if [ "${irqbalance_used}" == "yes" ]; then
fi;
fi
# Start mcklogd. Note that McKernel blocks when kmsg buffer is full
# with '-k 1' until mcklogd unblocks it so starting mcklogd must precede
# booting McKernel
if [ ${LOGMODE} -ne 0 ]; then
# Stop mcklogd which has survived McKernel shutdown because
# mcstop+release.sh is not used
pkill mcklogd
SBINDIR=${SBINDIR} ${SBINDIR}/mcklogd -i ${INTERVAL} -f ${facility}
fi
# Load IHK if not loaded
if ! grep -E 'ihk\s' /proc/modules &>/dev/null; then
if ! insmod ${KMODDIR}/ihk.ko 2>/dev/null; then
@@ -521,3 +511,13 @@ if [ "${irqbalance_used}" == "yes" ]; then
# echo cpus=$cpus mask=$smp_affinity_mask banirq=$banirq
fi
# Start mcklogd. Note that McKernel blocks when kmsg buffer is full
# with '-k 1' until mcklogd unblocks it so starting mcklogd must precede
# booting McKernel
if [ ${LOGMODE} -ne 0 ]; then
# Stop mcklogd which has survived McKernel shutdown because
# mcstop+release.sh is not used
pkill mcklogd
SBINDIR=${SBINDIR} ${SBINDIR}/mcklogd -i ${INTERVAL} -f ${facility}
fi

View File

@@ -20,6 +20,12 @@ cpus=""
# No SMP module? Exit.
if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then exit 0; fi
# Stop mcklogd
while pgrep "mcklogd" > /dev/null 2>&1;
do
pkill -9 mcklogd
done
# Destroy all LWK instances
if ls /dev/mcos* 1>/dev/null 2>&1; then
for i in /dev/mcos*; do
@@ -94,9 +100,6 @@ if grep -E 'ihk\s' /proc/modules &>/dev/null; then
fi
fi
# Stop mcklogd
pkill mcklogd
# Start irqbalance with the original settings
if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: active'`" != "" ]; then
if ! systemctl stop irqbalance_mck.service 2>/dev/null; then

configure (vendored)

File diff suppressed because it is too large.

View File

@@ -17,6 +17,13 @@ DCFA_RELEASE_DATE=DCFA_RELEASE_DATE_m4
AC_PREFIX_DEFAULT([/opt/ppos])
AC_CHECK_HEADER([numa.h],[numa_header_found=yes])
AS_IF([test "x$numa_header_found" != "xyes"],
[AC_MSG_ERROR([Unable to find numa.h header file, missing numactl-devel?])])
AC_CHECK_LIB([numa],[numa_run_on_node],[numa_lib_found=yes])
AS_IF([test "x$numa_lib_found" != "xyes"],
[AC_MSG_ERROR([Unable to find NUMA library, missing numactl-devel?])])
AC_ARG_WITH([kernelsrc],
AC_HELP_STRING(
[--with-kernelsrc=path],[Path to 'kernel src', default is /lib/modules/uname_r/build]),
@@ -48,6 +55,17 @@ AC_ARG_ENABLE([mcoverlayfs],
[ENABLE_MCOVERLAYFS=$enableval],
[ENABLE_MCOVERLAYFS=yes])
AC_ARG_WITH([uname_r],
AC_HELP_STRING(
[--with-uname_r=uname_r],[Value of '`uname -r`' on the target platform, default is local value]),
[WITH_UNAME_R=$withval],[WITH_UNAME_R=yes])
case "X$WITH_UNAME_R" in
Xyes | Xno | X)
WITH_UNAME_R='`uname -r`'
;;
esac
case "X$WITH_KERNELSRC" in
Xyes | Xno | X)
WITH_KERNELSRC='/lib/modules/`uname -r`/build'
@@ -163,6 +181,7 @@ case $WITH_TARGET in
esac
KDIR="$WITH_KERNELSRC"
UNAME_R="$WITH_UNAME_R"
TARGET="$WITH_TARGET"
MCCTRL_LINUX_SYMTAB=""
@@ -280,6 +299,7 @@ AC_SUBST(CC)
AC_SUBST(XCC)
AC_SUBST(ARCH)
AC_SUBST(KDIR)
AC_SUBST(UNAME_R)
AC_SUBST(TARGET)
AC_SUBST(BINDIR)
AC_SUBST(SBINDIR)

View File

@@ -220,4 +220,34 @@ struct sys_unshare_desc {
unsigned long unshare_flags;
};
enum perf_ctrl_type {
PERF_CTRL_SET,
PERF_CTRL_GET,
PERF_CTRL_ENABLE,
PERF_CTRL_DISABLE,
};
struct perf_ctrl_desc {
enum perf_ctrl_type ctrl_type;
int status;
union {
/* for SET, GET */
struct {
unsigned int target_cntr;
unsigned long config;
unsigned long read_value;
unsigned disabled :1,
pinned :1,
exclude_user :1,
exclude_kernel :1,
exclude_hv :1,
exclude_idle :1;
};
/* for ENABLE, DISABLE */
struct {
unsigned long target_cntr_mask;
};
};
};
#endif
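
A hedged illustration of how this descriptor is filled per operation, mirroring the mcctrl_perf_set()/mcctrl_perf_enable() callers later in this changeset (assumes the struct definition above; the event code is a hypothetical example):

/* SET/GET address one counter; 0x412e is e.g. the Intel raw encoding
 * of LONGEST_LAT_CACHE.MISS, chosen here only for illustration */
struct perf_ctrl_desc set_desc = {
	.ctrl_type = PERF_CTRL_SET,
	.target_cntr = 0,
	.config = 0x412e,
	.exclude_kernel = 1,	/* count user mode only */
};

/* ENABLE/DISABLE address counters by mask instead */
struct perf_ctrl_desc enable_desc = {
	.ctrl_type = PERF_CTRL_ENABLE,
	.target_cntr_mask = 1UL << 0,	/* counter 0 */
};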

View File

@@ -40,6 +40,8 @@
#include <asm/io.h>
#include "../../config.h"
#include "mcctrl.h"
#include "mcctrl_public.h"
#include <ihk/ihk_host_user.h>
//#define DEBUG
@@ -360,7 +362,7 @@ static long mcexec_newprocess(ihk_os_t os,
struct newprocess_desc desc;
struct release_handler_info *info;
if (copy_from_user(&desc, udesc, sizeof(struct newprocess_desc))) {
if (copy_from_user(&desc, udesc, sizeof(struct newprocess_desc))) {
return -EFAULT;
}
info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
@@ -779,6 +781,9 @@ void mcctrl_put_per_proc_data(struct mcctrl_per_proc_data *ppd)
int hash;
unsigned long flags;
int i;
struct wait_queue_head_list_node *wqhln;
struct wait_queue_head_list_node *wqhln_next;
struct ikc_scd_packet *packet;
if (!ppd)
return;
@@ -804,13 +809,25 @@ void mcctrl_put_per_proc_data(struct mcctrl_per_proc_data *ppd)
list_del(&ptd->hash);
kfree(ptd);
__return_syscall(ppd->ud->os, packet, -EINTR,
task_pid_vnr(current));
packet->req.rtid);
ihk_ikc_release_packet(
(struct ihk_ikc_free_packet *)packet,
(ppd->ud->channels + packet->ref)->c);
}
}
flags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
list_for_each_entry_safe(wqhln, wqhln_next, &ppd->wq_req_list, list) {
list_del(&wqhln->list);
packet = wqhln->packet;
kfree(wqhln);
__return_syscall(ppd->ud->os, packet, -EINTR,
packet->req.rtid);
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
(ppd->ud->channels + packet->ref)->c);
}
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, flags);
kfree(ppd);
}
@@ -834,6 +851,12 @@ int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet)
kprintf("%s: ERROR: no per-process structure for PID %d, "
"syscall nr: %lu\n",
__FUNCTION__, pid, packet->req.number);
__return_syscall(ud->os, packet, -EINTR,
packet->req.rtid);
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
(ud->channels + packet->ref)->c);
return -1;
}
@@ -1250,10 +1273,12 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
struct mckernel_exec_file *mcef_iter;
int retval;
int os_ind = ihk_host_os_get_index(os);
char *pathbuf, *fullpath;
char *pathbuf = NULL;
char *fullpath = NULL;
char *kfilename = NULL;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;
int i;
int i, len;
if (os_ind < 0) {
return -EINVAL;
@@ -1304,7 +1329,20 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
goto out_put_ppd;
}
file = open_exec(filename);
kfilename = kmalloc(PATH_MAX, GFP_TEMPORARY);
if (!kfilename) {
retval = -ENOMEM;
kfree(pathbuf);
goto out_put_ppd;
}
len = strncpy_from_user(kfilename, filename, PATH_MAX);
if (unlikely(len < 0)) {
retval = -EINVAL;
goto out_free;
}
file = open_exec(kfilename);
retval = PTR_ERR(file);
if (IS_ERR(file)) {
goto out_free;
@@ -1345,7 +1383,8 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
proc_exe_link(os_ind, task_tgid_vnr(current), fullpath);
up(&mckernel_exec_file_lock);
dprintk("%d open_exec and holding file: %s\n", (int)task_tgid_vnr(current), filename);
dprintk("%d open_exec and holding file: %s\n", (int)task_tgid_vnr(current),
kfilename);
kfree(pathbuf);
@@ -1355,6 +1394,7 @@ out_put_file:
fput(file);
out_free:
kfree(pathbuf);
kfree(kfilename);
out_put_ppd:
mcctrl_put_per_proc_data(ppd);
out:
@@ -1550,6 +1590,240 @@ long mcexec_sys_unshare(struct sys_unshare_desc *__user arg)
return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(perfctrlq);
long mcctrl_perf_num(ihk_os_t os, unsigned long arg)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
usrdata->perf_event_num = arg;
return 0;
}
long mcctrl_perf_set(ihk_os_t os, struct ihk_perf_event_attr *__user arg)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct ikc_scd_packet isp;
struct perf_ctrl_desc *perf_desc = NULL;
struct ihk_perf_event_attr attr;
struct ihk_cpu_info *info = ihk_os_get_cpu_info(os);
int ret = 0;
int i = 0, j = 0;
for (i = 0; i < usrdata->perf_event_num; i++) {
if (copy_from_user(&attr, &arg[i], sizeof(struct ihk_perf_event_attr))) {
printk("%s: error: copying ihk_perf_event_attr from user\n",
__FUNCTION__);
return -EINVAL;
}
for (j = 0; j < info->n_cpus; j++) {
perf_desc = kmalloc(sizeof(struct perf_ctrl_desc), GFP_KERNEL);
if (!perf_desc) {
printk("%s: error: allocating perf_ctrl_desc\n",
__FUNCTION__);
return -ENOMEM;
}
memset(perf_desc, '\0', sizeof(struct perf_ctrl_desc));
perf_desc->ctrl_type = PERF_CTRL_SET;
perf_desc->status = 0;
perf_desc->target_cntr = i;
perf_desc->config = attr.config;
perf_desc->exclude_kernel = attr.exclude_kernel;
perf_desc->exclude_user = attr.exclude_user;
memset(&isp, '\0', sizeof(struct ikc_scd_packet));
isp.msg = SCD_MSG_PERF_CTRL;
isp.arg = virt_to_phys(perf_desc);
if ((ret = mcctrl_ikc_send(os, j, &isp)) < 0) {
printk("%s: mcctrl_ikc_send ret=%d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
ret = wait_event_interruptible(perfctrlq, perf_desc->status);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
kfree(perf_desc);
}
}
return usrdata->perf_event_num;
}
long mcctrl_perf_get(ihk_os_t os, unsigned long *__user arg)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct ikc_scd_packet isp;
struct perf_ctrl_desc *perf_desc = NULL;
struct ihk_cpu_info *info = ihk_os_get_cpu_info(os);
unsigned long value_sum = 0;
int ret = 0;
int i = 0, j = 0;
for (i = 0; i < usrdata->perf_event_num; i++) {
for (j = 0; j < info->n_cpus; j++) {
perf_desc = kmalloc(sizeof(struct perf_ctrl_desc), GFP_KERNEL);
if (!perf_desc) {
printk("%s: error: allocating perf_ctrl_desc\n",
__FUNCTION__);
return -ENOMEM;
}
memset(perf_desc, '\0', sizeof(struct perf_ctrl_desc));
perf_desc->ctrl_type = PERF_CTRL_GET;
perf_desc->status = 0;
perf_desc->target_cntr = i;
memset(&isp, '\0', sizeof(struct ikc_scd_packet));
isp.msg = SCD_MSG_PERF_CTRL;
isp.arg = virt_to_phys(perf_desc);
if ((ret = mcctrl_ikc_send(os, j, &isp)) < 0) {
printk("%s: mcctrl_ikc_send ret=%d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
ret = wait_event_interruptible(perfctrlq, perf_desc->status);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
value_sum += perf_desc->read_value;
kfree(perf_desc);
}
if (copy_to_user(&arg[i], &value_sum, sizeof(unsigned long))) {
printk("%s: error: copying read_value to user\n",
__FUNCTION__);
return -EINVAL;
}
value_sum = 0;
}
return 0;
}
long mcctrl_perf_enable(ihk_os_t os)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct ikc_scd_packet isp;
struct perf_ctrl_desc *perf_desc = NULL;
struct ihk_cpu_info *info = ihk_os_get_cpu_info(os);
unsigned int cntr_mask = 0;
int ret = 0;
int i = 0, j = 0;
for (i = 0; i < usrdata->perf_event_num; i++) {
cntr_mask |= 1 << i;
}
for (j = 0; j < info->n_cpus; j++) {
perf_desc = kmalloc(sizeof(struct perf_ctrl_desc), GFP_KERNEL);
if (!perf_desc) {
printk("%s: error: allocating perf_ctrl_desc\n",
__FUNCTION__);
return -ENOMEM;
}
memset(perf_desc, '\0', sizeof(struct perf_ctrl_desc));
perf_desc->ctrl_type = PERF_CTRL_ENABLE;
perf_desc->status = 0;
perf_desc->target_cntr_mask = cntr_mask;
memset(&isp, '\0', sizeof(struct ikc_scd_packet));
isp.msg = SCD_MSG_PERF_CTRL;
isp.arg = virt_to_phys(perf_desc);
if ((ret = mcctrl_ikc_send(os, j, &isp)) < 0) {
printk("%s: mcctrl_ikc_send ret=%d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
ret = wait_event_interruptible(perfctrlq, perf_desc->status);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
kfree(perf_desc);
}
return 0;
}
long mcctrl_perf_disable(ihk_os_t os)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct ikc_scd_packet isp;
struct perf_ctrl_desc *perf_desc = NULL;
struct ihk_cpu_info *info = ihk_os_get_cpu_info(os);
unsigned int cntr_mask = 0;
int ret = 0;
int i = 0, j = 0;
for (i = 0; i < usrdata->perf_event_num; i++) {
cntr_mask |= 1 << i;
}
for (j = 0; j < info->n_cpus; j++) {
perf_desc = kmalloc(sizeof(struct perf_ctrl_desc), GFP_KERNEL);
if (!perf_desc) {
printk("%s: error: allocating perf_ctrl_desc\n",
__FUNCTION__);
return -ENOMEM;
}
memset(perf_desc, '\0', sizeof(struct perf_ctrl_desc));
perf_desc->ctrl_type = PERF_CTRL_DISABLE;
perf_desc->status = 0;
perf_desc->target_cntr_mask = cntr_mask;
memset(&isp, '\0', sizeof(struct ikc_scd_packet));
isp.msg = SCD_MSG_PERF_CTRL;
isp.arg = virt_to_phys(perf_desc);
if ((ret = mcctrl_ikc_send(os, j, &isp)) < 0) {
printk("%s: mcctrl_ikc_send ret=%d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
ret = wait_event_interruptible(perfctrlq, perf_desc->status);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
kfree(perf_desc);
return -EINVAL;
}
kfree(perf_desc);
}
return 0;
}
long mcctrl_perf_destroy(ihk_os_t os)
{
mcctrl_perf_disable(os);
mcctrl_perf_num(os, 0);
return 0;
}
void mcctrl_perf_ack(ihk_os_t os, struct ikc_scd_packet *packet)
{
struct perf_ctrl_desc *perf_desc = phys_to_virt(packet->arg);
perf_desc->status = 1;
wake_up_interruptible(&perfctrlq);
}
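
The functions above all follow one request/ack handshake: fill a perf_ctrl_desc, hand its physical address to McKernel over IKC, then sleep on perfctrlq until the remote handler acks and mcctrl_perf_ack() sets status. A stand-alone analogue of that pattern, with pthreads standing in for IKC and the wait queue (illustrative only):

#include <pthread.h>
#include <stdio.h>

struct desc { int status; };	/* stands in for perf_ctrl_desc */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q = PTHREAD_COND_INITIALIZER;	/* stands in for perfctrlq */

static void *remote_side(void *arg)	/* plays the McKernel packet handler */
{
	struct desc *d = arg;
	pthread_mutex_lock(&m);
	d->status = 1;			/* mcctrl_perf_ack() equivalent */
	pthread_cond_signal(&q);	/* wake_up_interruptible() equivalent */
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	struct desc d = { 0 };
	pthread_t t;
	pthread_create(&t, NULL, remote_side, &d);
	pthread_mutex_lock(&m);
	while (!d.status)		/* wait_event_interruptible(perfctrlq, status) */
		pthread_cond_wait(&q, &m);
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);
	puts("acked");
	return 0;
}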
long __mcctrl_control(ihk_os_t os, unsigned int req, unsigned long arg,
struct file *file)
{
@@ -1621,6 +1895,24 @@ long __mcctrl_control(ihk_os_t os, unsigned int req, unsigned long arg,
case MCEXEC_UP_DEBUG_LOG:
return mcexec_debug_log(os, arg);
case IHK_OS_AUX_PERF_NUM:
return mcctrl_perf_num(os, arg);
case IHK_OS_AUX_PERF_SET:
return mcctrl_perf_set(os, (struct ihk_perf_event_attr *)arg);
case IHK_OS_AUX_PERF_GET:
return mcctrl_perf_get(os, (unsigned long *)arg);
case IHK_OS_AUX_PERF_ENABLE:
return mcctrl_perf_enable(os);
case IHK_OS_AUX_PERF_DISABLE:
return mcctrl_perf_disable(os);
case IHK_OS_AUX_PERF_DESTROY:
return mcctrl_perf_destroy(os);
}
return -EINVAL;
}
@@ -1646,3 +1938,149 @@ void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err)
mcctrl_put_per_proc_data(ppd);
}
/* Per-CPU register manipulation functions */
struct mcctrl_os_cpu_response {
int done;
unsigned long val;
wait_queue_head_t wq;
};
int mcctrl_get_request_os_cpu(ihk_os_t *ret_os, int *ret_cpu)
{
ihk_os_t os;
struct mcctrl_usrdata *usrdata;
struct mcctrl_per_proc_data *ppd;
struct ikc_scd_packet *packet;
struct ihk_ikc_channel_desc *ch;
int ret = 0;
/* Look up IHK OS structure
* TODO: iterate all possible indices, currently only for OS 0
*/
os = ihk_host_find_os(0, NULL);
if (!os) {
printk("%s: ERROR: no OS found for index 0\n", __FUNCTION__);
return -EINVAL;
}
/* Look up per-OS mcctrl structure */
usrdata = ihk_host_os_get_usrdata(os);
if (!usrdata) {
printk("%s: ERROR: no usrdata found for OS %p\n", __FUNCTION__, os);
return -EINVAL;
}
/* Look up per-process structure */
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (!ppd) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
return -EINVAL;
}
/* Look up per-thread structure */
packet = (struct ikc_scd_packet *)mcctrl_get_per_thread_data(ppd, current);
if (!packet) {
ret = -EINVAL;
printk("%s: ERROR: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
goto out_put_ppd;
}
*ret_os = os;
/* TODO: define a new IHK query function instead of
* accessing internals directly */
ch = (usrdata->channels + packet->ref)->c;
*ret_cpu = ch->send.queue->read_cpu;
ret = 0;
printk("%s: OS: %p, CPU: %d\n", __FUNCTION__, os, *ret_cpu);
out_put_ppd:
mcctrl_put_per_proc_data(ppd);
return ret;
}
EXPORT_SYMBOL(mcctrl_get_request_os_cpu);
void mcctrl_os_read_write_cpu_response(ihk_os_t os,
struct ikc_scd_packet *pisp)
{
struct mcctrl_os_cpu_response *resp;
/* XXX: What if caller thread is unblocked by a signal
* before this message arrives? */
resp = pisp->resp;
if (!resp) {
return;
}
resp->val = pisp->desc.val;
resp->done = 1;
wake_up_interruptible(&resp->wq);
}
int __mcctrl_os_read_write_cpu_register(ihk_os_t os, int cpu,
struct mcctrl_os_cpu_register *desc,
enum mcctrl_os_cpu_operation op)
{
struct ikc_scd_packet isp;
struct mcctrl_os_cpu_response resp;
int ret = -EINVAL;
memset(&isp, '\0', sizeof(struct ikc_scd_packet));
isp.msg = SCD_MSG_CPU_RW_REG;
isp.op = op;
isp.desc = *desc;
isp.resp = &resp;
resp.done = 0;
init_waitqueue_head(&resp.wq);
mb();
ret = mcctrl_ikc_send(os, cpu, &isp);
if (ret < 0) {
printk("%s: ERROR sending IKC msg: %d\n", __FUNCTION__, ret);
goto out;
}
/* Wait for response */
ret = wait_event_interruptible(resp.wq, resp.done);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
goto out;
}
/* Update if read */
if (ret == 0 && op == MCCTRL_OS_CPU_READ_REGISTER) {
desc->val = resp.val;
}
printk("%s: MCCTRL_OS_CPU_%s_REGISTER: reg: 0x%lx, val: 0x%lx\n",
__FUNCTION__,
(op == MCCTRL_OS_CPU_READ_REGISTER ? "READ" : "WRITE"),
desc->addr, desc->val);
out:
return ret;
}
int mcctrl_os_read_cpu_register(ihk_os_t os, int cpu,
struct mcctrl_os_cpu_register *desc)
{
return __mcctrl_os_read_write_cpu_register(os, cpu,
desc, MCCTRL_OS_CPU_READ_REGISTER);
}
EXPORT_SYMBOL(mcctrl_os_read_cpu_register);
int mcctrl_os_write_cpu_register(ihk_os_t os, int cpu,
struct mcctrl_os_cpu_register *desc)
{
return __mcctrl_os_read_write_cpu_register(os, cpu,
desc, MCCTRL_OS_CPU_WRITE_REGISTER);
}
EXPORT_SYMBOL(mcctrl_os_write_cpu_register);
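
Because these entry points are exported, another Linux-side module could drive them; a hedged sketch of such a caller (assumes the mcctrl_public.h declarations from this changeset; read_tsc_aux and the MSR number are illustrative, not part of the patch):

/* hypothetical out-of-tree module snippet; error handling elided */
#include "mcctrl_public.h"

static int read_tsc_aux(ihk_os_t os, int cpu, unsigned long *out)
{
	struct mcctrl_os_cpu_register reg = {
		.addr = 0xC0000103,	/* IA32_TSC_AUX, as an example */
	};
	int ret = mcctrl_os_read_cpu_register(os, cpu, &reg);

	if (ret == 0)
		*out = reg.val;
	return ret;
}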

View File

@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include "mcctrl.h"
#include <ihk/ihk_host_user.h>
#define OS_MAX_MINOR 64
@@ -74,6 +75,12 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
{ .request = MCEXEC_UP_SYS_UMOUNT, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYS_UNSHARE, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_DEBUG_LOG, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_NUM, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_SET, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_GET, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_ENABLE, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_DISABLE, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_DESTROY, .func = mcctrl_ioctl },
};
static struct ihk_os_user_call mcctrl_uc_proto = {

View File

@@ -53,6 +53,9 @@ void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err);
static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ihk_ikc_channel_desc *c);
int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet);
void sig_done(unsigned long arg, int err);
void mcctrl_perf_ack(ihk_os_t os, struct ikc_scd_packet *packet);
void mcctrl_os_read_write_cpu_response(ihk_os_t os,
struct ikc_scd_packet *pisp);
/* XXX: this runs in atomic context! */
static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
@@ -109,6 +112,14 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
get_vdso_info(__os, pisp->arg);
break;
case SCD_MSG_PERF_ACK:
mcctrl_perf_ack(__os, pisp);
break;
case SCD_MSG_CPU_RW_REG_RESP:
mcctrl_os_read_write_cpu_response(__os, pisp);
break;
default:
printk(KERN_ERR "mcctrl:syscall_packet_handler:"
"unknown message (%d.%d.%d.%d.%d.%#lx)\n",

View File

@@ -44,6 +44,7 @@
#include <linux/rwlock.h>
#include <linux/threads.h>
#include "sysfs.h"
#include "mcctrl_public.h"
#define SCD_MSG_PREPARE_PROCESS 0x1
#define SCD_MSG_PREPARE_PROCESS_ACKED 0x2
@@ -92,6 +93,12 @@
#define SCD_MSG_PROCFS_TID_CREATE 0x44
#define SCD_MSG_PROCFS_TID_DELETE 0x45
#define SCD_MSG_PERF_CTRL 0x50
#define SCD_MSG_PERF_ACK 0x51
#define SCD_MSG_CPU_RW_REG 0x52
#define SCD_MSG_CPU_RW_REG_RESP 0x53
#define DMA_PIN_SHIFT 21
#define DO_USER_MODE
@@ -103,6 +110,12 @@ struct coretable {
unsigned long addr;
};
enum mcctrl_os_cpu_operation {
MCCTRL_OS_CPU_READ_REGISTER,
MCCTRL_OS_CPU_WRITE_REGISTER,
MCCTRL_OS_CPU_MAX_OP
};
struct ikc_scd_packet {
int msg;
int err;
@@ -128,6 +141,13 @@ struct ikc_scd_packet {
struct {
int ttid;
};
/* SCD_MSG_CPU_RW_REG */
struct {
struct mcctrl_os_cpu_register desc;
enum mcctrl_os_cpu_operation op;
void *resp;
};
};
char padding[12];
};
@@ -192,9 +212,10 @@ struct mcctrl_per_proc_data {
int pid;
unsigned long rpgtable; /* per process, not per OS */
struct list_head wq_list;
struct list_head wq_req_list;
struct list_head wq_list_exact;
struct list_head wq_list; /* All these requests come from mcexec */
struct list_head wq_req_list; /* These requests come from IKC IRQ handler (can be processed by any threads) */
struct list_head wq_list_exact; /* These requests come from IKC IRQ handler targeting a particular thread */
ihk_spinlock_t wq_list_lock;
wait_queue_head_t wq_prepare;
wait_queue_head_t wq_procfs;
@@ -298,6 +319,7 @@ struct mcctrl_usrdata {
struct list_head cpu_topology_list;
struct list_head node_topology_list;
struct mcctrl_part_exec part_exec;
int perf_event_num;
};
struct mcctrl_signal {
@@ -401,4 +423,14 @@ struct get_cpu_mapping_req {
wait_queue_head_t wq;
};
struct ihk_perf_event_attr{
unsigned long config;
unsigned disabled:1;
unsigned pinned:1;
unsigned exclude_user:1;
unsigned exclude_kernel:1;
unsigned exclude_hv:1;
unsigned exclude_idle:1;
};
#endif

View File

@@ -0,0 +1,20 @@
#ifndef __MCCTRL_PUBLIC_H
#define __MCCTRL_PUBLIC_H
#include <ihk/ihk_host_user.h>
#include <ikc/queue.h>
struct mcctrl_os_cpu_register {
unsigned long addr;
unsigned long val;
unsigned long addr_ext;
};
int mcctrl_os_read_cpu_register(ihk_os_t os, int cpu,
struct mcctrl_os_cpu_register *desc);
int mcctrl_os_write_cpu_register(ihk_os_t os, int cpu,
struct mcctrl_os_cpu_register *desc);
int mcctrl_get_request_os_cpu(ihk_os_t *os, int *cpu);
#endif // __MCCTRL_PUBLIC_H

View File

@@ -1,6 +1,6 @@
ENABLE_MCOVERLAYFS=@ENABLE_MCOVERLAYFS@
RELEASE=@UNAME_R@
RELEASE=$(shell uname -r)
MAJOR=$(shell echo ${RELEASE} | sed -e 's/^\([0-9]*\).*/\1/')
MINOR=$(shell echo ${RELEASE} | sed -e 's/^[0-9]*.\([0-9]*\).*/\1/')
PATCH=$(shell echo ${RELEASE} | sed -e 's/^[0-9]*.[0-9]*.\([0-9]*\).*/\1/')
@@ -9,6 +9,7 @@ RHEL_RELEASE_TMP=$(shell echo ${RELEASE} | sed -e 's/^[0-9]*.[0-9]*.[0-9]*-\([0-
RHEL_RELEASE=$(shell if [ "${RELEASE}" == "${RHEL_RELEASE_TMP}" ]; then echo ""; else echo ${RHEL_RELEASE_TMP}; fi)
BUILD_MODULE_TMP=$(shell if [ "${RHEL_RELEASE}" == "" ]; then echo "org"; else echo "rhel"; fi)
BUILD_MODULE=none
#$(info "LINUX_VERSION_CODE: ${LINUX_VERSION_CODE}, RHEL_RELEASE: ${RHEL_RELEASE}")
ifeq ($(ENABLE_MCOVERLAYFS),yes)
ifeq ($(BUILD_MODULE_TMP),org)
ifeq ($(BUILD_MODULE),none)
@@ -20,7 +21,7 @@ endif
endif
ifeq ($(BUILD_MODULE_TMP),rhel)
ifeq ($(BUILD_MODULE),none)
BUILD_MODULE=$(shell if [ ${LINUX_VERSION_CODE} -eq 199168 -a ${RHEL_RELEASE} -eq 327 ]; then echo "linux-3.10.0-327.36.1.el7"; else echo "none"; fi)
BUILD_MODULE=$(shell if [ ${LINUX_VERSION_CODE} -eq 199168 -a ${RHEL_RELEASE} -ge 327 -a ${RHEL_RELEASE} -le 514 ]; then echo "linux-3.10.0-327.36.1.el7"; else echo "none"; fi)
endif
endif
endif

View File

@@ -420,8 +420,8 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
dentry, dentry->d_inode->i_ino);
OVL_DEBUG("sysfs: realpath.dentry=%pd4, i_ino=%lu\n",
realpath.dentry, realpath.dentry->d_inode->i_ino);
if (!dentry->d_inode->i_private) {
dentry->d_inode->i_private = dentry->d_fsdata;
if (!ovl_find_d_fsdata(dentry)) {
ovl_add_d_fsdata(dentry);
dentry->d_fsdata = realpath.dentry->d_fsdata;
}
}

View File

@@ -43,6 +43,12 @@ enum ovl_opt_bit {
#define OVL_OPT_NOCOPYUPW(opt) ((opt) & __OVL_OPT_NOCOPYUPW)
#define OVL_OPT_NOFSCHECK(opt) ((opt) & __OVL_OPT_NOFSCHECK)
struct ovl_d_fsdata {
struct list_head list;
struct dentry *d;
struct ovl_entry *oe;
};
static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
{
int err = vfs_rmdir(dir, dentry);
@@ -149,6 +155,8 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
unsigned ovl_get_config_opt(struct dentry *dentry);
void ovl_reset_ovl_entry(struct ovl_entry **oe, struct dentry *dentry);
struct ovl_entry *ovl_find_d_fsdata(struct dentry *dentry);
int ovl_add_d_fsdata(struct dentry *dentry);
enum ovl_path_type ovl_path_type(struct dentry *dentry);
u64 ovl_dentry_version_get(struct dentry *dentry);
void ovl_dentry_version_inc(struct dentry *dentry);

View File

@@ -45,6 +45,7 @@ struct ovl_fs {
long lower_namelen;
/* pathnames of lower and upper dirs, for show_options */
struct ovl_config config;
struct list_head d_fsdata_list;
};
struct ovl_dir_cache;
@@ -76,15 +77,76 @@ unsigned ovl_get_config_opt(struct dentry *dentry)
void ovl_reset_ovl_entry(struct ovl_entry **oe, struct dentry *dentry)
{
unsigned opt = ovl_get_config_opt(dentry);
struct ovl_entry *d_fsdata;
if (OVL_OPT_NOFSCHECK(opt)) {
if (dentry->d_inode && dentry->d_inode->i_private &&
!S_ISDIR(dentry->d_inode->i_mode)) {
*oe = dentry->d_inode->i_private;
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
return;
}
d_fsdata = ovl_find_d_fsdata(dentry);
if (d_fsdata) {
OVL_DEBUG("reset: dentry=%pd4, 0x%p, oe=0x%p\n",
dentry, dentry, d_fsdata);
*oe = d_fsdata;
}
}
}
struct ovl_entry *ovl_find_d_fsdata(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
struct ovl_d_fsdata *d_fsdata;
list_for_each_entry(d_fsdata, &ofs->d_fsdata_list, list) {
if (dentry == d_fsdata->d) {
OVL_DEBUG("exist: dentry=%pd4, 0x%p, oe=0x%p\n",
d_fsdata->d, d_fsdata->d, d_fsdata->oe);
return d_fsdata->oe;
}
}
return NULL;
}
int ovl_add_d_fsdata(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
struct ovl_d_fsdata *d_fsdata;
d_fsdata = kzalloc(sizeof(struct ovl_d_fsdata), GFP_KERNEL);
if (!d_fsdata) {
return -1;
}
d_fsdata->d = dentry;
d_fsdata->oe = dentry->d_fsdata;
list_add(&d_fsdata->list, &ofs->d_fsdata_list);
OVL_DEBUG("add: dentry=%pd4, 0x%p, oe=0x%p\n",
d_fsdata->d, d_fsdata->d, d_fsdata->oe);
return 0;
}
static int ovl_clear_d_fsdata(struct ovl_fs *ofs)
{
struct ovl_d_fsdata *d_fsdata;
struct ovl_d_fsdata *d_fsdata_next;
list_for_each_entry_safe(d_fsdata, d_fsdata_next, &ofs->d_fsdata_list,
list) {
OVL_DEBUG("delete: dentry=%pd4, 0x%p\n",
d_fsdata->d, d_fsdata->d);
list_del(&d_fsdata->list);
kfree(d_fsdata);
}
return 0;
}
static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
{
return oe->numlower ? oe->lowerstack[0].dentry : NULL;
@@ -658,6 +720,8 @@ static void ovl_put_super(struct super_block *sb)
struct ovl_fs *ufs = sb->s_fs_info;
unsigned i;
ovl_clear_d_fsdata(ufs);
dput(ufs->workdir);
mntput(ufs->upper_mnt);
for (i = 0; i < ufs->numlower; i++)
@@ -1049,6 +1113,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!ufs)
goto out;
INIT_LIST_HEAD(&ufs->d_fsdata_list);
err = ovl_parse_opt((char *) data, &ufs->config);
if (err)
goto out_free_config;

View File

@@ -114,9 +114,9 @@ char **__glob_argv = 0;
#define ENABLE_MCOVERLAYFS 1
#endif // LINUX_VERSION_CODE == 4.6
#else
#if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,2)
#if RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,3)
#define ENABLE_MCOVERLAYFS 1
#endif // RHEL_RELEASE_CODE == 7.2
#endif // RHEL_RELEASE_CODE <= 7.3
#endif // RHEL_RELEASE_CODE
#endif // ENABLE_MCOVERLAYFS
@@ -2636,6 +2636,24 @@ return_execve2:
do_syscall_return(fd, cpu, ret, 0, 0, 0, 0);
break;
case __NR_stat:
ret = do_strncpy_from_user(fd, pathbuf, (void *)w.sr.args[0], PATH_MAX);
if (ret >= PATH_MAX) {
ret = -ENAMETOOLONG;
}
if (ret < 0) {
do_syscall_return(fd, cpu, ret, 0, 0, 0, 0);
break;
}
fn = chgpath(pathbuf, tmpbuf);
ret = stat(fn, (struct stat *)w.sr.args[1]);
__dprintf("stat: path=%s, ret=%ld\n", fn, ret);
SET_ERR(ret);
do_syscall_return(fd, cpu, ret, 0, 0, 0, 0);
break;
default:
ret = do_generic_syscall(&w);
do_syscall_return(fd, cpu, ret, 0, 0, 0, 0);

View File

@@ -1,12 +1,17 @@
ENABLE_RUSAGE=@ENABLE_RUSAGE@
VPATH=@abs_srcdir@
SRC=$(VPATH)
IHKDIR=$(IHKBASE)/$(TARGETDIR)
OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o rusage.o freeze.o
DEPSRCS=$(wildcard $(SRC)/*.c)
DOPT=
ifeq ($(ENABLE_RUSAGE),yes)
DOPT=-DENABLE_RUSAGE
endif
CFLAGS += -I$(SRC)/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
CFLAGS += -I$(SRC)/include -D__KERNEL__ $(DOPT) -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
LDFLAGS += -e arch_start
IHKOBJ = ihk/ihk.o

View File

@@ -25,15 +25,18 @@
#include <init.h>
#include <march.h>
#include <cls.h>
#include <time.h>
#include <syscall.h>
#include <rusage.h>
//#define DEBUG_PRINT_AP
#ifdef DEBUG_PRINT_AP
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#endif
int num_processors = 1;
@@ -41,6 +44,8 @@ static volatile int ap_stop = 1;
mcs_lock_node_t ap_syscall_semaphore;
extern struct ihk_os_monitor *monitor;
static void ap_wait(void)
{
init_tick();
@@ -117,6 +122,10 @@ void ap_init(void)
num_processors++;
}
kprintf("BSP: booted %d AP CPUs\n", cpu_info->ncpus - 1);
#ifdef ENABLE_RUSAGE
rusage_num_threads = 0;
rusage_max_num_threads = 0;
#endif
}
#include <sysfs.h>

View File

@@ -24,16 +24,22 @@ extern int num_processors;
struct cpu_local_var *clv;
int cpu_local_var_initialized = 0;
extern struct ihk_os_monitor *monitor;
void cpu_local_var_init(void)
{
int z;
int i;
z = sizeof(struct cpu_local_var) * num_processors;
z = (z + PAGE_SIZE - 1) >> PAGE_SHIFT;
clv = ihk_mc_alloc_pages(z, IHK_MC_AP_CRITICAL);
memset(clv, 0, z * PAGE_SIZE);
for(i = 0; i < num_processors; i++)
clv[i].monitor = monitor + i;
cpu_local_var_initialized = 1;
}

kernel/freeze.c (new file)
View File

@@ -0,0 +1,54 @@
#include <kmsg.h>
#include <string.h>
#include <ihk/cpu.h>
#include <ihk/debug.h>
#include <cls.h>
extern int nmi_mode;
extern void mod_nmi_ctx(void *, void(*)());
extern void lapic_ack();
extern void __freeze();
void
freeze()
{
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
monitor->status_bak = monitor->status;
monitor->status = IHK_OS_MONITOR_KERNEL_FROZEN;
while (monitor->status == IHK_OS_MONITOR_KERNEL_FROZEN)
cpu_halt();
monitor->status = monitor->status_bak;
}
long
freeze_thaw(void *nmi_ctx)
{
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
if (nmi_mode == 1) {
if (monitor->status != IHK_OS_MONITOR_KERNEL_FROZEN) {
#if 1
mod_nmi_ctx(nmi_ctx, __freeze);
return 1;
#else
unsigned long flags;
flags = cpu_disable_interrupt_save();
monitor->status_bak = monitor->status;
monitor->status = IHK_OS_MONITOR_KERNEL_FROZEN;
lapic_ack();
while (monitor->status == IHK_OS_MONITOR_KERNEL_FROZEN)
cpu_halt();
monitor->status = monitor->status_bak;
cpu_restore_interrupt(flags);
#endif
}
}
else if(nmi_mode == 2) {
if (monitor->status == IHK_OS_MONITOR_KERNEL_FROZEN) {
monitor->status = IHK_OS_MONITOR_KERNEL_THAW;
}
}
return 0;
}

View File

@@ -23,14 +23,15 @@
#include <ihk/debug.h>
#include <ihk/ikc.h>
#include <ikc/master.h>
#include <syscall.h>
#include <cls.h>
#include <syscall.h>
#include <process.h>
#include <page.h>
#include <mman.h>
#include <init.h>
#include <kmalloc.h>
#include <sysfs.h>
#include <ihk/perfctr.h>
//#define DEBUG_PRINT_HOST
@@ -488,6 +489,8 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
unsigned long pp;
int cpuid;
int ret = 0;
struct perf_ctrl_desc *pcd;
unsigned int mode = 0;
switch (packet->msg) {
case SCD_MSG_INIT_CHANNEL_ACKED:
@@ -597,6 +600,61 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
ret = 0;
break;
case SCD_MSG_PERF_CTRL:
pp = ihk_mc_map_memory(NULL, packet->arg, sizeof(struct perf_ctrl_desc));
pcd = (struct perf_ctrl_desc *)ihk_mc_map_virtual(pp, 1, PTATTR_WRITABLE | PTATTR_ACTIVE);
switch (pcd->ctrl_type) {
case PERF_CTRL_SET:
if (!pcd->exclude_kernel) {
mode |= PERFCTR_KERNEL_MODE;
}
if (!pcd->exclude_user) {
mode |= PERFCTR_USER_MODE;
}
ihk_mc_perfctr_init_raw(pcd->target_cntr, pcd->config, mode);
ihk_mc_perfctr_stop(1 << pcd->target_cntr);
ihk_mc_perfctr_reset(pcd->target_cntr);
break;
case PERF_CTRL_ENABLE:
ihk_mc_perfctr_start(pcd->target_cntr_mask);
break;
case PERF_CTRL_DISABLE:
ihk_mc_perfctr_stop(pcd->target_cntr_mask);
break;
case PERF_CTRL_GET:
pcd->read_value = ihk_mc_perfctr_read(pcd->target_cntr);
break;
default:
kprintf("%s: SCD_MSG_PERF_CTRL unexpected ctrl_type\n", __FUNCTION__);
}
ihk_mc_unmap_virtual(pcd, 1, 0);
ihk_mc_unmap_memory(NULL, pp, sizeof(struct perf_ctrl_desc));
pckt.msg = SCD_MSG_PERF_ACK;
pckt.err = 0;
pckt.arg = packet->arg;
ihk_ikc_send(c, &pckt, 0);
ret = 0;
break;
case SCD_MSG_CPU_RW_REG:
pckt.msg = SCD_MSG_CPU_RW_REG_RESP;
memcpy(&pckt.desc, &packet->desc,
sizeof(struct mcctrl_os_cpu_register));
pckt.resp = packet->resp;
pckt.err = arch_cpu_read_write_register(&pckt.desc, pckt.op);
ihk_ikc_send(c, &pckt, 0);
break;
default:
kprintf("syscall_pakcet_handler:unknown message "
"(%d.%d.%d.%d.%d.%#lx)\n",

View File

@@ -28,6 +28,26 @@ struct kmalloc_header {
/* 32 bytes */
};
#define IHK_OS_MONITOR_NOT_BOOT 0
#define IHK_OS_MONITOR_IDLE 1
#define IHK_OS_MONITOR_USER 2
#define IHK_OS_MONITOR_KERNEL 3
#define IHK_OS_MONITOR_KERNEL_HEAVY 4
#define IHK_OS_MONITOR_KERNEL_OFFLOAD 5
#define IHK_OS_MONITOR_KERNEL_FREEZING 8
#define IHK_OS_MONITOR_KERNEL_FROZEN 9
#define IHK_OS_MONITOR_KERNEL_THAW 10
#define IHK_OS_MONITOR_PANIC 99
struct ihk_os_monitor {
int status;
int status_bak;
unsigned long counter;
unsigned long ocounter;
unsigned long user_tsc;
unsigned long system_tsc;
};
#include <ihk/lock.h>
#define CPU_STATUS_DISABLE (0)
#define CPU_STATUS_IDLE (1)
@@ -75,6 +95,7 @@ struct cpu_local_var {
int no_preempt;
int timer_enabled;
int kmalloc_initialized;
struct ihk_os_monitor *monitor;
} __attribute__((aligned(64)));

View File

@@ -29,6 +29,8 @@ extern void init_host_syscall_channel2(void);
extern void sched_init(void);
extern void pc_ap_init(void);
extern void cpu_sysfs_setup(void);
extern void rusage_sysfs_setup(void);
extern void status_sysfs_setup(void);
extern char *find_command_line(char *name);

View File

@@ -389,6 +389,7 @@ struct vm_range {
off_t objoff;
int pgshift; /* page size. 0 means THP */
int padding;
void *private_data;
};
struct vm_range_numa_policy {
@@ -664,9 +665,14 @@ struct thread {
struct sig_pending *ptrace_sendsig;
// cpu time
/*
struct timespec stime;
struct timespec utime;
struct timespec btime;
*/
unsigned long system_tsc;
unsigned long user_tsc;
unsigned long base_tsc;
int times_update;
int in_kernel;

kernel/include/rusage.h (new file)
View File

@@ -0,0 +1,79 @@
#ifndef __RUSAGE_H
#define __RUSAGE_H
#define RUSAGE_DEFAULT_SIZE 10
enum RUSAGE_MEMBER {
RUSAGE_RSS,
RUSAGE_CACHE,
RUSAGE_RSS_HUGE,
RUSAGE_MAPPED_FILE,
RUSAGE_MAX_USAGE,
RUSAGE_KMEM_USAGE,
RUSAGE_KMAX_USAGE,
RUSAGE_NUM_NUMA_NODES,
RUSAGE_NUMA_STAT,
RUSAGE_HUGETLB ,
RUSAGE_HUGETLB_MAX ,
RUSAGE_STAT_SYSTEM ,
RUSAGE_STAT_USER ,
RUSAGE_USAGE ,
RUSAGE_USAGE_PER_CPU ,
RUSAGE_NUM_THREADS ,
RUSAGE_MAX_NUM_THREADS
};
struct r_data{
unsigned long pid;
unsigned long rss;
unsigned long cache;
unsigned long rss_huge;
unsigned long mapped_file;
unsigned long max_usage;
unsigned long kmem_usage;
unsigned long kmax_usage;
unsigned long hugetlb;
unsigned long hugetlb_max;
unsigned long stat_system;
unsigned long stat_user;
unsigned long usage;
struct r_data *next;
} ;
typedef struct r_data rusage_data;
rusage_data *rdata[RUSAGE_DEFAULT_SIZE];
unsigned long rusage_max_num_threads;
unsigned long rusage_num_threads;
enum ihk_os_status {
IHK_STATUS_INACTIVE,
IHK_STATUS_BOOTING,
IHK_STATUS_RUNNING,
IHK_STATUS_SHUTDOWN,
IHK_STATUS_PANIC,
IHK_STATUS_HUNGUP,
IHK_STATUS_FREEZING,
IHK_STATUS_FROZEN,
};
enum ihk_os_status os_status;
unsigned long sys_delegate_count;
enum sys_delegate_state_enum {
ENTER_KERNEL,
EXIT_KERNEL,
};
enum sys_delegate_state_enum sys_delegate_state;
unsigned long rusage_rss[sizeof(cpu_set_t)/8];
unsigned long rusage_rss_max;
long rusage_rss_current;
unsigned long rusage_kmem_usage;
unsigned long rusage_kmem_max_usage;
unsigned long rusage_hugetlb_usage;
unsigned long rusage_hugetlb_max_usage;
unsigned long rusage_numa_stat[1024];
unsigned long rusage_max_memory;
#define RUSAGE_MEM_LIMIT 2000000
#endif

View File

@@ -73,6 +73,13 @@
/* #define SCD_MSG_SYSFS_RESP_CLEANUP 0x43 */
#define SCD_MSG_PROCFS_TID_CREATE 0x44
#define SCD_MSG_PROCFS_TID_DELETE 0x45
#define SCD_MSG_EVENT_SIGNAL 0x46
#define SCD_MSG_PERF_CTRL 0x50
#define SCD_MSG_PERF_ACK 0x51
#define SCD_MSG_CPU_RW_REG 0x52
#define SCD_MSG_CPU_RW_REG_RESP 0x53
/* Cloning flags. */
# define CSIGNAL 0x000000ff /* Signal mask to be sent at exit. */
@@ -206,6 +213,18 @@ struct syscall_request {
unsigned long args[6];
};
struct mcctrl_os_cpu_register {
unsigned long addr;
unsigned long val;
unsigned long addr_ext;
};
enum mcctrl_os_cpu_operation {
MCCTRL_OS_CPU_READ_REGISTER,
MCCTRL_OS_CPU_WRITE_REGISTER,
MCCTRL_OS_CPU_MAX_OP
};
struct ikc_scd_packet {
int msg;
int err;
@@ -231,6 +250,13 @@ struct ikc_scd_packet {
struct {
int ttid;
};
/* SCD_MSG_CPU_RW_REG */
struct {
struct mcctrl_os_cpu_register desc;
enum mcctrl_os_cpu_operation op;
void *resp;
};
};
char padding[12];
};
@@ -374,6 +400,29 @@ struct tod_data_s {
};
extern struct tod_data_s tod_data; /* residing in arch-dependent file */
static inline void tsc_to_ts(unsigned long tsc, struct timespec *ts)
{
time_t sec_delta;
long ns_delta;
sec_delta = tsc / tod_data.clocks_per_sec;
ns_delta = NS_PER_SEC * (tsc % tod_data.clocks_per_sec)
/ tod_data.clocks_per_sec;
/* calc. of ns_delta overflows if clocks_per_sec exceeds 18.44 GHz */
ts->tv_sec = sec_delta;
ts->tv_nsec = ns_delta;
if (ts->tv_nsec >= NS_PER_SEC) {
ts->tv_nsec -= NS_PER_SEC;
++ts->tv_sec;
}
}
static inline unsigned long timespec_to_jiffy(const struct timespec *ats)
{
return ats->tv_sec * 100 + ats->tv_nsec / 10000000;
}
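
A quick stand-alone check of the tsc_to_ts()/timespec_to_jiffy() arithmetic above, assuming a hypothetical 2 GHz clock and NS_PER_SEC = 10^9:

#include <stdio.h>

#define NS_PER_SEC 1000000000UL

int main(void)
{
	unsigned long clocks_per_sec = 2000000000UL;	/* assume a 2 GHz TSC */
	unsigned long tsc = 5000000123UL;		/* 2.5 s and a bit */
	unsigned long sec = tsc / clocks_per_sec;
	unsigned long ns = NS_PER_SEC * (tsc % clocks_per_sec) / clocks_per_sec;

	/* 5000000123 / 2e9 = 2 s; remainder 1000000123 ticks = 500000061 ns */
	printf("%lu.%09lu s -> %lu jiffies\n", sec, ns,
	       sec * 100 + ns / 10000000);		/* 100 Hz jiffies: 250 */
	return 0;
}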
void reset_cputime();
void set_cputime(int mode);
int do_munmap(void *addr, size_t len);
@@ -385,6 +434,8 @@ int do_shmget(key_t key, size_t size, int shmflg);
struct process_vm;
int arch_map_vdso(struct process_vm *vm); /* arch dependent */
int arch_setup_vdso(void);
int arch_cpu_read_write_register(struct mcctrl_os_cpu_register *desc,
enum mcctrl_os_cpu_operation op);
#define VDSO_MAXPAGES 2
struct vdso {
@@ -421,4 +472,34 @@ struct get_cpu_mapping_req {
#endif
};
enum perf_ctrl_type {
PERF_CTRL_SET,
PERF_CTRL_GET,
PERF_CTRL_ENABLE,
PERF_CTRL_DISABLE,
};
struct perf_ctrl_desc {
enum perf_ctrl_type ctrl_type;
int status;
union {
/* for SET, GET */
struct {
unsigned int target_cntr;
unsigned long config;
unsigned long read_value;
unsigned disabled :1,
pinned :1,
exclude_user :1,
exclude_kernel :1,
exclude_hv :1,
exclude_idle :1;
};
/* for ENABLE, DISABLE */
struct {
unsigned long target_cntr_mask;
};
};
};
#endif

View File

@@ -11,11 +11,16 @@
#ifndef _XPMEM_H
#define _XPMEM_H
#include <process.h>
#include <ihk/context.h>
#define XPMEM_DEV_PATH "/dev/xpmem"
extern int xpmem_open(ihk_mc_user_context_t *ctx);
extern int xpmem_remove_process_memory_range(struct process_vm *vm,
struct vm_range *vmr);
extern int xpmem_fault_process_memory_range(struct process_vm *vm,
struct vm_range *vmr, unsigned long vaddr, uint64_t reason);
#endif /* _XPMEM_H */

View File

@@ -160,7 +160,7 @@ static inline int xpmem_ap_hashtable_index(xpmem_apid_t apid)
index = ((xpmem_id_t *)&apid)->xpmem_id.uniq % XPMEM_AP_HASHTABLE_SIZE;
XPMEM_DEBUG("return: apid=%lu, index=%d", apid, index);
XPMEM_DEBUG("return: apid=0x%lx, index=%d", apid, index);
return index;
}
@@ -174,22 +174,20 @@ struct xpmem_thread_group {
uid_t uid; /* tg's uid */
gid_t gid; /* tg's gid */
volatile int flags; /* tg attributes and state */
ihk_atomic_t uniq_segid;
ihk_atomic_t uniq_apid;
mcs_rwlock_lock_t seg_list_lock;
ihk_atomic_t uniq_segid; /* segid uniq */
ihk_atomic_t uniq_apid; /* apid uniq */
mcs_rwlock_lock_t seg_list_lock; /* tg's list of segs lock */
struct list_head seg_list; /* tg's list of segs */
ihk_atomic_t refcnt; /* references to tg */
ihk_atomic_t n_pinned; /* #of pages pinned by this tg */
struct list_head tg_hashlist; /* tg hash list */
struct thread *group_leader; /* thread group leader */
struct process_vm *vm; /* tg's mm */
ihk_atomic_t n_recall_PFNs; /* #of recall of PFNs in progress */
struct process_vm *vm; /* tg's process_vm */
struct xpmem_hashlist ap_hashtable[]; /* locks + ap hash lists */
};
struct xpmem_segment {
ihk_spinlock_t lock; /* seg lock */
mcs_rwlock_lock_t seg_lock; /* seg sema */
xpmem_segid_t segid; /* unique segid */
unsigned long vaddr; /* starting address */
size_t size; /* size of seg */
@@ -216,18 +214,16 @@ struct xpmem_access_permit {
};
struct xpmem_attachment {
mcs_rwlock_lock_t at_lock; /* att lock for serialization */
struct mcs_rwlock_node_irqsave at_irqsave; /* att lock for serialization */
mcs_rwlock_lock_t at_lock; /* att lock */
unsigned long vaddr; /* starting address of seg attached */
unsigned long at_vaddr; /* address where seg is attached */
size_t at_size; /* size of seg attachment */
struct vm_range *at_vma; /* vma where seg is attachment */
struct vm_range *at_vmr; /* vm_range where seg is attachment */
volatile int flags; /* att attributes and state */
ihk_atomic_t refcnt; /* references to att */
struct xpmem_access_permit *ap; /* associated access permit */
struct list_head att_list; /* atts linked to access permit */
struct process_vm *vm; /* mm struct attached to */
mcs_rwlock_lock_t invalidate_lock; /* to serialize page table invalidates */
struct process_vm *vm; /* process_vm attached to */
};
struct xpmem_partition {
@@ -249,8 +245,10 @@ struct xpmem_perm {
#define XPMEM_PERM_IRUSR 00400
#define XPMEM_PERM_IWUSR 00200
extern struct xpmem_partition *xpmem_my_part;
static int xpmem_ioctl(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close( struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_init(void);
static void xpmem_exit(void);
@@ -263,10 +261,47 @@ static xpmem_segid_t xpmem_make_segid(struct xpmem_thread_group *);
static int xpmem_remove(xpmem_segid_t);
static void xpmem_remove_seg(struct xpmem_thread_group *,
struct xpmem_segment *);
static void xpmem_remove_segs_of_tg(struct xpmem_thread_group *seg_tg);
static int xpmem_get(xpmem_segid_t, int, int, void *, xpmem_apid_t *);
static int xpmem_check_permit_mode(int, struct xpmem_segment *);
static int xpmem_perms(struct xpmem_perm *, short);
static xpmem_apid_t xpmem_make_apid(struct xpmem_thread_group *);
static int xpmem_release(xpmem_apid_t);
static void xpmem_release_ap(struct xpmem_thread_group *,
struct xpmem_access_permit *);
static void xpmem_release_aps_of_tg(struct xpmem_thread_group *ap_tg);
static int xpmem_attach(struct mckfd *, xpmem_apid_t, off_t, size_t,
unsigned long, int, int, unsigned long *);
static int xpmem_detach(unsigned long);
static int xpmem_vm_munmap(struct process_vm *vm, void *addr, size_t len);
static int xpmem_remove_process_range(struct process_vm *vm,
unsigned long start, unsigned long end, int *ro_freedp);
static int xpmem_free_process_memory_range(struct process_vm *vm,
struct vm_range *range);
static void xpmem_detach_att(struct xpmem_access_permit *,
struct xpmem_attachment *);
static void xpmem_clear_PTEs(struct xpmem_segment *);
static void xpmem_clear_PTEs_range(struct xpmem_segment *, unsigned long,
unsigned long);
static void xpmem_clear_PTEs_of_ap(struct xpmem_access_permit *, unsigned long,
unsigned long);
static void xpmem_clear_PTEs_of_att(struct xpmem_attachment *, unsigned long,
unsigned long);
extern struct xpmem_partition *xpmem_my_part;
static int xpmem_remap_pte(struct process_vm *, struct vm_range *,
unsigned long, uint64_t, struct xpmem_segment *, unsigned long);
static int xpmem_ensure_valid_page(struct xpmem_segment *, unsigned long);
static pte_t * xpmem_vaddr_to_pte(struct process_vm *, unsigned long,
size_t *pgsize);
static int xpmem_pin_page(struct xpmem_thread_group *, struct thread *,
struct process_vm *, unsigned long);
static void xpmem_unpin_pages(struct xpmem_segment *, struct process_vm *,
unsigned long, size_t);
static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
pid_t, int, int);
@@ -317,10 +352,17 @@ static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock(
#define xpmem_tg_ref_by_tgid_all_nolock(t) __xpmem_tg_ref_by_tgid_nolock(t, 1)
static struct xpmem_thread_group * xpmem_tg_ref_by_segid(xpmem_segid_t);
static struct xpmem_thread_group * xpmem_tg_ref_by_apid(xpmem_apid_t);
static void xpmem_tg_deref(struct xpmem_thread_group *);
static struct xpmem_segment *xpmem_seg_ref_by_segid(struct xpmem_thread_group *,
xpmem_segid_t);
static void xpmem_seg_deref(struct xpmem_segment *);
static struct xpmem_access_permit * xpmem_ap_ref_by_apid(
struct xpmem_thread_group *, xpmem_apid_t);
static void xpmem_ap_deref(struct xpmem_access_permit *);
static void xpmem_att_deref(struct xpmem_attachment *);
static int xpmem_validate_access(struct xpmem_access_permit *, off_t, size_t,
int, unsigned long *);
/*
* Inlines that mark an internal driver structure as being destroyable or not.
@@ -363,6 +405,42 @@ static inline void xpmem_seg_destroyable(
XPMEM_DEBUG("return: ");
}
static inline void xpmem_ap_not_destroyable(
struct xpmem_access_permit *ap)
{
ihk_atomic_set(&ap->refcnt, 1);
XPMEM_DEBUG("return: ap->refcnt=%d", ap->refcnt);
}
static inline void xpmem_ap_destroyable(
struct xpmem_access_permit *ap)
{
XPMEM_DEBUG("call: ");
xpmem_ap_deref(ap);
XPMEM_DEBUG("return: ");
}
static inline void xpmem_att_not_destroyable(
struct xpmem_attachment *att)
{
ihk_atomic_set(&att->refcnt, 1);
XPMEM_DEBUG("return: att->refcnt=%d", att->refcnt);
}
static inline void xpmem_att_destroyable(
struct xpmem_attachment *att)
{
XPMEM_DEBUG("call: ");
xpmem_att_deref(att);
XPMEM_DEBUG("return: ");
}
/*
* Inlines that increment the refcnt for the specified structure.
*/
@@ -384,5 +462,29 @@ static inline void xpmem_seg_ref(
XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}
static inline void xpmem_ap_ref(
struct xpmem_access_permit *ap)
{
DBUG_ON(ihk_atomic_read(&ap->refcnt) <= 0);
ihk_atomic_inc(&ap->refcnt);
XPMEM_DEBUG("return: ap->refcnt=%d", ap->refcnt);
}
static inline void xpmem_att_ref(
struct xpmem_attachment *att)
{
DBUG_ON(ihk_atomic_read(&att->refcnt) <= 0);
ihk_atomic_inc(&att->refcnt);
XPMEM_DEBUG("return: att->refcnt=%d", att->refcnt);
}
static inline int xpmem_is_private_data(
struct vm_range *vmr)
{
return (vmr->private_data != NULL);
}
#endif /* _XPMEM_PRIVATE_H */

View File

@@ -31,6 +31,7 @@
#include <cls.h>
#include <syscall.h>
#include <sysfs.h>
#include <rusage.h>
//#define IOCTL_FUNC_EXTENSION
#ifdef IOCTL_FUNC_EXTENSION
@@ -239,6 +240,31 @@ static void time_init(void)
return;
}
struct ihk_os_monitor *monitor;
static void monitor_init()
{
int z;
unsigned long phys;
z = sizeof(struct ihk_os_monitor) * num_processors;
z = (z + PAGE_SIZE -1) >> PAGE_SHIFT;
monitor = ihk_mc_alloc_pages(z, IHK_MC_AP_CRITICAL);
memset(monitor, 0, z * PAGE_SIZE);
phys = virt_to_phys(monitor);
ihk_set_monitor(phys, sizeof(struct ihk_os_monitor) * num_processors);
}
int nmi_mode;
static void nmi_init()
{
unsigned long phys;
phys = virt_to_phys(&nmi_mode);
ihk_set_nmi_mode_addr(phys);
}
static void rest_init(void)
{
handler_init();
@@ -250,7 +276,9 @@ static void rest_init(void)
//pc_test();
ap_init();
monitor_init();
cpu_local_var_init();
nmi_init();
time_init();
kmalloc_init();
@@ -320,6 +348,10 @@ static void setup_remote_snooping_samples(void)
static void populate_sysfs(void)
{
cpu_sysfs_setup();
#ifdef ENABLE_RUSAGE
rusage_sysfs_setup();
status_sysfs_setup();
#endif
//setup_remote_snooping_samples();
} /* populate_sysfs() */
@@ -361,6 +393,21 @@ int main(void)
char *ptr;
int mode = 0;
#ifdef ENABLE_RUSAGE
int i;
os_status = IHK_STATUS_INACTIVE;
rusage_hugetlb_usage = 0;
rusage_hugetlb_max_usage = 0;
for (i = 0; i < sizeof(cpu_set_t)/8; i++) {
rusage_rss[i] = 0;
}
for (i = 0; i < 1024; i++) {
rusage_numa_stat[i] = 0;
}
rusage_rss_current = 0;
rusage_rss_max = 0;
#endif
ptr = find_command_line("ksyslogd=");
if (ptr) {
mode = ptr[9] - 0x30;
@@ -369,7 +416,9 @@ int main(void)
kmsg_init(mode);
kputs("IHK/McKernel started.\n");
#ifdef ENABLE_RUSAGE
os_status = IHK_STATUS_BOOTING;
#endif
ihk_set_kmsg(virt_to_phys(&kmsg_buf), IHK_KMSG_SIZE);
arch_init();
@@ -392,6 +441,9 @@ int main(void)
futex_init();
kputs("IHK/McKernel booted.\n");
#ifdef ENABLE_RUSAGE
os_status = IHK_STATUS_RUNNING;
#endif
#ifdef DCFA_KMOD
mc_cmd_client_init();

View File

@@ -37,6 +37,7 @@
#include <cpulocal.h>
#include <init.h>
#include <cas.h>
#include <rusage.h>
//#define DEBUG_PRINT_MEM
@@ -537,6 +538,9 @@ static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
__FUNCTION__,
ihk_mc_get_numa_id(),
npages, node);
#ifdef ENABLE_RUSAGE
rusage_numa_stat[ihk_mc_get_numa_id()] += npages * PAGE_SIZE;
#endif
break;
}
}
@@ -582,6 +586,9 @@ distance_based:
ihk_mc_get_numa_id(),
npages,
memory_nodes[node].nodes_by_distance[i].id);
#ifdef ENABLE_RUSAGE
rusage_numa_stat[ihk_mc_get_numa_id()] += npages * PAGE_SIZE;
#endif
break;
}
}
@@ -602,7 +609,9 @@ order_based:
&memory_nodes[(node + i) %
ihk_mc_get_nr_numa_nodes()].allocators, list) {
pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
#ifdef ENABLE_RUSAGE
rusage_numa_stat[ihk_mc_get_numa_id()] += npages * PAGE_SIZE;
#endif
if (pa) break;
}
@@ -634,6 +643,9 @@ static void __mckernel_free_pages_in_allocator(void *va, int npages)
if (pa_start >= pa_allocator->start &&
pa_end <= pa_allocator->end) {
ihk_pagealloc_free(pa_allocator, pa_start, npages);
#ifdef ENABLE_RUSAGE
rusage_numa_stat[i] -= npages * PAGE_SIZE;
#endif
return;
}
}
@@ -1051,6 +1063,9 @@ static void numa_init(void)
ihk_pagealloc_count(allocator) * PAGE_SIZE,
ihk_pagealloc_count(allocator),
numa_id);
#ifdef ENABLE_RUSAGE
rusage_max_memory = ihk_pagealloc_count(allocator) * PAGE_SIZE;
#endif
}
}

View File

@@ -31,6 +31,8 @@
#include <auxvec.h>
#include <timer.h>
#include <mman.h>
#include <xpmem.h>
#include <rusage.h>
//#define DEBUG_PRINT_PROCESS
@@ -65,6 +67,7 @@ extern void procfs_create_thread(struct thread *);
extern void procfs_delete_thread(struct thread *);
extern void perf_start(struct mc_perf_event *event);
extern void perf_reset(struct mc_perf_event *event);
extern void event_signal();
struct list_head resource_set_list;
mcs_rwlock_lock_t resource_set_lock;
@@ -328,7 +331,25 @@ struct thread *create_thread(unsigned long user_pc,
ihk_mc_spinlock_init(&thread->spin_sleep_lock);
thread->spin_sleep = 0;
#ifdef ENABLE_RUSAGE
{
int processor_id;
unsigned long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += KERNEL_STACK_NR_PAGES * PAGE_SIZE;
curr = ihk_atomic_add_long_return ( KERNEL_STACK_NR_PAGES * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
}
if (rusage_max_memory - curr < RUSAGE_MEM_LIMIT) {
event_signal();
}
ihk_atomic_add_ulong ( 1, &rusage_num_threads);
if (rusage_max_num_threads < rusage_num_threads) {
atomic_cmpxchg8(&rusage_max_num_threads, rusage_max_num_threads, rusage_num_threads);
}
}
#endif
return thread;
err:
@@ -475,6 +496,29 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
ihk_mc_spinlock_init(&thread->spin_sleep_lock);
thread->spin_sleep = 0;
#ifdef ENABLE_RUSAGE
{
int processor_id;
long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += KERNEL_STACK_NR_PAGES * PAGE_SIZE;
curr = ihk_atomic_add_long_return (KERNEL_STACK_NR_PAGES * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
}
if (rusage_max_memory - curr < RUSAGE_MEM_LIMIT) {
event_signal();
}
ihk_atomic_add_ulong ( 1, &rusage_num_threads);
if (rusage_max_num_threads < rusage_num_threads) {
atomic_cmpxchg8(&rusage_max_num_threads, rusage_max_num_threads, rusage_num_threads);
}
}
#endif
#ifdef TRACK_SYSCALLS
thread->track_syscalls = org->track_syscalls;
#endif
@@ -637,6 +681,7 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
range->memobj = src_range->memobj;
range->objoff = src_range->objoff;
range->pgshift = src_range->pgshift;
range->private_data = src_range->private_data;
if (range->memobj) {
memobj_ref(range->memobj);
}
@@ -734,6 +779,7 @@ int split_process_memory_range(struct process_vm *vm, struct vm_range *range,
newrange->end = range->end;
newrange->flag = range->flag;
newrange->pgshift = range->pgshift;
newrange->private_data = range->private_data;
if (range->memobj) {
memobj_ref(range->memobj);
@@ -953,6 +999,10 @@ int remove_process_memory_range(struct process_vm *vm,
ro_freed = 1;
}
if (freerange->private_data) {
xpmem_remove_process_memory_range(vm, freerange);
}
error = free_process_memory_range(vm, freerange);
if (error) {
ekprintf("remove_process_memory_range(%p,%lx,%lx):"
@@ -1058,6 +1108,7 @@ int add_process_memory_range(struct process_vm *vm,
range->memobj = memobj;
range->objoff = offset;
range->pgshift = pgshift;
range->private_data = NULL;
rc = 0;
if (phys == NOPHYS) {
@@ -1793,7 +1844,12 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
}
}
error = page_fault_process_memory_range(vm, range, fault_addr, reason);
if (!range->private_data) {
error = page_fault_process_memory_range(vm, range, fault_addr, reason);
}
else {
error = xpmem_fault_process_memory_range(vm, range, fault_addr, reason);
}
if (error == -ERESTART) {
goto out;
}
@@ -1951,6 +2007,24 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
end + sizeof(unsigned long) * s_ind);
thread->vm->region.stack_end = end;
thread->vm->region.stack_start = start;
#ifdef ENABLE_RUSAGE
{
int processor_id;
long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += (minsz >> PAGE_SHIFT) * PAGE_SIZE;
curr = ihk_atomic_add_long_return((minsz >> PAGE_SHIFT) * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
}
if (rusage_max_memory - curr < RUSAGE_MEM_LIMIT) {
event_signal();
}
}
#endif
return 0;
}
@@ -2054,7 +2128,21 @@ unsigned long extend_process_region(struct process_vm *vm,
ihk_mc_free_pages(p, (aligned_new_end - aligned_end) >> PAGE_SHIFT);
return end;
}
#ifdef ENABLE_RUSAGE
{
int processor_id;
long curr;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] += ((aligned_new_end - aligned_end) >> PAGE_SHIFT) * PAGE_SIZE;
curr = ihk_atomic_add_long_return(((aligned_new_end - aligned_end) >> PAGE_SHIFT) * PAGE_SIZE, &rusage_rss_current);
if (rusage_rss_max < curr) {
atomic_cmpxchg8(&rusage_rss_max, rusage_rss_max, curr);
}
if (rusage_max_memory - curr < RUSAGE_MEM_LIMIT) {
event_signal();
}
}
#endif
return address;
}
@@ -2209,6 +2297,19 @@ release_process_vm(struct process_vm *vm)
return;
}
{
long irqstate;
struct mckfd *fdp;
irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);
for (fdp = proc->mckfd; fdp; fdp = fdp->next) {
if (fdp->close_cb) {
fdp->close_cb(fdp, NULL);
}
}
ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);
}
if(vm->free_cb)
vm->free_cb(vm, vm->opt);
@@ -2336,6 +2437,16 @@ void destroy_thread(struct thread *thread)
release_sigcommon(thread->sigcommon);
#ifdef ENABLE_RUSAGE
{
int processor_id;
processor_id = ihk_mc_get_processor_id();
rusage_rss[processor_id] -= KERNEL_STACK_NR_PAGES * PAGE_SIZE;
ihk_atomic_add_long_return(-(KERNEL_STACK_NR_PAGES * PAGE_SIZE), &rusage_rss_current);
ihk_atomic_add_ulong(-1, &rusage_num_threads);
}
#endif
ihk_mc_free_pages(thread, KERNEL_STACK_NR_PAGES);
}
@@ -2343,14 +2454,17 @@ void release_thread(struct thread *thread)
{
struct process_vm *vm;
struct mcs_rwlock_node lock;
struct timespec ats;
if (!ihk_atomic_dec_and_test(&thread->refcount)) {
return;
}
mcs_rwlock_writer_lock_noirq(&thread->proc->update_lock, &lock);
ts_add(&thread->proc->stime, &thread->stime);
ts_add(&thread->proc->utime, &thread->utime);
tsc_to_ts(thread->system_tsc, &ats);
ts_add(&thread->proc->stime, &ats);
tsc_to_ts(thread->user_tsc, &ats);
ts_add(&thread->proc->utime, &ats);
mcs_rwlock_writer_unlock_noirq(&thread->proc->update_lock, &lock);
vm = thread->vm;
@@ -2398,6 +2512,7 @@ static void do_migrate(void);
static void idle(void)
{
struct cpu_local_var *v = get_this_cpu_local_var();
struct ihk_os_monitor *monitor = v->monitor;
/* Release runq_lock before starting the idle loop.
* See comments at release_runq_lock().
@@ -2458,8 +2573,11 @@ static void idle(void)
v->status == CPU_STATUS_RESERVED) {
/* No work to do? Consolidate the kmalloc free list */
kmalloc_consolidate_free_list();
monitor->status = IHK_OS_MONITOR_IDLE;
cpu_local_var(current)->status = PS_INTERRUPTIBLE;
cpu_safe_halt();
monitor->status = IHK_OS_MONITOR_KERNEL;
monitor->counter++;
cpu_local_var(current)->status = PS_RUNNING;
}
else {

View File

@@ -17,8 +17,8 @@
#include <ihk/debug.h>
#include <ihk/ikc.h>
#include <ikc/master.h>
#include <syscall.h>
#include <cls.h>
#include <syscall.h>
#include <kmalloc.h>
#include <process.h>
#include <page.h>

kernel/rusage.c Normal file (389 lines)
View File

@@ -0,0 +1,389 @@
/**
* \file rusage.c
*/
#include <types.h>
#include <kmsg.h>
#include <ihk/cpu.h>
#include <ihk/mm.h>
#include <ihk/debug.h>
#include <process.h>
#include <init.h>
#include <march.h>
#include <cls.h>
#include <time.h>
#include <syscall.h>
#include <string.h>
#include <rusage.h>
//#define DEBUG_PRINT_AP
#ifdef DEBUG_PRINT_AP
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#endif
extern int num_processors;
static volatile int ap_stop = 1;
mcs_lock_node_t ap_syscall_semaphore;
extern struct ihk_os_monitor *monitor;
#ifdef ENABLE_RUSAGE
/* count total rss */
unsigned long count_rss() {
int i;
unsigned long val = 0;
for(i = 0; i < sizeof(cpu_set_t)/8; i++){
val += rusage_rss[i];
}
return val;
}
/* count total cache */
unsigned long count_cache() {
return 0;
}
/* count total rss_huge */
unsigned long count_rss_huge() {
return 0;
}
/* count total mapped_file */
unsigned long count_mapped_file() {
return 0;
}
/* count total max_usage */
unsigned long count_max_usage() {
return rusage_rss_max;
}
/* count total kmem_usage */
unsigned long count_kmem_usage() {
return 0;
}
/* count total kmax_usage */
unsigned long count_kmem_max_usage() {
return 0;
}
#endif
#include <sysfs.h>
#include <vsprintf.h>
#ifdef ENABLE_RUSAGE
char *strcat_rusage(char *s1, char *s2) {
int i;
int j;
for (i = 0; s1[i] != '\0'; i++); // advance i to the end of s1
for (j = 0; s2[j] != '\0'; j++) {
s1[i+j] = s2[j];
}
s1[i+j] = '\0';
return s1;
}
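strcat_rusage() does no bounds checking; the callers below rely on tmp_buf1 (1024 bytes) being large enough for every NUMA node's entry. A bounds-checked alternative, as a sketch with strlcat-style semantics (strlcat_rusage is hypothetical, and strnlen is assumed to be available in this kernel's string.h):

static size_t strlcat_rusage(char *dst, const char *src, size_t dstsize)
{
	size_t dlen = strnlen(dst, dstsize);	/* assumed available */
	size_t i;

	for (i = 0; src[i] != '\0' && dlen + i + 1 < dstsize; i++)
		dst[dlen + i] = src[i];
	if (dlen + i < dstsize)
		dst[dlen + i] = '\0';	/* terminate when there is room */
	return dlen + i;
}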
static ssize_t
show_rusage_memory_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf, size, "rss %lu\ncache %lu\nrss_huge %lu\nmapped_file %lu\n",
count_rss(),
count_cache(),
count_rss_huge(),
count_mapped_file()
);
}
static ssize_t
show_rusage_memory_max_usage_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf,size,"%lu\n",count_max_usage());
}
static ssize_t
show_rusage_memory_kmem_usage_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf,size,"%lu\n",count_kmem_usage());
}
static ssize_t
show_rusage_memory_kmem_max_usage_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf,size,"%lu\n",count_kmem_max_usage());
}
static ssize_t
show_rusage_num_numa_nodes_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf,size,"%d\n",ihk_mc_get_nr_numa_nodes());
}
static ssize_t
show_rusage_memory_numa_stat_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
int i;
int num_numa;
char tmp_buf1[1024];
char tmp_buf2[1024];
unsigned long total = 0;
memset(tmp_buf1, 0, 1024);
num_numa = ihk_mc_get_nr_numa_nodes();
for (i = 0; i < num_numa; i++) {
total += rusage_numa_stat[i];
}
sprintf(tmp_buf1, "total=%lu ", total);
for (i = 0; i < num_numa; i++) {
sprintf(tmp_buf2, "N%d=%lu ", i, rusage_numa_stat[i]);
strcat_rusage(tmp_buf1, tmp_buf2);
memset(tmp_buf2, 0, 1024);
}
return snprintf(buf, size, "%s\n", tmp_buf1);
}
static ssize_t
show_rusage_hugetlb_usage_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf, size, "%lu\n", rusage_hugetlb_usage);
}
static ssize_t
show_rusage_hugetlb_max_usage_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf, size, "%lu\n", rusage_hugetlb_max_usage);
}
static ssize_t
show_rusage_cpuacct_stat_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
struct timespec uts;
struct timespec sts;
int i;
int r = 0;
uts.tv_sec = 0;
uts.tv_nsec = 0;
sts.tv_sec = 0;
sts.tv_nsec = 0;
if (monitor)
for (i = 0; i < num_processors; i++) {
struct timespec ats;
tsc_to_ts(monitor[i].user_tsc, &ats);
ts_add(&uts, &ats);
tsc_to_ts(monitor[i].system_tsc, &ats);
ts_add(&sts, &ats);
}
r = snprintf(buf, size, "user %lu\n", timespec_to_jiffy(&uts));
r += snprintf(strchr(buf, '\0'), size - r, "system %lu\n",
timespec_to_jiffy(&sts));
return r;
}
static ssize_t
show_rusage_cpuacct_usage_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
struct timespec uts;
int i;
int r = 0;
uts.tv_sec = 0;
uts.tv_nsec = 0;
if (monitor)
for (i = 0; i < num_processors; i++) {
struct timespec ats;
tsc_to_ts(monitor[i].user_tsc + monitor[i].system_tsc,
&ats);
ts_add(&uts, &ats);
}
if (uts.tv_sec)
r = snprintf(buf, size, "%lu%09lu\n", uts.tv_sec, uts.tv_nsec);
else
r = snprintf(buf, size, "%lu\n", uts.tv_nsec);
return r;
}
static ssize_t
show_rusage_cpuacct_usage_percpu_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
struct timespec uts;
int i;
int r = 0;
((char *)buf)[0] = '\0';
for (i = 0; i < num_processors; i++) {
if (monitor) {
tsc_to_ts(monitor[i].user_tsc + monitor[i].system_tsc,
&uts);
}
else {
uts.tv_sec = 0;
uts.tv_nsec = 0;
}
if (uts.tv_sec)
r += snprintf(strchr(buf, '\0'), size - r,
"%lu%09lu ", uts.tv_sec, uts.tv_nsec);
else
r += snprintf(strchr(buf, '\0'), size - r,
"%lu ", uts.tv_nsec);
}
((char *)buf)[r - 1] = '\n';
return r;
}
/* callback function of rusage(threads) sysfs */
static ssize_t
show_rusage_num_threads_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf, size, "%lu\n", rusage_num_threads);
}
/* callback function of rusage(max threads) sysfs */
static ssize_t
show_rusage_max_num_threads_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf, size, "%lu\n", rusage_max_num_threads);
}
/* definition of sysfs ops */
struct sysfs_ops show_rusage_memory = {
.show = &show_rusage_memory_data,
};
struct sysfs_ops show_rusage_memory_max_usage = {
.show = &show_rusage_memory_max_usage_data,
};
struct sysfs_ops show_rusage_memory_kmem_usage = {
.show = &show_rusage_memory_kmem_usage_data,
};
struct sysfs_ops show_rusage_memory_kmem_max_usage = {
.show = &show_rusage_memory_kmem_max_usage_data,
};
struct sysfs_ops show_rusage_num_numa_nodes = {
.show = &show_rusage_num_numa_nodes_data,
};
struct sysfs_ops show_rusage_memory_numa_stat = {
.show = &show_rusage_memory_numa_stat_data,
};
struct sysfs_ops show_rusage_hugetlb_usage = {
.show = &show_rusage_hugetlb_usage_data,
};
struct sysfs_ops show_rusage_hugetlb_max_usage = {
.show = &show_rusage_hugetlb_max_usage_data,
};
struct sysfs_ops show_rusage_cpuacct_stat = {
.show = &show_rusage_cpuacct_stat_data,
};
struct sysfs_ops show_rusage_cpuacct_usage = {
.show = &show_rusage_cpuacct_usage_data,
};
struct sysfs_ops show_rusage_num_threads = {
.show = &show_rusage_num_threads_data,
};
struct sysfs_ops show_rusage_cpuacct_usage_percpu = {
.show = &show_rusage_cpuacct_usage_percpu_data,
};
struct sysfs_ops show_rusage_max_num_threads = {
.show = &show_rusage_max_num_threads_data,
};
/* create sysfs files for rusage. */
void rusage_sysfs_setup(void) {
int error;
error = sysfs_createf(&show_rusage_memory, &rdata, 0444,
"/sys/fs/cgroup/memory/memory.stat");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_memory_max_usage, &rdata, 0444,
"/sys/fs/cgroup/memory/memory.max_usage_in_bytes");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_memory_kmem_usage, &rdata, 0444,
"/sys/fs/cgroup/memory/memory.kmem.usage_in_bytes");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_memory_kmem_max_usage, &rdata, 0444,
"/sys/fs/cgroup/memory/memory.kmem.max_usage_in_bytes");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_num_numa_nodes, &rdata, 0444,
"/sys/fs/cgroup/cpu/num_numa_nodes.txt");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_memory_numa_stat, &rdata, 0444,
"/sys/fs/cgroup/memory/memory.numa_stat");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_hugetlb_usage, &rdata, 0444,
"/sys/fs/cgroup/hugetlb/hugetlb.1GB.usage_in_bytes");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_hugetlb_max_usage, &rdata, 0444,
"/sys/fs/cgroup/hugetlb/hugetlb.1GB.max_usage_in_bytes");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_cpuacct_stat, &rdata, 0444,
"/sys/fs/cgroup/cpuacct/cpuacct.stat");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_cpuacct_usage, &rdata, 0444,
"/sys/fs/cgroup/cpuacct/cpuacct.usage");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_cpuacct_usage_percpu, &rdata, 0444,
"/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_num_threads, &rdata, 0444,
"/sys/fs/cgroup/num_threads");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
error = sysfs_createf(&show_rusage_max_num_threads, &rdata, 0444,
"/sys/fs/cgroup/max_num_threads");
if (error) {
panic("rusage_sysfs_setup:sysfs_createf() failed\n");
}
}
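Every file registered above is a read-only pseudo-file whose show callback renders one counter on demand. Where the tree is visible from the Linux side (an assumption; how McKernel's sysfs is exported depends on the mcctrl configuration), the counters read like ordinary cgroup files, e.g.:

#include <stdio.h>

int main(void)
{
	/* path as created by rusage_sysfs_setup() above; reachability
	 * from the host is configuration-dependent */
	FILE *fp = fopen("/sys/fs/cgroup/memory/memory.stat", "r");
	char line[256];

	if (!fp) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return 0;
}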
/* callback function of os_status sysfs */
static ssize_t
show_ihk_status_data(struct sysfs_ops *ops, void *instance, void *buf, size_t size)
{
return snprintf(buf, size, "%d\n", os_status);
}
struct sysfs_ops show_ihk_status = {
.show = &show_ihk_status_data,
};
/* create sysfs files for monitoring status. */
void status_sysfs_setup(void) {
int error;
error = sysfs_createf(&show_ihk_status, &rdata, 0444,
"/sys/fs/cgroup/mck_status");
if (error) {
panic("status_sysfs_setup:sysfs_createf() failed\n");
}
}
#endif

View File

@@ -55,6 +55,7 @@
#include <bitops.h>
#include <bitmap.h>
#include <xpmem.h>
#include <rusage.h>
/* Headers taken from kitten LWK */
#include <lwk/stddef.h>
@@ -389,6 +390,9 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
long rc;
struct thread *thread = cpu_local_var(current);
struct process *proc = thread->proc;
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
int mstatus = 0;
#ifdef TRACK_SYSCALLS
uint64_t t_s;
t_s = rdtsc();
@@ -398,6 +402,9 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
ihk_mc_get_processor_id(),
req->number);
mstatus = monitor->status;
monitor->status = IHK_OS_MONITOR_KERNEL_OFFLOAD;
barrier();
if(req->number != __NR_exit_group){
@@ -520,6 +527,8 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
}
#endif // TRACK_SYSCALLS
monitor->status = mstatus;
monitor->counter++;
return rc;
}
@@ -957,6 +966,7 @@ terminate(int rc, int sig)
mcs_rwlock_writer_unlock(&proc->threads_lock, &lock);
vm = proc->vm;
free_all_process_memory_range(vm);
if (proc->saved_cmdline) {
kfree(proc->saved_cmdline);
@@ -1100,6 +1110,18 @@ terminate_host(int pid)
do_kill(cpu_local_var(current), pid, -1, SIGKILL, NULL, 0);
}
void
event_signal()
{
struct ihk_ikc_channel_desc *syscall_channel;
struct ikc_scd_packet pckt;
syscall_channel = get_cpu_local_var(0)->syscall_channel2;
memset(&pckt, '\0', sizeof pckt);
pckt.msg = SCD_MSG_EVENT_SIGNAL;
ihk_ikc_send(syscall_channel, &pckt, 0);
}
void
interrupt_syscall(int pid, int tid)
{
@@ -2405,11 +2427,13 @@ SYSCALL_DECLARE(set_tid_address)
return cpu_local_var(current)->proc->pid;
}
/*
static unsigned long
timespec_to_jiffy(const struct timespec *ats)
{
return ats->tv_sec * 100 + ats->tv_nsec / 10000000;
}
*/
SYSCALL_DECLARE(times)
{
@@ -2425,8 +2449,10 @@ SYSCALL_DECLARE(times)
struct process *proc = thread->proc;
struct timespec ats;
mytms.tms_utime = timespec_to_jiffy(&thread->utime);
mytms.tms_stime = timespec_to_jiffy(&thread->stime);
tsc_to_ts(thread->user_tsc, &ats);
mytms.tms_utime = timespec_to_jiffy(&ats);
tsc_to_ts(thread->system_tsc, &ats);
mytms.tms_stime = timespec_to_jiffy(&ats);
ats.tv_sec = proc->utime.tv_sec;
ats.tv_nsec = proc->utime.tv_nsec;
ts_add(&ats, &proc->utime_children);
@@ -3533,6 +3559,9 @@ SYSCALL_DECLARE(rt_sigtimedwait)
int sig;
struct timespec ats;
struct timespec ets;
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
monitor->status = IHK_OS_MONITOR_KERNEL_HEAVY;
if (sigsetsize > sizeof(sigset_t))
return -EINVAL;
@@ -3688,6 +3717,9 @@ do_sigsuspend(struct thread *thread, const sigset_t *set)
struct list_head *head;
mcs_rwlock_lock_t *lock;
struct mcs_rwlock_node_irqsave mcs_rw_node;
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
monitor->status = IHK_OS_MONITOR_KERNEL_HEAVY;
wset = set->__val[0];
wset &= ~__sigmask(SIGKILL);
@@ -4796,7 +4828,10 @@ SYSCALL_DECLARE(futex)
uint32_t *uaddr2 = (uint32_t *)ihk_mc_syscall_arg4(ctx);
uint32_t val3 = (uint32_t)ihk_mc_syscall_arg5(ctx);
int flags = op;
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
monitor->status = IHK_OS_MONITOR_KERNEL_HEAVY;
/* Cross-address space futex? */
if (op & FUTEX_PRIVATE_FLAG) {
fshared = 0;
@@ -4907,6 +4942,9 @@ SYSCALL_DECLARE(exit)
if(nproc == 1){ // process has only one thread
terminate(exit_status, 0);
#ifdef ENABLE_RUSAGE
ihk_atomic_add_ulong(-1, &rusage_num_threads);
#endif
return 0;
}
@@ -4933,6 +4971,9 @@ SYSCALL_DECLARE(exit)
if(proc->status == PS_EXITED){
mcs_rwlock_reader_unlock(&proc->threads_lock, &lock);
terminate(exit_status, 0);
#ifdef ENABLE_RUSAGE
ihk_atomic_add_ulong(-1, &rusage_num_threads);
#endif
return 0;
}
thread->status = PS_EXITED;
@@ -4941,6 +4982,9 @@ SYSCALL_DECLARE(exit)
release_thread(thread);
schedule();
#ifdef ENABLE_RUSAGE
ihk_atomic_add_ulong(-1, &rusage_num_threads);
#endif
return 0;
}
@@ -5065,6 +5109,7 @@ SYSCALL_DECLARE(getrusage)
struct timespec utime;
struct timespec stime;
struct mcs_rwlock_node lock;
struct timespec ats;
if(who != RUSAGE_SELF &&
who != RUSAGE_CHILDREN &&
@@ -5096,8 +5141,10 @@ SYSCALL_DECLARE(getrusage)
list_for_each_entry(child, &proc->threads_list, siblings_list){
while(!child->times_update)
cpu_pause();
ts_add(&utime, &child->utime);
ts_add(&stime, &child->stime);
tsc_to_ts(child->user_tsc, &ats);
ts_add(&utime, &ats);
tsc_to_ts(child->system_tsc, &ats);
ts_add(&stime, &ats);
}
mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &lock);
ts_to_tv(&kusage.ru_utime, &utime);
@@ -5106,14 +5153,18 @@ SYSCALL_DECLARE(getrusage)
kusage.ru_maxrss = proc->maxrss / 1024;
}
else if(who == RUSAGE_CHILDREN){
ts_to_tv(&kusage.ru_utime, &proc->utime_children);
ts_to_tv(&kusage.ru_stime, &proc->stime_children);
tsc_to_ts(thread->user_tsc, &ats);
ts_to_tv(&kusage.ru_utime, &ats);
tsc_to_ts(thread->system_tsc, &ats);
ts_to_tv(&kusage.ru_stime, &ats);
kusage.ru_maxrss = proc->maxrss_children / 1024;
}
else if(who == RUSAGE_THREAD){
ts_to_tv(&kusage.ru_utime, &thread->utime);
ts_to_tv(&kusage.ru_stime, &thread->stime);
tsc_to_ts(thread->user_tsc, &ats);
ts_to_tv(&kusage.ru_utime, &ats);
tsc_to_ts(thread->system_tsc, &ats);
ts_to_tv(&kusage.ru_stime, &ats);
kusage.ru_maxrss = proc->maxrss / 1024;
}
@@ -6449,10 +6500,11 @@ SYSCALL_DECLARE(clock_gettime)
ats.tv_nsec = proc->utime.tv_nsec;
ts_add(&ats, &proc->stime);
list_for_each_entry(child, &proc->threads_list, siblings_list){
struct timespec wts;
while(!child->times_update)
cpu_pause();
ts_add(&ats, &child->utime);
ts_add(&ats, &child->stime);
tsc_to_ts(child->user_tsc + child->system_tsc, &wts);
ts_add(&ats, &wts);
}
mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &lock);
return copy_to_user(ts, &ats, sizeof ats);
@@ -6460,9 +6512,7 @@ SYSCALL_DECLARE(clock_gettime)
else if(clock_id == CLOCK_THREAD_CPUTIME_ID){
struct thread *thread = cpu_local_var(current);
ats.tv_sec = thread->utime.tv_sec;
ats.tv_nsec = thread->utime.tv_nsec;
ts_add(&ats, &thread->stime);
tsc_to_ts(thread->user_tsc + thread->system_tsc, &ats);
return copy_to_user(ts, &ats, sizeof ats);
}
@@ -6565,6 +6615,9 @@ SYSCALL_DECLARE(nanosleep)
struct timespec *tv = (struct timespec *)ihk_mc_syscall_arg0(ctx);
struct timespec *rem = (struct timespec *)ihk_mc_syscall_arg1(ctx);
struct syscall_request request IHK_DMA_ALIGN;
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
monitor->status = IHK_OS_MONITOR_KERNEL_HEAVY;
/* Do it locally if supported */
if (gettime_local_support) {
@@ -8479,8 +8532,7 @@ reset_cputime()
if(!(thread = cpu_local_var(current)))
return;
thread->btime.tv_sec = 0;
thread->btime.tv_nsec = 0;
thread->base_tsc = 0;
}
/**
@@ -8492,8 +8544,9 @@ void
set_cputime(int mode)
{
struct thread *thread;
struct timespec ats;
unsigned long tsc;
struct cpu_local_var *v;
struct ihk_os_monitor *monitor;
if(clv == NULL)
return;
@@ -8501,38 +8554,48 @@ set_cputime(int mode)
v = get_this_cpu_local_var();
if(!(thread = v->current))
return;
if(thread == &v->idle)
return;
monitor = v->monitor;
if(mode == 0){
monitor->status = IHK_OS_MONITOR_USER;
}
else if(mode == 1){
monitor->counter++;
monitor->status = IHK_OS_MONITOR_KERNEL;
}
if(!gettime_local_support){
thread->times_update = 1;
return;
}
calculate_time_from_tsc(&ats);
if(thread->btime.tv_sec != 0 && thread->btime.tv_nsec != 0){
tsc = rdtsc();
if(thread->base_tsc != 0){
unsigned long dtsc = tsc - thread->base_tsc;
struct timespec dts;
dts.tv_sec = ats.tv_sec;
dts.tv_nsec = ats.tv_nsec;
ts_sub(&dts, &thread->btime);
tsc_to_ts(dtsc, &dts);
if(mode == 1){
ts_add(&thread->utime, &dts);
thread->user_tsc += dtsc;
monitor->user_tsc += dtsc;
ts_add(&thread->itimer_virtual_value, &dts);
ts_add(&thread->itimer_prof_value, &dts);
}
else{
ts_add(&thread->stime, &dts);
thread->system_tsc += dtsc;
monitor->system_tsc += dtsc;
ts_add(&thread->itimer_prof_value, &dts);
}
}
if(mode == 2){
thread->btime.tv_sec = 0;
thread->btime.tv_nsec = 0;
thread->base_tsc = 0;
}
else{
thread->btime.tv_sec = ats.tv_sec;
thread->btime.tv_nsec = ats.tv_nsec;
thread->base_tsc = tsc;
}
thread->times_update = 1;
thread->in_kernel = mode;
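The rewrite above replaces per-thread timespec bookkeeping with raw TSC deltas: set_cputime() latches base_tsc on a mode switch and accumulates rdtsc() - base_tsc into user_tsc or system_tsc, deferring conversion to readers via tsc_to_ts(). A sketch of that conversion, assuming a known TSC frequency in Hz (the real tsc_to_ts() lives elsewhere in the tree):

static void tsc_to_ts_sketch(unsigned long tsc, unsigned long tsc_hz,
			     struct timespec *ts)
{
	ts->tv_sec = tsc / tsc_hz;
	/* the remainder is < tsc_hz, so the multiply stays inside 64
	 * bits for realistic frequencies (below ~18 GHz) */
	ts->tv_nsec = (tsc % tsc_hz) * 1000000000UL / tsc_hz;
}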
@@ -8595,6 +8658,7 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
#ifdef TRACK_SYSCALLS
uint64_t t_s;
#endif // TRACK_SYSCALLS
struct thread *thread = cpu_local_var(current);
set_cputime(1);
if(cpu_local_var(current)->proc->status == PS_EXITED &&
@@ -8656,8 +8720,8 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
l = syscall_generic_forwarding(num, ctx);
}
if (num != __NR_sched_yield &&
num != __NR_futex) {
if (!list_empty(&thread->sigpending) ||
!list_empty(&thread->sigcommon->sigpending)) {
check_signal(l, NULL, num);
}

File diff suppressed because it is too large.

View File

@@ -1,8 +1,17 @@
#include <ihk/debug.h>
#include <ihk/cpu.h>
#include <cls.h>
#include <ihk/rusage.h>
extern struct cpu_local_var *clv;
void panic(const char *msg)
{
if (clv) {
struct ihk_os_monitor *monitor = cpu_local_var(monitor);
monitor->status = IHK_OS_MONITOR_PANIC;
}
cpu_disable_interrupt();
kprintf(msg);

View File

@@ -198,6 +198,9 @@ void remote_flush_tlb_cpumask(struct process_vm *vm,
int ihk_set_kmsg(unsigned long addr, unsigned long size);
char *ihk_get_kargs();
int ihk_set_monitor(unsigned long addr, unsigned long size);
int ihk_set_nmi_mode_addr(unsigned long addr);
extern void (*__tlb_flush_handler)(int vector);
struct tlb_flush_entry {

lib/include/ihk/rusage.h Normal file (74 lines)
View File

@@ -0,0 +1,74 @@
#ifndef __RUSAGE_H
#define __RUSAGE_H
#define RUSAGE_DEFAULT_SIZE 10
enum RUSAGE_MEMBER {
RUSAGE_RSS,
RUSAGE_CACHE,
RUSAGE_RSS_HUGE,
RUSAGE_MAPPED_FILE,
RUSAGE_MAX_USAGE,
RUSAGE_KMEM_USAGE,
RUSAGE_KMAX_USAGE,
RUSAGE_NUM_NUMA_NODES,
RUSAGE_NUMA_STAT,
RUSAGE_HUGETLB,
RUSAGE_HUGETLB_MAX,
RUSAGE_STAT_SYSTEM,
RUSAGE_STAT_USER,
RUSAGE_USAGE,
RUSAGE_USAGE_PER_CPU,
RUSAGE_NUM_THREADS,
RUSAGE_MAX_NUM_THREADS
};
struct r_data {
unsigned long pid;
unsigned long rss;
unsigned long cache;
unsigned long rss_huge;
unsigned long mapped_file;
unsigned long max_usage;
unsigned long kmem_usage;
unsigned long kmax_usage;
unsigned long hugetlb;
unsigned long hugetlb_max;
unsigned long stat_system;
unsigned long stat_user;
unsigned long usage;
struct r_data *next;
};
typedef struct r_data rusage_data;
rusage_data *rdata[RUSAGE_DEFAULT_SIZE];
unsigned long rusage_max_num_threads;
unsigned long rusage_num_threads;
enum ihk_os_status {
IHK_STATUS_INACTIVE,
IHK_STATUS_BOOTING,
IHK_STATUS_RUNNING,
IHK_STATUS_SHUTDOWN,
IHK_STATUS_PANIC,
IHK_STATUS_HUNGUP,
IHK_STATUS_FREEZING,
IHK_STATUS_FROZEN,
};
enum ihk_os_status os_status;
unsigned long sys_delegate_count;
enum sys_delegate_state_enum {
ENTER_KERNEL,
EXIT_KERNEL,
};
enum sys_delegate_state_enum sys_delegate_state;
unsigned long rusage_kmem_usage;
unsigned long rusage_kmem_max_usage;
unsigned long rusage_hugetlb_usage;
unsigned long rusage_hugetlb_max_usage;
unsigned long rusage_usage_per_cpu[sizeof(cpu_set_t)/8];
unsigned long rusage_numa_stat[1024];
#endif
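Note that this header defines its globals (rdata, rusage_num_threads, os_status, and the rest) rather than declaring them extern. If it is included from more than one translation unit (the panic path above includes it), linking relies on the legacy common-symbol behavior (gcc's -fcommon, still the default for this era's toolchains). The conventional split, sketched:

/* in the header */
extern unsigned long rusage_num_threads;

/* in exactly one .c file */
unsigned long rusage_num_threads;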