Compare commits

...

60 Commits
1.2.3 ... 1.2.4

Author SHA1 Message Date
Tomoki Shirasawa
eca4018ecb mcctrl: release syscall packets on mcexec termination
refs #835
2017-03-11 20:57:54 +09:00
Tomoki Shirasawa
e936b2ebe1 memobj_release: don't call syscall_generic_forwarding after process termination
refs #816
2017-03-10 12:58:47 +09:00
Tomoki Shirasawa
d8112f92f8 terminate(): don't call free_all_process_memory_range
refs #816
2017-03-08 14:30:28 +09:00
Masamichi Takagi
1076010de4 Boundary check in early_alloc_pages() 2017-03-04 17:21:57 +09:00
Balazs Gerofi
da4a5ec44b page_allocator_init(): move memory_nodes to BSS 2017-02-24 19:33:25 +09:00
Balazs Gerofi
d35aa9b100 page_allocator_init(): clean-up code, eliminate initial flag 2017-02-24 14:25:22 +09:00
e29005
ba8dbf1b19 Put kernel image and page table into one chunk 2017-02-24 14:21:32 +09:00
Yoichi Umezawa
6213f0e488 mcctrl: fix cpumask macros for Linux 4.6 2017-02-02 15:49:39 +09:00
Balazs Gerofi
4ef82c2683 OFP-SNC-4: offline/online MCDRAM before memory reservation 2017-01-30 14:47:36 +09:00
Balazs Gerofi
e066a8798c IKC: adjust master channel queue size to nr. of CPUs 2017-01-30 07:24:09 +09:00
Balazs Gerofi
b702c9691e AP init: synchronize syscall channel initialization 2017-01-30 07:24:09 +09:00
Balazs Gerofi
addbe91e59 do_migrate(): signal migrated thread before releasing runq lock 2017-01-30 07:24:09 +09:00
Balazs Gerofi
b812848a0e eclair-dump-backtrace.exp: handle user space threads 2017-01-30 07:24:09 +09:00
Balazs Gerofi
ad214c8206 reserve_user_space(): mutual exclusion on mmap 2017-01-30 07:24:09 +09:00
Balazs Gerofi
1bc3218fc1 partitioned execution: bind mcexec to corresponding NUMA node 2017-01-30 07:24:09 +09:00
Balazs Gerofi
5cc420a6c3 syscall/offload tracker: clean-up and support process-wise aggregation 2017-01-30 07:24:09 +09:00
Balazs Gerofi
c7686fdf4e execve(): fix memory leak 2017-01-30 07:24:09 +09:00
Balazs Gerofi
c1dae4d8b0 mmap(): no physical memory pre-allocation for Intel 128MB mapping 2017-01-30 07:24:08 +09:00
Yoichi Umezawa
2473025201 do_mmap(): remove debug code
refs #395
2017-01-16 15:53:27 +09:00
Balazs Gerofi
fa5c1b23ca eclair-dump-backtrace.exp: dump full backtrace of all mckernel threads 2017-01-15 10:46:07 +09:00
Balazs Gerofi
f2f499aace mcreboot/stop: toggle address-space layout randomization (ASLR) to avoid mcexec user-space reservation failure 2017-01-15 10:36:50 +09:00
Balazs Gerofi
bd47b909bf futex(): spin wait when CPU not oversubscribed and fix lost wake-up bug 2017-01-13 08:43:25 +09:00
Balazs Gerofi
d646c2a4b9 cpu_set/clear(): unsigned long for IRQ flags 2017-01-13 08:43:25 +09:00
Balazs Gerofi
865ada46bf IKC2: eliminate unused IKC structures 2017-01-13 08:43:25 +09:00
Balazs Gerofi
cdffc5e853 do_syscall(): eliminate centralized lock for exit/kill code path (use IKC2 thread pool) 2017-01-08 14:16:10 +09:00
Balazs Gerofi
0e67e9266b ap_init(): reformat AP cores report 2017-01-08 14:16:10 +09:00
Balazs Gerofi
1ff0afe6fb devobj/fileobj: do not try to free memory for device file mappings 2017-01-08 14:16:10 +09:00
Balazs Gerofi
d34884f9a4 numa_init(): error handling and propagation 2017-01-08 14:15:51 +09:00
Balazs Gerofi
7a0c204dc1 eclair: report PID for all threads 2017-01-08 14:15:44 +09:00
Balazs Gerofi
25f67c9ef8 mcreboot/mcstop-smp-x86: suppress libkmod warnings 2017-01-08 14:15:34 +09:00
Balazs Gerofi
a776464a7e mcreboot/mcstop: adjust swappiness 2017-01-03 09:02:41 +09:00
Balazs Gerofi
c40e7105e6 NUMA: order nodes by distance for MPOL_BIND / MPOL_PREFERRED policies as well 2017-01-03 09:02:29 +09:00
Balazs Gerofi
5bac38ce8b mmap()/stack/heap: follow user requested NUMA policy 2016-12-31 19:38:05 +09:00
Balazs Gerofi
e3f0662130 allocate_aligned_pages_node(): debug msg format 2016-12-31 16:25:14 +09:00
Balazs Gerofi
21df56b233 sched_wakeup_thread(): memory barrier after status update 2016-12-31 10:44:13 +09:00
Balazs Gerofi
393cec513c allocate_aligned_pages_node(): follow user policy only for user allocations 2016-12-31 10:10:42 +09:00
Balazs Gerofi
4437ecc69a do_mmap(): indicate user level allocations for anonymous mappings 2016-12-31 10:09:49 +09:00
Balazs Gerofi
40d75baca2 ihk_mc_ap_flag: rewrite flag type, intro for denoting user level allocations 2016-12-30 19:19:34 +09:00
Balazs Gerofi
00f3fe0840 ihk_mc_alloc_aligned_pages_node(): support for explicit indication of target NUMA node 2016-12-30 19:03:59 +09:00
Balazs Gerofi
47a8b5bda5 mmap(): faster pre-allocation for anonymous private mappings 2016-12-30 17:18:44 +09:00
Balazs Gerofi
ec75095073 add_process_memory_range(): optionally return range object 2016-12-30 15:51:17 +09:00
Balazs Gerofi
1794232989 irqbalance_mck: create environment file in /tmp to avoid race condition on PFS 2016-12-30 15:47:44 +09:00
Balazs Gerofi
40978d162e procfs_read/write(): rewrite synchronization for scalability and correctness 2016-12-28 14:17:17 +09:00
Balazs Gerofi
536ce9f927 process_procfs_request(): use IRQ save MCS locks while iterating thread list to avoid deadlock 2016-12-28 12:29:10 +09:00
Balazs Gerofi
4e5ec74ffe mmap(): fault in memory only up to file size for populated file mappings 2016-12-27 16:33:24 +09:00
Balazs Gerofi
a6d8125fd7 mcreboot-smp-x86: reserve memory first and then CPUs 2016-12-27 15:19:05 +09:00
Balazs Gerofi
15d3a0361e destroy_ikc_channels(): eliminate kprint from error-free path 2016-12-27 11:52:24 +09:00
Balazs Gerofi
6ad84a96a3 mcexec_syscall(): avoid calling task_pid_nr_ns() in IRQ context 2016-12-26 20:43:17 +09:00
Balazs Gerofi
16e846e9b6 mcexec: report error in prepare_image() if wait queue interrupted 2016-12-26 20:42:31 +09:00
Balazs Gerofi
5bc7185f07 do_migrate(): update debug msg format 2016-12-25 17:34:26 +09:00
Balazs Gerofi
32462dfb2d eclair: fix CPU number display for non-active threads 2016-12-25 17:28:31 +09:00
Balazs Gerofi
e3ef88c0cf do_sigsuspend(): deschedule thread when necessary (fixes gdb deadlock) 2016-12-25 17:24:32 +09:00
Balazs Gerofi
829aae7b8d mcexec: PATH_MAX buffer length in do_generic_syscall() 2016-12-25 17:20:14 +09:00
Balazs Gerofi
b836b84825 mcexec_prepare_image(): use memory barrier when updating request status 2016-12-25 17:19:14 +09:00
Balazs Gerofi
3e1f154412 patch_process_vm(): eliminate kprintfs from error-free code path 2016-12-25 17:18:20 +09:00
Balazs Gerofi
e7af537452 get_pid_cred(): proper locking around pid_task 2016-12-25 17:17:27 +09:00
Balazs Gerofi
3565959af7 eclair: fix compiler warnings 2016-12-23 09:57:50 +09:00
Balazs Gerofi
4667136a4c mcctrl: refcount per-process data to avoid corrupted syscall request lists 2016-12-23 09:54:15 +09:00
Balazs Gerofi
972d14611a mcctrl: move prepare waitqueue to per-process data 2016-12-22 10:15:31 +09:00
Balazs Gerofi
e90eef8910 eclair: support for direct memory inspection 2016-12-21 21:55:32 +09:00
43 changed files with 1489 additions and 898 deletions

View File

@@ -49,6 +49,7 @@ install::
mkdir -p -m 755 $(SBINDIR); \
install -m 755 arch/x86/tools/mcreboot-smp-x86.sh $(SBINDIR)/mcreboot.sh; \
install -m 755 arch/x86/tools/mcstop+release-smp-x86.sh $(SBINDIR)/mcstop+release.sh; \
install -m 755 arch/x86/tools/eclair-dump-backtrace.exp $(SBINDIR)/eclair-dump-backtrace.exp;\
mkdir -p -m 755 $(ETCDIR); \
install -m 644 arch/x86/tools/irqbalance_mck.service $(ETCDIR)/irqbalance_mck.service; \
install -m 644 arch/x86/tools/irqbalance_mck.in $(ETCDIR)/irqbalance_mck.in; \

View File

@@ -151,7 +151,7 @@ SYSCALL_HANDLED(603, pmc_stop)
SYSCALL_HANDLED(604, pmc_reset)
SYSCALL_HANDLED(700, get_cpu_id)
#ifdef TRACK_SYSCALLS
SYSCALL_HANDLED(701, syscall_offload_clr_cntrs)
SYSCALL_HANDLED(__NR_track_syscalls, track_syscalls)
#endif // TRACK_SYSCALLS
/**** End of File ****/

View File

@@ -45,7 +45,11 @@ void *early_alloc_pages(int nr_pages)
last_page = phys_to_virt(virt_to_phys(last_page));
} else if (last_page == (void *)-1) {
panic("Early allocator is already finalized. Do not use it.\n");
}
} else {
if(virt_to_phys(last_page) >= bootstrap_mem_end) {
panic("Early allocator: Out of memory\n");
}
}
p = last_page;
last_page += (nr_pages * PAGE_SIZE);
@@ -179,7 +183,7 @@ static void init_normal_area(struct page_table *pt)
}
}
static struct page_table *__alloc_new_pt(enum ihk_mc_ap_flag ap_flag)
static struct page_table *__alloc_new_pt(ihk_mc_ap_flag ap_flag)
{
struct page_table *newpt = ihk_mc_alloc_pages(1, ap_flag);
@@ -278,7 +282,7 @@ void set_pte(pte_t *ppte, unsigned long phys, enum ihk_mc_pt_attribute attr)
* and returns a pointer to the PTE corresponding to the
* virtual address.
*/
pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr, enum ihk_mc_ap_flag ap_flag)
pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr, ihk_mc_ap_flag ap_flag)
{
int l4idx, l3idx, l2idx, l1idx;
unsigned long v = (unsigned long)virt;
@@ -339,7 +343,7 @@ static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
int l4idx, l3idx, l2idx, l1idx;
unsigned long v = (unsigned long)virt;
struct page_table *newpt;
enum ihk_mc_ap_flag ap_flag;
ihk_mc_ap_flag ap_flag;
int in_kernel =
(((unsigned long long)virt) >= 0xffff000000000000ULL);
unsigned long init_pt_lock_flags;
@@ -664,7 +668,7 @@ int ihk_mc_pt_prepare_map(page_table_t p, void *virt, unsigned long size,
return ret;
}
struct page_table *ihk_mc_pt_create(enum ihk_mc_ap_flag ap_flag)
struct page_table *ihk_mc_pt_create(ihk_mc_ap_flag ap_flag)
{
struct page_table *pt = ihk_mc_alloc_pages(1, ap_flag);
@@ -1097,7 +1101,8 @@ static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
page = phys_to_page(phys);
}
if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY)) {
if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY) &&
!(args->memobj->flags & MF_ZEROFILL)) {
memobj_flush_page(args->memobj, phys, PTL1_SIZE);
}
@@ -1271,6 +1276,9 @@ static int clear_range(struct page_table *pt, struct process_vm *vm,
}
args.free_physical = free_physical;
if (memobj && (memobj->flags & MF_DEV_FILE)) {
args.free_physical = 0;
}
args.memobj = memobj;
args.vm = vm;
@@ -2229,30 +2237,28 @@ int strcpy_from_user(char *dst, const char *src)
return err;
}
long getlong_user(const long *p)
long getlong_user(long *dest, const long *p)
{
int error;
long l;
error = copy_from_user(&l, p, sizeof(l));
error = copy_from_user(dest, p, sizeof(long));
if (error) {
return error;
}
return l;
return 0;
}
int getint_user(const int *p)
int getint_user(int *dest, const int *p)
{
int error;
int i;
error = copy_from_user(&i, p, sizeof(i));
error = copy_from_user(dest, p, sizeof(int));
if (error) {
return error;
}
return i;
return 0;
}
int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t siz)
@@ -2418,7 +2424,7 @@ int patch_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
unsigned long pa;
void *va;
kprintf("patch_process_vm(%p,%p,%p,%lx)\n", vm, udst, ksrc, siz);
dkprintf("patch_process_vm(%p,%p,%p,%lx)\n", vm, udst, ksrc, siz);
if ((ustart < vm->region.user_start)
|| (vm->region.user_end <= ustart)
|| ((vm->region.user_end - ustart) < siz)) {
@@ -2468,6 +2474,6 @@ int patch_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
remain -= cpsize;
}
kprintf("patch_process_vm(%p,%p,%p,%lx):%d\n", vm, udst, ksrc, siz, 0);
dkprintf("patch_process_vm(%p,%p,%p,%lx):%d\n", vm, udst, ksrc, siz, 0);
return 0;
} /* patch_process_vm() */
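
The getlong_user()/getint_user() change in the hunk above replaces a return-by-value API, which cannot distinguish a legitimately negative user value from an error code, with an out-parameter plus status-return convention. A minimal caller-side sketch of the new convention — the handler and helper names here are hypothetical, not from the McKernel tree:

```c
/* Hypothetical caller illustrating the out-parameter convention:
 * the return value now carries only the error status, so a user
 * value that happens to equal -EFAULT is no longer ambiguous. */
long sys_example(const long *uptr)
{
	long val;
	long error;

	error = getlong_user(&val, uptr);
	if (error) {
		return error;            /* copy_from_user() failed */
	}

	return do_something_with(val);   /* val is valid here */
}
```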

View File

@@ -30,7 +30,7 @@ int ihk_mc_ikc_init_first_local(struct ihk_ikc_channel_desc *channel,
memset(channel, 0, sizeof(struct ihk_ikc_channel_desc));
mikc_queue_pages = ((num_processors * MASTER_IKCQ_PKTSIZE)
mikc_queue_pages = ((2 * num_processors * MASTER_IKCQ_PKTSIZE)
+ (PAGE_SIZE - 1)) / PAGE_SIZE;
/* Place both sides in this side */
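
The new sizing reserves two packets per CPU on the master channel and rounds the byte total up to whole pages with the usual add-(divisor-1) idiom. A runnable check of the arithmetic, assuming illustrative values MASTER_IKCQ_PKTSIZE = 128 and PAGE_SIZE = 4096 (not taken from the actual headers):

```c
#include <stdio.h>

#define MASTER_IKCQ_PKTSIZE 128   /* assumed, for illustration */
#define PAGE_SIZE 4096            /* assumed, for illustration */

int main(void)
{
	int num_processors = 64;
	/* ceil(bytes / PAGE_SIZE) via integer arithmetic */
	int pages = ((2 * num_processors * MASTER_IKCQ_PKTSIZE)
	             + (PAGE_SIZE - 1)) / PAGE_SIZE;
	printf("%d pages\n", pages);  /* 2*64*128 = 16384 B -> 4 pages */
	return 0;
}
```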

View File

@@ -1741,7 +1741,8 @@ int arch_map_vdso(struct process_vm *vm)
vrflags = VR_REMOTE;
vrflags |= VR_PROT_READ | VR_PROT_EXEC;
vrflags |= VRFLAG_PROT_TO_MAXPROT(vrflags);
error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e, NOPHYS, vrflags, NULL, 0, PAGE_SHIFT);
error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e,
NOPHYS, vrflags, NULL, 0, PAGE_SHIFT, NULL);
if (error) {
ekprintf("ERROR: adding memory range for vdso. %d\n", error);
goto out;
@@ -1772,7 +1773,8 @@ int arch_map_vdso(struct process_vm *vm)
vrflags = VR_REMOTE;
vrflags |= VR_PROT_READ;
vrflags |= VRFLAG_PROT_TO_MAXPROT(vrflags);
error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e, NOPHYS, vrflags, NULL, 0, PAGE_SHIFT);
error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e,
NOPHYS, vrflags, NULL, 0, PAGE_SHIFT, NULL);
if (error) {
ekprintf("ERROR: adding memory range for vvar. %d\n", error);
goto out;

View File

@@ -0,0 +1,67 @@
#!/usr/bin/expect
set INST_DIR "@prefix@"
spawn $INST_DIR/bin/eclair -d /tmp/mckernel.dump -k $INST_DIR/smp-x86/kernel/mckernel.img -i
set state "init"
set thread_id 0
expect {
"in ?? ()" {
switch -- $state {
"thread_chosen" {
set state "thread_skip"
}
"thread_bt" {
set state "thread_skip"
}
}
exp_continue
}
"(eclair) " {
switch -- $state {
"init" {
set state "threads_list"
send "info threads\r"
}
"threads_list" {
incr thread_id
set state "thread_chosen"
send "thread $thread_id\r"
}
"thread_skip" {
incr thread_id
set state "thread_chosen"
send "thread $thread_id\r"
}
"thread_chosen" {
set state "thread_bt"
send "bt\r"
}
}
exp_continue
}
"Type <return> to continue, or q <return> to quit" {
switch -- $state {
"threads_list" {
send "\r"
}
"thread_bt" {
send "\r"
}
"thread_skip" {
send "q\r"
}
}
exp_continue
}
" not known." {
expect "(eclair) " { send "quit\r" }
expect "Quit anyway? (y or n) " { send "y\r" }
exit 0
}
}
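
The script above is a small state machine driven by eclair's prompt: it lists threads with `info threads`, then for each thread id sends `thread N` followed by `bt`, skips threads whose frames resolve only to `?? ()`, answers the pager prompt as needed, and quits cleanly once a thread id is reported as " not known.". Assuming a dump has already been written to /tmp/mckernel.dump, it should be runnable directly as `expect eclair-dump-backtrace.exp` once configure has substituted @prefix@.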

View File

@@ -3,7 +3,7 @@ Description=irqbalance daemon
After=syslog.target
[Service]
EnvironmentFile=@ETCDIR@/irqbalance_mck
EnvironmentFile=/tmp/irqbalance_mck
ExecStart=/usr/sbin/irqbalance --foreground $IRQBALANCE_ARGS
[Install]

View File

@@ -104,7 +104,7 @@ error_exit() {
;&
mcoverlayfs_loaded)
if [ "$enable_mcoverlay" == "yes" ]; then
rmmod mcoverlay
rmmod mcoverlay 2>/dev/null
fi
;&
linux_proc_bind_mounted)
@@ -134,15 +134,7 @@ error_exit() {
fi
;&
mcctrl_loaded)
rmmod mcctrl || echo "warning: failed to remove mcctrl" >&2
;&
mem_reserved)
mem=`${SBINDIR}/ihkconfig 0 query mem`
if [ "${mem}" != "" ]; then
if ! ${SBINDIR}/ihkconfig 0 release mem $mem > /dev/null; then
echo "warning: failed to release memory" >&2
fi
fi
rmmod mcctrl 2>/dev/null || echo "warning: failed to remove mcctrl" >&2
;&
cpus_reserved)
cpus=`${SBINDIR}/ihkconfig 0 query cpu`
@@ -152,11 +144,19 @@ error_exit() {
fi
fi
;&
mem_reserved)
mem=`${SBINDIR}/ihkconfig 0 query mem`
if [ "${mem}" != "" ]; then
if ! ${SBINDIR}/ihkconfig 0 release mem $mem > /dev/null; then
echo "warning: failed to release memory" >&2
fi
fi
;&
ihk_smp_loaded)
rmmod ihk_smp_x86 || echo "warning: failed to remove ihk_smp_x86" >&2
rmmod ihk_smp_x86 2>/dev/null || echo "warning: failed to remove ihk_smp_x86" >&2
;&
ihk_loaded)
rmmod ihk || echo "warning: failed to remove ihk" >&2
rmmod ihk 2>/dev/null || echo "warning: failed to remove ihk" >&2
;&
irqbalance_stopped)
if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: active'`" != "" ]; then
@@ -174,6 +174,11 @@ error_exit() {
fi
fi
;&
aslr_disabled)
if [ -f /tmp/mckernel_randomize_va_space ]; then
cat /tmp/mckernel_randomize_va_space > /proc/sys/kernel/randomize_va_space
fi
;&
initial)
# Nothing more to revert
;;
@@ -225,17 +230,23 @@ if [ "$cpus" == "" ]; then
fi
fi
# Disable address space layout randomization
if [ -f /proc/sys/kernel/randomize_va_space ] && [ "`cat /proc/sys/kernel/randomize_va_space`" != "0" ]; then
cat /proc/sys/kernel/randomize_va_space > /tmp/mckernel_randomize_va_space
echo "0" > /proc/sys/kernel/randomize_va_space
fi
# Remove mcoverlay if loaded
if [ "$enable_mcoverlay" == "yes" ]; then
if [ "`lsmod | grep mcoverlay`" != "" ]; then
if grep mcoverlay /proc/modules &>/dev/null; then
if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_sys`" != "" ]; then umount -l /tmp/mcos/mcos0_sys; fi
if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_proc`" != "" ]; then umount -l /tmp/mcos/mcos0_proc; fi
if [ "`cat /proc/mounts | grep /tmp/mcos/linux_proc`" != "" ]; then umount -l /tmp/mcos/linux_proc; fi
if [ "`cat /proc/mounts | grep /tmp/mcos`" != "" ]; then umount -l /tmp/mcos; fi
if [ -e /tmp/mcos ]; then rm -rf /tmp/mcos; fi
if ! rmmod mcoverlay; then
if ! rmmod mcoverlay 2>/dev/null; then
echo "error: removing mcoverlay" >&2
error_exit "initial"
error_exit "aslr_disabled"
fi
fi
fi
@@ -245,7 +256,7 @@ if [ "${irqbalance_used}" == "yes" ]; then
systemctl stop irqbalance_mck.service 2>/dev/null
if ! systemctl stop irqbalance.service 2>/dev/null ; then
echo "error: stopping irqbalance" >&2
error_exit "initial"
error_exit "aslr_disabled"
fi;
fi
@@ -260,21 +271,26 @@ if [ ${LOGMODE} -ne 0 ]; then
fi
# Load IHK if not loaded
if [ "`lsmod | grep ihk`" == "" ]; then
if ! insmod ${KMODDIR}/ihk.ko; then
if ! grep -E 'ihk\s' /proc/modules &>/dev/null; then
if ! insmod ${KMODDIR}/ihk.ko 2>/dev/null; then
echo "error: loading ihk" >&2
error_exit "irqbalance_stopped"
fi
fi
# Increase swappiness so that we have better chance to allocate memory for IHK
echo 100 > /proc/sys/vm/swappiness
# Drop Linux caches to free memory
sync && echo 3 > /proc/sys/vm/drop_caches
# Merge free memory areas into large, physically contigous ones
echo 1 > /proc/sys/vm/compact_memory 2>/dev/null
sync
# Load IHK-SMP if not loaded and reserve CPUs and memory
if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then
if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then
ihk_irq=""
for i in `seq 64 255`; do
if [ ! -d /proc/irq/$i ] && [ "`cat /proc/interrupts | grep ":" | awk '{print $1}' | grep -o '[0-9]*' | grep -e '^$i$'`" == "" ]; then
@@ -286,25 +302,38 @@ if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then
echo "error: no IRQ available" >&2
error_exit "ihk_loaded"
fi
if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core; then
if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core 2>/dev/null; then
echo "error: loading ihk-smp-x86" >&2
error_exit "ihk_loaded"
fi
if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then
echo "error: reserving CPUs" >&2;
error_exit "ihk_smp_loaded"
# Free MCDRAM (special case for OFP SNC-4 mode)
if [ "`hostname | grep "c[0-9][0-9][0-9][0-9].ofp"`" != "" ] && [ "`cat /sys/devices/system/node/online`" == "0-7" ]; then
for i in 4 5 6 7; do
find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
echo 0 > $f 2>&1 > /dev/null;
done
find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
echo 1 > $f 2>&1 > /dev/null;
done
done
fi
if ! ${SBINDIR}/ihkconfig 0 reserve mem ${mem}; then
echo "error: reserving memory" >&2
error_exit "cpus_reserved"
error_exit "ihk_smp_loaded"
fi
if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then
echo "error: reserving CPUs" >&2;
error_exit "mem_reserved"
fi
fi
# Load mcctrl if not loaded
if [ "`lsmod | grep mcctrl`" == "" ]; then
if ! insmod ${KMODDIR}/mcctrl.ko; then
if ! grep mcctrl /proc/modules &>/dev/null; then
if ! insmod ${KMODDIR}/mcctrl.ko 2>/dev/null; then
echo "error: inserting mcctrl.ko" >&2
error_exit "mem_reserved"
error_exit "cpus_reserved"
fi
fi
@@ -362,17 +391,21 @@ fi
# Overlay /proc, /sys with McKernel specific contents
if [ "$enable_mcoverlay" == "yes" ]; then
if [ ! -e /tmp/mcos ]; then mkdir -p /tmp/mcos; fi
if [ ! -e /tmp/mcos ]; then
mkdir -p /tmp/mcos;
fi
if ! mount -t tmpfs tmpfs /tmp/mcos; then
echo "error: mount /tmp/mcos" >&2
error_exit "tmp_mcos_created"
fi
if [ ! -e /tmp/mcos/linux_proc ]; then mkdir -p /tmp/mcos/linux_proc; fi
if [ ! -e /tmp/mcos/linux_proc ]; then
mkdir -p /tmp/mcos/linux_proc;
fi
if ! mount --bind /proc /tmp/mcos/linux_proc; then
echo "error: mount /tmp/mcos/linux_proc" >&2
error_exit "tmp_mcos_mounted"
fi
if ! insmod ${KMODDIR}/mcoverlay.ko; then
if ! insmod ${KMODDIR}/mcoverlay.ko 2>/dev/null; then
echo "error: inserting mcoverlay.ko" >&2
error_exit "linux_proc_bind_mounted"
fi
@@ -380,9 +413,15 @@ if [ "$enable_mcoverlay" == "yes" ]; then
do
sleep 0.1
done
if [ ! -e /tmp/mcos/mcos0_proc ]; then mkdir -p /tmp/mcos/mcos0_proc; fi
if [ ! -e /tmp/mcos/mcos0_proc_upper ]; then mkdir -p /tmp/mcos/mcos0_proc_upper; fi
if [ ! -e /tmp/mcos/mcos0_proc_work ]; then mkdir -p /tmp/mcos/mcos0_proc_work; fi
if [ ! -e /tmp/mcos/mcos0_proc ]; then
mkdir -p /tmp/mcos/mcos0_proc;
fi
if [ ! -e /tmp/mcos/mcos0_proc_upper ]; then
mkdir -p /tmp/mcos/mcos0_proc_upper;
fi
if [ ! -e /tmp/mcos/mcos0_proc_work ]; then
mkdir -p /tmp/mcos/mcos0_proc_work;
fi
if ! mount -t mcoverlay mcoverlay -o lowerdir=/proc/mcos0:/proc,upperdir=/tmp/mcos/mcos0_proc_upper,workdir=/tmp/mcos/mcos0_proc_work,nocopyupw,nofscheck /tmp/mcos/mcos0_proc; then
echo "error: mounting /tmp/mcos/mcos0_proc" >&2
error_exit "mcoverlayfs_loaded"
@@ -394,9 +433,15 @@ if [ "$enable_mcoverlay" == "yes" ]; then
do
sleep 0.1
done
if [ ! -e /tmp/mcos/mcos0_sys ]; then mkdir -p /tmp/mcos/mcos0_sys; fi
if [ ! -e /tmp/mcos/mcos0_sys_upper ]; then mkdir -p /tmp/mcos/mcos0_sys_upper; fi
if [ ! -e /tmp/mcos/mcos0_sys_work ]; then mkdir -p /tmp/mcos/mcos0_sys_work; fi
if [ ! -e /tmp/mcos/mcos0_sys ]; then
mkdir -p /tmp/mcos/mcos0_sys;
fi
if [ ! -e /tmp/mcos/mcos0_sys_upper ]; then
mkdir -p /tmp/mcos/mcos0_sys_upper;
fi
if [ ! -e /tmp/mcos/mcos0_sys_work ]; then
mkdir -p /tmp/mcos/mcos0_sys_work;
fi
if ! mount -t mcoverlay mcoverlay -o lowerdir=/sys/devices/virtual/mcos/mcos0/sys:/sys,upperdir=/tmp/mcos/mcos0_sys_upper,workdir=/tmp/mcos/mcos0_sys_work,nocopyupw,nofscheck /tmp/mcos/mcos0_sys; then
echo "error: mount /tmp/mcos/mcos0_sys" >&2
error_exit "mcos_proc_mounted"
@@ -462,8 +507,9 @@ if [ "${irqbalance_used}" == "yes" ]; then
banirq=`cat /proc/interrupts| perl -e 'while(<>) { if(/^\s*(\d+).*IHK\-SMP\s*$/) {print $1;}}'`
sed "s/%mask%/$smp_affinity_mask/g" $ETCDIR/irqbalance_mck.in | sed "s/%banirq%/$banirq/g" > $ETCDIR/irqbalance_mck
if ! systemctl link $ETCDIR/irqbalance_mck.service >/dev/null 2>/dev/null; then
sed "s/%mask%/$smp_affinity_mask/g" $ETCDIR/irqbalance_mck.in | sed "s/%banirq%/$banirq/g" > /tmp/irqbalance_mck
systemctl disable irqbalance_mck.service >/dev/null 2>/dev/null
if ! systemctl link $ETCDIR/irqbalance_mck.service >/dev/null 2>/dev/null; then
echo "error: linking irqbalance_mck" >&2
error_exit "mcos_sys_mounted"
fi

View File

@@ -18,7 +18,7 @@ mem=""
cpus=""
# No SMP module? Exit.
if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then exit 0; fi
if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then exit 0; fi
# Destroy all LWK instances
if ls /dev/mcos* 1>/dev/null 2>&1; then
@@ -59,36 +59,36 @@ if [ "${mem}" != "" ]; then
fi
# Remove delegator if loaded
if [ "`lsmod | grep mcctrl`" != "" ]; then
if ! rmmod mcctrl; then
if grep mcctrl /proc/modules &>/dev/null; then
if ! rmmod mcctrl 2>/dev/null; then
echo "error: removing mcctrl" >&2
exit 1
fi
fi
# Remove mcoverlay if loaded
if [ "`lsmod | grep mcoverlay`" != "" ]; then
if grep mcoverlay /proc/modules &>/dev/null; then
if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_sys`" != "" ]; then umount -l /tmp/mcos/mcos0_sys; fi
if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_proc`" != "" ]; then umount -l /tmp/mcos/mcos0_proc; fi
if [ "`cat /proc/mounts | grep /tmp/mcos/linux_proc`" != "" ]; then umount -l /tmp/mcos/linux_proc; fi
if [ "`cat /proc/mounts | grep /tmp/mcos`" != "" ]; then umount -l /tmp/mcos; fi
if [ -e /tmp/mcos ]; then rm -rf /tmp/mcos; fi
if ! rmmod mcoverlay; then
if ! rmmod mcoverlay 2>/dev/null; then
echo "warning: failed to remove mcoverlay" >&2
fi
fi
# Remove SMP module
if [ "`lsmod | grep ihk_smp_x86`" != "" ]; then
if ! rmmod ihk_smp_x86; then
if grep ihk_smp_x86 /proc/modules &>/dev/null; then
if ! rmmod ihk_smp_x86 2>/dev/null; then
echo "error: removing ihk_smp_x86" >&2
exit 1
fi
fi
# Remove core module
if [ "`lsmod | grep -E 'ihk\s' | awk '{print $1}'`" != "" ]; then
if ! rmmod ihk; then
if grep -E 'ihk\s' /proc/modules &>/dev/null; then
if ! rmmod ihk 2>/dev/null; then
echo "error: removing ihk" >&2
exit 1
fi
@@ -113,3 +113,10 @@ if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: ac
fi
fi
# Re-enable ASLR
if [ -f /tmp/mckernel_randomize_va_space ]; then
cat /tmp/mckernel_randomize_va_space > /proc/sys/kernel/randomize_va_space
fi
# Set back default swappiness
echo 60 > /proc/sys/vm/swappiness

configure (vendored)
View File

@@ -3918,7 +3918,7 @@ fi
ac_config_headers="$ac_config_headers executer/config.h"
ac_config_files="$ac_config_files Makefile executer/user/Makefile executer/kernel/mcctrl/Makefile executer/kernel/mcctrl/arch/x86_64/Makefile executer/kernel/mcoverlayfs/Makefile executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile executer/kernel/mcoverlayfs/linux-4.0.9/Makefile executer/kernel/mcoverlayfs/linux-4.6.7/Makefile kernel/Makefile kernel/Makefile.build arch/x86/tools/mcreboot-attached-mic.sh arch/x86/tools/mcshutdown-attached-mic.sh arch/x86/tools/mcreboot-builtin-x86.sh arch/x86/tools/mcreboot-smp-x86.sh arch/x86/tools/mcstop+release-smp-x86.sh arch/x86/tools/mcshutdown-builtin-x86.sh arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in arch/x86/tools/irqbalance_mck.service arch/x86/tools/irqbalance_mck.in"
ac_config_files="$ac_config_files Makefile executer/user/Makefile executer/kernel/mcctrl/Makefile executer/kernel/mcctrl/arch/x86_64/Makefile executer/kernel/mcoverlayfs/Makefile executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile executer/kernel/mcoverlayfs/linux-4.0.9/Makefile executer/kernel/mcoverlayfs/linux-4.6.7/Makefile kernel/Makefile kernel/Makefile.build arch/x86/tools/mcreboot-attached-mic.sh arch/x86/tools/mcshutdown-attached-mic.sh arch/x86/tools/mcreboot-builtin-x86.sh arch/x86/tools/mcreboot-smp-x86.sh arch/x86/tools/mcstop+release-smp-x86.sh arch/x86/tools/eclair-dump-backtrace.exp arch/x86/tools/mcshutdown-builtin-x86.sh arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in arch/x86/tools/irqbalance_mck.service arch/x86/tools/irqbalance_mck.in"
if test "x$enable_dcfa" = xyes; then :
@@ -4632,6 +4632,7 @@ do
"arch/x86/tools/mcreboot-builtin-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot-builtin-x86.sh" ;;
"arch/x86/tools/mcreboot-smp-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot-smp-x86.sh" ;;
"arch/x86/tools/mcstop+release-smp-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcstop+release-smp-x86.sh" ;;
"arch/x86/tools/eclair-dump-backtrace.exp") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/eclair-dump-backtrace.exp" ;;
"arch/x86/tools/mcshutdown-builtin-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcshutdown-builtin-x86.sh" ;;
"arch/x86/tools/mcreboot.1") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in" ;;
"arch/x86/tools/irqbalance_mck.service") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/irqbalance_mck.service" ;;

View File

@@ -315,6 +315,7 @@ AC_CONFIG_FILES([
arch/x86/tools/mcreboot-builtin-x86.sh
arch/x86/tools/mcreboot-smp-x86.sh
arch/x86/tools/mcstop+release-smp-x86.sh
arch/x86/tools/eclair-dump-backtrace.exp
arch/x86/tools/mcshutdown-builtin-x86.sh
arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in
arch/x86/tools/irqbalance_mck.service

View File

@@ -85,6 +85,7 @@ struct get_cpu_set_arg {
void *cpu_set;
size_t cpu_set_size; // Size in bytes
int *target_core;
int *mcexec_linux_numa; // NUMA domain to bind mcexec to
};
#define PLD_CPU_SET_MAX_CPUS 1024

View File

@@ -64,6 +64,8 @@ reserve_user_space(struct mcctrl_usrdata *usrdata, unsigned long *startp, unsign
unsigned long start = 0L;
unsigned long end;
mutex_lock(&usrdata->reserve_lock);
#define DESIRED_USER_END 0x800000000000
#define GAP_FOR_MCEXEC 0x008000000000UL
end = DESIRED_USER_END;
@@ -81,6 +83,8 @@ reserve_user_space(struct mcctrl_usrdata *usrdata, unsigned long *startp, unsign
up_write(&current->mm->mmap_sem);
#endif
mutex_unlock(&usrdata->reserve_lock);
if (IS_ERR_VALUE(start)) {
return start;
}

View File

@@ -82,17 +82,18 @@ int (*mcctrl_sys_umount)(char *dir_name, int flags) = sys_umount;
#endif
#endif
//static DECLARE_WAIT_QUEUE_HEAD(wq_prepare);
//extern struct mcctrl_channel *channels;
int mcctrl_ikc_set_recv_cpu(ihk_os_t os, int cpu);
static long mcexec_prepare_image(ihk_os_t os,
struct program_load_desc * __user udesc)
{
struct program_load_desc *desc, *pdesc;
struct program_load_desc *desc = NULL;
struct program_load_desc *pdesc = NULL;
struct ikc_scd_packet isp;
void *args, *envs;
long ret = 0;
void *args = NULL;
void *envs = NULL;
int ret = 0;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;
int num_sections;
@@ -108,48 +109,59 @@ static long mcexec_prepare_image(ihk_os_t os,
sizeof(struct program_load_desc))) {
printk("%s: error: copying program_load_desc\n",
__FUNCTION__);
kfree(desc);
return -EFAULT;
ret = -EFAULT;
goto free_out;
}
ppd = mcctrl_get_per_proc_data(usrdata, desc->pid);
if (!ppd) {
printk("%s: ERROR: no per process data for PID %d\n",
__FUNCTION__, desc->pid);
ret = -EINVAL;
goto free_out;
}
num_sections = desc->num_sections;
if (num_sections <= 0 || num_sections > 16) {
printk("# of sections: %d\n", num_sections);
return -EINVAL;
printk("%s: ERROR: # of sections: %d\n",
__FUNCTION__, num_sections);
ret = -EINVAL;
goto put_and_free_out;
}
pdesc = kmalloc(sizeof(struct program_load_desc) +
sizeof(struct program_image_section)
* num_sections, GFP_KERNEL);
memcpy(pdesc, desc, sizeof(struct program_load_desc));
if (copy_from_user(pdesc->sections, udesc->sections,
sizeof(struct program_image_section)
* num_sections)) {
kfree(desc);
kfree(pdesc);
return -EFAULT;
ret = -EFAULT;
goto put_and_free_out;
}
kfree(desc);
desc = NULL;
pdesc->pid = task_tgid_vnr(current);
if (reserve_user_space(usrdata, &pdesc->user_start, &pdesc->user_end)) {
kfree(pdesc);
return -ENOMEM;
ret = -ENOMEM;
goto put_and_free_out;
}
args = kmalloc(pdesc->args_len, GFP_KERNEL);
if (copy_from_user(args, pdesc->args, pdesc->args_len)) {
kfree(args);
kfree(pdesc);
return -EFAULT;
ret = -EFAULT;
goto put_and_free_out;
}
envs = kmalloc(pdesc->envs_len, GFP_KERNEL);
if (copy_from_user(envs, pdesc->envs, pdesc->envs_len)) {
ret = -EFAULT;
goto free_out;
goto put_and_free_out;
}
pdesc->args = (void*)virt_to_phys(args);
@@ -167,20 +179,18 @@ static long mcexec_prepare_image(ihk_os_t os,
dprintk("%p (%lx)\n", pdesc, isp.arg);
pdesc->status = 0;
mb();
mcctrl_ikc_send(os, pdesc->cpu, &isp);
while (wait_event_interruptible(usrdata->wq_prepare, pdesc->status) != 0);
if(pdesc->err < 0){
ret = pdesc->err;
goto free_out;
ret = wait_event_interruptible(ppd->wq_prepare, pdesc->status);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
goto put_and_free_out;
}
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (!ppd) {
printk("ERROR: no per process data for PID %d\n", task_tgid_vnr(current));
ret = -EINVAL;
goto free_out;
if (pdesc->err < 0) {
ret = pdesc->err;
goto put_and_free_out;
}
/* Update rpgtable */
@@ -189,7 +199,7 @@ static long mcexec_prepare_image(ihk_os_t os,
if (copy_to_user(udesc, pdesc, sizeof(struct program_load_desc) +
sizeof(struct program_image_section) * num_sections)) {
ret = -EFAULT;
goto free_out;
goto put_and_free_out;
}
dprintk("%s: pid %d, rpgtable: 0x%lx added\n",
@@ -197,10 +207,13 @@ static long mcexec_prepare_image(ihk_os_t os,
ret = 0;
put_and_free_out:
mcctrl_put_per_proc_data(ppd);
free_out:
kfree(args);
kfree(pdesc);
kfree(envs);
kfree(desc);
return ret;
}
@@ -309,11 +322,21 @@ static long mcexec_debug_log(ihk_os_t os, unsigned long arg)
return 0;
}
int mcexec_close_exec(ihk_os_t os);
static void release_handler(ihk_os_t os, void *param)
{
struct release_handler_info *info = param;
struct ikc_scd_packet isp;
int os_ind = ihk_host_os_get_index(os);
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;
ppd = mcctrl_get_per_proc_data(usrdata, info->pid);
if (ppd) {
mcctrl_put_per_proc_data(ppd);
mcexec_close_exec(os);
}
memset(&isp, '\0', sizeof isp);
isp.msg = SCD_MSG_CLEANUP_PROCESS;
@@ -422,7 +445,7 @@ static long mcexec_send_signal(ihk_os_t os, struct signal_desc *sigparam)
isp.pid = sig.pid;
isp.arg = virt_to_phys(msigp);
if((rc = mcctrl_ikc_send(os, sig.cpu, &isp)) < 0){
if ((rc = mcctrl_ikc_send(os, sig.cpu, &isp)) < 0) {
printk("mcexec_send_signal: mcctrl_ikc_send ret=%d\n", rc);
return rc;
}
@@ -480,6 +503,7 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
struct cache_topology *cache_top;
int cpu, cpus_assigned, cpus_to_assign, cpu_prev;
int ret = 0;
int mcexec_linux_numa;
cpumask_t cpus_used;
cpumask_t cpus_to_use;
struct mcctrl_per_proc_data *ppd;
@@ -498,7 +522,8 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
if (copy_from_user(&req, (void *)arg, sizeof(req))) {
printk("%s: error copying user request\n", __FUNCTION__);
return -EINVAL;
ret = -EINVAL;
goto put_and_unlock_out;
}
mutex_lock(&pe->lock);
@@ -520,7 +545,7 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
" doesn't match current partitioned execution\n",
__FUNCTION__);
ret = -EINVAL;
goto unlock_out;
goto put_and_unlock_out;
}
--pe->nr_processes_left;
@@ -537,11 +562,16 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
printk("%s: error: no more CPUs available\n",
__FUNCTION__);
ret = -EINVAL;
goto unlock_out;
goto put_and_unlock_out;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (first)\n", __FUNCTION__, cpu);
@@ -562,16 +592,25 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
printk("%s: error: couldn't find CPU topology info\n",
__FUNCTION__);
ret = -EINVAL;
goto unlock_out;
goto put_and_unlock_out;
}
/* Find a core sharing the same cache iterating caches from
* the most inner one outwards */
list_for_each_entry(cache_top, &cpu_top->cache_list, chain) {
for_each_cpu(cpu, &cache_top->shared_cpu_map) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
if (!cpumask_test_cpu(cpu, &cpus_used)) {
#else
if (!cpu_isset(cpu, cpus_used)) {
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (same cache L%lu)\n",
__FUNCTION__, cpu, cache_top->saved->level);
@@ -592,8 +631,13 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
/* Found one */
if (node == linux_numa_2_mckernel_numa(udp,
cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu)))) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (same NUMA)\n",
__FUNCTION__, cpu);
@@ -607,11 +651,16 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
printk("%s: error: no more CPUs available\n",
__FUNCTION__);
ret = -EINVAL;
goto unlock_out;
goto put_and_unlock_out;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (unused)\n",
__FUNCTION__, cpu);
@@ -626,15 +675,25 @@ next_cpu:
req.cpu_set_size : sizeof(cpus_to_use)))) {
printk("%s: error copying mask to user\n", __FUNCTION__);
ret = -EINVAL;
goto unlock_out;
goto put_and_unlock_out;
}
/* Copy IKC target core and mcexec Linux NUMA id */
cpu = cpumask_next(-1, &cpus_to_use);
if (copy_to_user(req.target_core, &cpu, sizeof(cpu))) {
printk("%s: error copying target core to user\n",
__FUNCTION__);
ret = -EINVAL;
goto unlock_out;
goto put_and_unlock_out;
}
mcexec_linux_numa = cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu));
if (copy_to_user(req.mcexec_linux_numa, &mcexec_linux_numa,
sizeof(mcexec_linux_numa))) {
printk("%s: error copying mcexec Linux NUMA id\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}
/* Save in per-process structure */
@@ -655,7 +714,8 @@ next_cpu:
ret = 0;
unlock_out:
put_and_unlock_out:
mcctrl_put_per_proc_data(ppd);
mutex_unlock(&pe->lock);
return ret;
@@ -685,34 +745,10 @@ out:
return ret;
}
int mcctrl_delete_per_proc_data(struct mcctrl_usrdata *ud, int pid)
{
struct mcctrl_per_proc_data *ppd_iter, *ppd = NULL;
int hash = (pid & MCCTRL_PER_PROC_DATA_HASH_MASK);
int ret = 0;
unsigned long flags;
write_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
if (ppd_iter->pid == pid) {
ppd = ppd_iter;
break;
}
}
if (!ppd) {
ret = -EINVAL;
goto out;
}
list_del(&ppd->hash);
out:
write_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);
return ret;
}
inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
/* NOTE: per-process data is refcounted.
* For every get call the user should call put. */
struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
struct mcctrl_usrdata *ud, int pid)
{
struct mcctrl_per_proc_data *ppd_iter, *ppd = NULL;
@@ -721,7 +757,6 @@ inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
/* Check if data for this process exists and return it */
read_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
if (ppd_iter->pid == pid) {
ppd = ppd_iter;
@@ -729,10 +764,57 @@ inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
}
}
if (ppd) {
atomic_inc(&ppd->refcount);
}
read_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);
return ppd;
}
/* Drop reference. If zero, remove and deallocate */
void mcctrl_put_per_proc_data(struct mcctrl_per_proc_data *ppd)
{
int hash;
unsigned long flags;
int i;
if (!ppd)
return;
if (!atomic_dec_and_test(&ppd->refcount))
return;
dprintk("%s: deallocating PPD for pid %d\n", __FUNCTION__, ppd->pid);
hash = (ppd->pid & MCCTRL_PER_PROC_DATA_HASH_MASK);
write_lock_irqsave(&ppd->ud->per_proc_data_hash_lock[hash], flags);
list_del(&ppd->hash);
write_unlock_irqrestore(&ppd->ud->per_proc_data_hash_lock[hash], flags);
for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; i++) {
struct mcctrl_per_thread_data *ptd;
struct mcctrl_per_thread_data *next;
struct ikc_scd_packet *packet;
list_for_each_entry_safe(ptd, next,
ppd->per_thread_data_hash + i, hash) {
packet = ptd->data;
list_del(&ptd->hash);
kfree(ptd);
__return_syscall(ppd->ud->os, packet, -EINTR,
task_pid_vnr(current));
ihk_ikc_release_packet(
(struct ihk_ikc_free_packet *)packet,
(ppd->ud->channels + packet->ref)->c);
}
}
kfree(ppd);
}
/*
* Called indirectly from the IKC message handler.
*/
@@ -745,7 +827,7 @@ int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet)
unsigned long flags;
struct mcctrl_per_proc_data *ppd;
/* Look up per-process structure */
/* Get a reference to per-process structure */
ppd = mcctrl_get_per_proc_data(ud, pid);
if (unlikely(!ppd)) {
@@ -771,7 +853,7 @@ int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet)
/* Is this a request for a specific thread? See if it's waiting */
if (unlikely(packet->req.ttid)) {
list_for_each_entry(wqhln_iter, &ppd->wq_list_exact, list) {
if (packet->req.ttid != task_pid_vnr(wqhln_iter->task))
if (packet->req.ttid != wqhln_iter->rtid)
continue;
wqhln = wqhln_iter;
@@ -810,8 +892,10 @@ retry_alloc:
wqhln->packet = packet;
wqhln->req = 1;
wake_up(&wqhln->wq_syscall);
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, flags);
wake_up(&wqhln->wq_syscall);
mcctrl_put_per_proc_data(ppd);
return 0;
}
@@ -829,7 +913,7 @@ int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
unsigned long irqflags;
struct mcctrl_per_proc_data *ppd;
/* Look up per-process structure */
/* Get a reference to per-process structure */
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (unlikely(!ppd)) {
@@ -842,7 +926,8 @@ int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
if (packet) {
printk("%s: ERROR: packet %p is already registered for thread %d\n",
__FUNCTION__, packet, task_pid_vnr(current));
return -EBUSY;
ret = -EBUSY;
goto put_ppd_out;
}
retry:
@@ -868,12 +953,13 @@ retry_alloc:
wqhln->task = current;
wqhln->req = 0;
wqhln->packet = NULL;
init_waitqueue_head(&wqhln->wq_syscall);
/* Wait for a request.. */
list_add(&wqhln->list, &ppd->wq_list);
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);
/* Wait for a request.. */
ret = wait_event_interruptible(wqhln->wq_syscall, wqhln->req);
/* Remove per-thread wait queue head */
@@ -885,7 +971,8 @@ retry_alloc:
if (ret && !wqhln->req) {
kfree(wqhln);
wqhln = NULL;
return -EINTR;
ret = -EINTR;
goto put_ppd_out;
}
packet = wqhln->packet;
@@ -921,7 +1008,8 @@ retry_alloc:
if (mcctrl_add_per_thread_data(ppd, current, packet) < 0) {
kprintf("%s: error adding per-thread data\n", __FUNCTION__);
return -EINVAL;
ret = -EINVAL;;
goto put_ppd_out;
}
if (__do_in_kernel_syscall(os, packet)) {
@@ -930,11 +1018,13 @@ retry_alloc:
if (mcctrl_delete_per_thread_data(ppd, current) < 0) {
kprintf("%s: error deleting per-thread data\n", __FUNCTION__);
return -EINVAL;
}
return -EFAULT;
ret = -EINVAL;;
goto put_ppd_out;
}
return 0;
ret = 0;
goto put_ppd_out;
}
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
@@ -942,10 +1032,15 @@ retry_alloc:
if (mcctrl_delete_per_thread_data(ppd, current) < 0) {
kprintf("%s: error deleting per-thread data\n", __FUNCTION__);
return -EINVAL;
ret = -EINVAL;;
goto put_ppd_out;
}
goto retry;
put_ppd_out:
mcctrl_put_per_proc_data(ppd);
return ret;
}
long mcexec_pin_region(ihk_os_t os, unsigned long *__user arg)
@@ -1055,6 +1150,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
if (!packet) {
kprintf("%s: ERROR: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
mcctrl_put_per_proc_data(ppd);
return -EINVAL;
}
@@ -1073,6 +1169,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
ret.size, NULL, 0);
#endif
if (copy_from_user(rpm, (void *__user)ret.src, ret.size)) {
mcctrl_put_per_proc_data(ppd);
return -EFAULT;
}
@@ -1090,6 +1187,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
(usrdata->channels + packet->ref)->c);
mcctrl_put_per_proc_data(ppd);
return 0;
}
@@ -1158,7 +1256,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
int i;
if (os_ind < 0) {
return EINVAL;
return -EINVAL;
}
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
@@ -1170,6 +1268,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
return -ENOMEM;
}
ppd->ud = usrdata;
ppd->pid = task_tgid_vnr(current);
/*
* XXX: rpgtable will be updated in __do_in_kernel_syscall()
@@ -1178,9 +1277,13 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
INIT_LIST_HEAD(&ppd->wq_list);
INIT_LIST_HEAD(&ppd->wq_req_list);
INIT_LIST_HEAD(&ppd->wq_list_exact);
init_waitqueue_head(&ppd->wq_prepare);
init_waitqueue_head(&ppd->wq_procfs);
spin_lock_init(&ppd->wq_list_lock);
memset(&ppd->cpu_set, 0, sizeof(cpumask_t));
ppd->ikc_target_cpu = 0;
/* Final ref will be dropped in close_exec() */
atomic_set(&ppd->refcount, 1);
for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; ++i) {
INIT_LIST_HEAD(&ppd->per_thread_data_hash[i]);
@@ -1189,36 +1292,33 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
if (mcctrl_add_per_proc_data(usrdata, ppd->pid, ppd) < 0) {
printk("%s: error adding per process data\n", __FUNCTION__);
retval = EINVAL;
goto out_free_ppd;
retval = -EINVAL;
kfree(ppd);
goto out;
}
}
else {
/* Only deallocate in case of an error if we added it above */
ppd = NULL;
}
pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
if (!pathbuf) {
retval = ENOMEM;
goto out_error_drop_ppd;
retval = -ENOMEM;
goto out_put_ppd;
}
file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file)) {
goto out_error_free;
goto out_free;
}
fullpath = d_path(&file->f_path, pathbuf, PATH_MAX);
if (IS_ERR(fullpath)) {
retval = PTR_ERR(fullpath);
goto out_error_free;
goto out_free;
}
mcef = kmalloc(sizeof(*mcef), GFP_KERNEL);
if (!mcef) {
retval = ENOMEM;
retval = -ENOMEM;
goto out_put_file;
}
@@ -1253,13 +1353,12 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
out_put_file:
fput(file);
out_error_free:
out_free:
kfree(pathbuf);
out_error_drop_ppd:
if (ppd) mcctrl_delete_per_proc_data(usrdata, ppd->pid);
out_free_ppd:
if (ppd) kfree(ppd);
return -retval;
out_put_ppd:
mcctrl_put_per_proc_data(ppd);
out:
return retval;
}
@@ -1274,12 +1373,12 @@ int mcexec_close_exec(ihk_os_t os)
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (ppd) {
mcctrl_delete_per_proc_data(usrdata, ppd->pid);
/* One for the reference and one for deallocation */
mcctrl_put_per_proc_data(ppd);
mcctrl_put_per_proc_data(ppd);
dprintk("pid: %d, tid: %d: rpgtable for %d (0x%lx) removed\n",
task_tgid_vnr(current), current->pid, ppd->pid, ppd->rpgtable);
kfree(ppd);
}
else {
printk("WARNING: no per process data for pid %d ?\n",
@@ -1530,10 +1629,20 @@ void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err)
{
struct program_load_desc *desc = phys_to_virt(arg);
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;
ppd = mcctrl_get_per_proc_data(usrdata, desc->pid);
if (!ppd) {
printk("%s: ERROR: no per process data for PID %d\n",
__FUNCTION__, desc->pid);
return;
}
desc->err = err;
desc->status = 1;
mb();
wake_up_all(&usrdata->wq_prepare);
wake_up_all(&ppd->wq_prepare);
mcctrl_put_per_proc_data(ppd);
}
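
The get/put pair introduced above follows the standard lookup-under-lock, free-on-last-put refcounting pattern: get takes the hash read lock, finds the entry, and bumps the count; put decrements and, only on the final reference, unhashes under the write lock, drains pending per-thread packets, and frees. A stripped-down sketch of the same lifetime rule using C11 atomics — the type and its payload are illustrative only, not the real mcctrl_per_proc_data:

```c
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative object; the real mcctrl_per_proc_data also carries
 * wait queues, hash links and per-thread data tables. */
struct obj {
	atomic_int refcount;
	/* ... payload ... */
};

struct obj *obj_get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refcount, 1); /* caller now holds a ref */
	return o;
}

void obj_put(struct obj *o)
{
	if (!o)
		return;
	/* fetch_sub returns the old value: 1 means we dropped the last
	 * reference, and only then is teardown safe. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}
```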

View File

@@ -80,7 +80,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
break;
case SCD_MSG_PROCFS_ANSWER:
procfs_answer(pisp->arg, pisp->err);
procfs_answer(usrdata, pisp->pid);
break;
case SCD_MSG_SEND_SIGNAL:
@@ -283,12 +283,13 @@ int prepare_ikc_channels(ihk_os_t os)
}
usrdata->os = os;
init_waitqueue_head(&usrdata->wq_prepare);
ihk_host_os_set_usrdata(os, usrdata);
memcpy(&usrdata->listen_param, &listen_param, sizeof listen_param);
ihk_ikc_listen_port(os, &usrdata->listen_param);
memcpy(&usrdata->listen_param2, &listen_param2, sizeof listen_param2);
ihk_ikc_listen_port(os, &usrdata->listen_param2);
init_waitqueue_head(&usrdata->wq_procfs);
mutex_init(&usrdata->reserve_lock);
for (i = 0; i < MCCTRL_PER_PROC_DATA_HASH_SIZE; ++i) {
INIT_LIST_HEAD(&usrdata->per_proc_data_hash[i]);
@@ -326,7 +327,6 @@ void destroy_ikc_channels(ihk_os_t os)
// ihk_ikc_disconnect(usrdata->channels[i].c);
ihk_ikc_free_channel(usrdata->channels[i].c);
__destroy_ikc_channel(os, usrdata->channels + i);
printk("Channel #%d freed.\n", i);
}
}

View File

@@ -187,6 +187,7 @@ struct mcctrl_per_thread_data {
#define MCCTRL_PER_THREAD_DATA_HASH_MASK (MCCTRL_PER_THREAD_DATA_HASH_SIZE - 1)
struct mcctrl_per_proc_data {
struct mcctrl_usrdata *ud;
struct list_head hash;
int pid;
unsigned long rpgtable; /* per process, not per OS */
@@ -195,11 +196,14 @@ struct mcctrl_per_proc_data {
struct list_head wq_req_list;
struct list_head wq_list_exact;
ihk_spinlock_t wq_list_lock;
wait_queue_head_t wq_prepare;
wait_queue_head_t wq_procfs;
struct list_head per_thread_data_hash[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
rwlock_t per_thread_data_hash_lock[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
cpumask_t cpu_set;
int ikc_target_cpu;
atomic_t refcount;
};
struct sysfsm_req {
@@ -279,9 +283,9 @@ struct mcctrl_usrdata {
int base_cpu;
int job_pos;
int mcctrl_dma_abort;
struct mutex reserve_lock;
unsigned long last_thread_exec;
wait_queue_head_t wq_prepare;
wait_queue_head_t wq_procfs;
struct list_head per_proc_data_hash[MCCTRL_PER_PROC_DATA_HASH_SIZE];
rwlock_t per_proc_data_hash_lock[MCCTRL_PER_PROC_DATA_HASH_SIZE];
@@ -315,8 +319,9 @@ int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet);
int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
struct mcctrl_per_proc_data *ppd);
int mcctrl_delete_per_proc_data(struct mcctrl_usrdata *ud, int pid);
inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
struct mcctrl_usrdata *ud, int pid);
void mcctrl_put_per_proc_data(struct mcctrl_per_proc_data *ppd);
int mcctrl_add_per_thread_data(struct mcctrl_per_proc_data* ppd,
struct task_struct *task, void *data);
@@ -348,7 +353,7 @@ struct procfs_file {
char fname[PROCFS_NAME_MAX]; /* procfs filename (request) */
};
void procfs_answer(unsigned int arg, int err);
void procfs_answer(struct mcctrl_usrdata *ud, int pid);
int procfsm_packet_handler(void *os, int msg, int pid, unsigned long arg);
void add_tid_entry(int osnum, int pid, int tid);
void add_pid_entry(int osnum, int pid);

View File

@@ -59,7 +59,6 @@ static const struct procfs_entry base_entry_stuff[];
static const struct file_operations mckernel_forward_ro;
static const struct file_operations mckernel_forward;
static DECLARE_WAIT_QUEUE_HEAD(procfsq);
static ssize_t mckernel_procfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos);
@@ -106,14 +105,28 @@ getpath(struct procfs_list_entry *e, char *buf, int bufsize)
/**
* \brief Process SCD_MSG_PROCFS_ANSWER message.
*
* \param arg sent argument
* \param err error info (redundant)
* \param ud mcctrl_usrdata pointer
* \param pid PID of the requesting process
*/
void
procfs_answer(unsigned int arg, int err)
void procfs_answer(struct mcctrl_usrdata *ud, int pid)
{
dprintk("procfs: received SCD_MSG_PROCFS_ANSWER message(err = %d).\n", err);
wake_up_interruptible(&procfsq);
struct mcctrl_per_proc_data *ppd = NULL;
if (pid > 0) {
ppd = mcctrl_get_per_proc_data(ud, pid);
if (unlikely(!ppd)) {
kprintf("%s: ERROR: no per-process structure for PID %d\n",
__FUNCTION__, pid);
return;
}
}
wake_up_all(pid > 0 ? &ppd->wq_procfs : &ud->wq_procfs);
if (pid > 0) {
mcctrl_put_per_proc_data(ppd);
}
}
static struct procfs_list_entry *
@@ -248,9 +261,11 @@ get_pid_cred(int pid)
{
struct task_struct *task = NULL;
if(pid > 0){
if (pid > 0) {
rcu_read_lock();
task = pid_task(find_vpid(pid), PIDTYPE_PID);
if(task){
rcu_read_unlock();
if (task) {
return __task_cred(task);
}
}
@@ -493,36 +508,84 @@ procfs_exit(int osnum)
* This function conforms to the 2) way of fs/proc/generic.c
* from linux-2.6.39.4.
*/
static ssize_t
mckernel_procfs_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
static ssize_t __mckernel_procfs_read_write(
struct file *file,
char __user *buf, size_t nbytes,
loff_t *ppos, int read_write)
{
struct inode * inode = file->f_inode;
char *kern_buffer = NULL;
int order = 0;
volatile struct procfs_read *r = NULL;
struct ikc_scd_packet isp;
int ret;
int ret, osnum, pid, retw;
unsigned long pbuf;
unsigned long count = nbytes;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
struct proc_dir_entry *dp = PDE(inode);
struct procfs_list_entry *e = dp->data;
#else
#else
struct procfs_list_entry *e = PDE_DATA(inode);
#endif
#endif
loff_t offset = *ppos;
char pathbuf[PROCFS_NAME_MAX];
char *path;
char *path, *p;
ihk_os_t os = NULL;
struct mcctrl_usrdata *udp = NULL;
struct mcctrl_per_proc_data *ppd = NULL;
path = getpath(e, pathbuf, 256);
dprintk("mckernel_procfs_read: invoked for %s, offset: %lu, count: %d\n",
path, offset, count);
if (count <= 0 || offset < 0) {
return 0;
}
path = getpath(e, pathbuf, PROCFS_NAME_MAX);
dprintk("%s: invoked for %s, offset: %lu, count: %lu\n",
__FUNCTION__, path,
(unsigned long)offset, count);
/* Verify OS number */
ret = sscanf(path, "mcos%d/", &osnum);
if (ret != 1) {
printk("%s: error: couldn't determine OS number\n", __FUNCTION__);
return -EINVAL;
}
if (osnum != e->osnum) {
printk("%s: error: OS numbers don't match\n", __FUNCTION__);
return -EINVAL;
}
/* Is this request for a specific process? */
p = strchr(path, '/') + 1;
ret = sscanf(p, "%d/", &pid);
if (ret != 1) {
pid = -1;
}
os = osnum_to_os(osnum);
if (!os) {
printk("%s: error: no IHK OS data found for OS %d\n",
__FUNCTION__, osnum);
return -EINVAL;
}
udp = ihk_host_os_get_usrdata(os);
if (!udp) {
printk("%s: error: no MCCTRL data found for OS %d\n",
__FUNCTION__, osnum);
return -EINVAL;
}
if (pid > 0) {
ppd = mcctrl_get_per_proc_data(udp, pid);
if (unlikely(!ppd)) {
printk("%s: error: no per-process structure for PID %d",
__FUNCTION__, pid);
return -EINVAL;
}
}
while ((1 << order) < count) ++order;
if (order > 12) {
order -= 12;
@@ -534,10 +597,11 @@ mckernel_procfs_read(struct file *file, char __user *buf, size_t nbytes,
/* NOTE: we need physically contigous memory to pass through IKC */
kern_buffer = (char *)__get_free_pages(GFP_KERNEL, order);
if (!kern_buffer) {
printk("mckernel_procfs_read(): ERROR: allocating kernel buffer\n");
return -ENOMEM;
printk("%s: ERROR: allocating kernel buffer\n", __FUNCTION__);
ret = -ENOMEM;
goto out;
}
pbuf = virt_to_phys(kern_buffer);
r = kmalloc(sizeof(struct procfs_read), GFP_KERNEL);
@@ -551,152 +615,96 @@ mckernel_procfs_read(struct file *file, char __user *buf, size_t nbytes,
r->status = 0;
r->offset = offset;
r->count = count;
r->readwrite = 0;
r->readwrite = read_write;
strncpy((char *)r->fname, path, PROCFS_NAME_MAX);
isp.msg = SCD_MSG_PROCFS_REQUEST;
isp.ref = 0;
isp.arg = virt_to_phys(r);
ret = mcctrl_ikc_send(osnum_to_os(e->osnum), 0, &isp);
isp.pid = pid;
ret = mcctrl_ikc_send(osnum_to_os(e->osnum),
(pid > 0) ? ppd->ikc_target_cpu : 0, &isp);
if (ret < 0) {
goto out; /* error */
}
/* Wait for a reply. */
ret = -EIO; /* default exit code */
dprintk("now wait for a relpy\n");
/* Wait for the status field of the procfs_read structure set ready. */
if (wait_event_interruptible_timeout(procfsq, r->status != 0, HZ) == 0) {
kprintf("ERROR: mckernel_procfs_read: timeout (1 sec).\n");
dprintk("%s: waiting for reply\n", __FUNCTION__);
retry_wait:
/* Wait for the status field of the procfs_read structure,
* wait on per-process or OS specific data depending on
* who the request is for.
*/
if (pid > 0) {
retw = wait_event_interruptible_timeout(ppd->wq_procfs,
r->status != 0, HZ);
}
else {
retw = wait_event_interruptible_timeout(udp->wq_procfs,
r->status != 0, HZ);
}
/* Timeout? */
if (retw == 0 && r->status == 0) {
printk("%s: error: timeout (1 sec)\n", __FUNCTION__);
goto out;
}
/* Wake up and check the result. */
dprintk("mckernel_procfs_read: woke up. ret: %d, eof: %d\n", r->ret, r->eof);
if (r->ret > 0) {
if (copy_to_user(buf, kern_buffer, r->ret)) {
kprintf("ERROR: mckernel_procfs_read: copy_to_user failed.\n");
ret = -EFAULT;
goto out;
}
/* Interrupted? */
else if (retw == -ERESTARTSYS) {
ret = -ERESTART;
goto out;
}
/* Were we woken up by a reply to another procfs request? */
else if (r->status == 0) {
/* TODO: r->status is not set atomically, we could be woken
* up with status == 0 and it could change to 1 while in this
* code, we could potentially miss the wake_up()...
*/
printk("%s: stale wake-up, retrying\n", __FUNCTION__);
goto retry_wait;
}
/* Wake up and check the result. */
dprintk("%s: woke up. ret: %d, eof: %d\n",
__FUNCTION__, r->ret, r->eof);
if (r->ret > 0) {
if (read_write == 0) {
if (copy_to_user(buf, kern_buffer, r->ret)) {
printk("%s: ERROR: copy_to_user failed.\n", __FUNCTION__);
ret = -EFAULT;
goto out;
}
}
*ppos += r->ret;
}
ret = r->ret;
out:
if(kern_buffer)
if (ppd)
mcctrl_put_per_proc_data(ppd);
if (kern_buffer)
free_pages((uintptr_t)kern_buffer, order);
if(r)
if (r)
kfree((void *)r);
return ret;
}
static ssize_t
mckernel_procfs_write(struct file *file, const char __user *buf, size_t nbytes,
loff_t *ppos)
static ssize_t mckernel_procfs_read(struct file *file,
char __user *buf, size_t nbytes, loff_t *ppos)
{
struct inode * inode = file->f_inode;
char *kern_buffer = NULL;
int order = 0;
volatile struct procfs_read *r = NULL;
struct ikc_scd_packet isp;
int ret;
unsigned long pbuf;
unsigned long count = nbytes;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
struct proc_dir_entry *dp = PDE(inode);
struct procfs_list_entry *e = dp->data;
#else
struct procfs_list_entry *e = PDE_DATA(inode);
#endif
loff_t offset = *ppos;
char pathbuf[PROCFS_NAME_MAX];
char *path;
return __mckernel_procfs_read_write(file, buf, nbytes, ppos, 0);
}
path = getpath(e, pathbuf, 256);
dprintk("mckernel_procfs_read: invoked for %s, offset: %lu, count: %d\n",
path, offset, count);
if (count <= 0 || offset < 0) {
return 0;
}
while ((1 << order) < count) ++order;
if (order > 12) {
order -= 12;
}
else {
order = 1;
}
/* NOTE: we need physically contigous memory to pass through IKC */
kern_buffer = (char *)__get_free_pages(GFP_KERNEL, order);
if (!kern_buffer) {
printk("mckernel_procfs_read(): ERROR: allocating kernel buffer\n");
return -ENOMEM;
}
if (copy_from_user(kern_buffer, buf, nbytes)) {
ret = -EFAULT;
goto out;
}
pbuf = virt_to_phys(kern_buffer);
r = kmalloc(sizeof(struct procfs_read), GFP_KERNEL);
if (r == NULL) {
ret = -ENOMEM;
goto out;
}
dprintk("offset: %lx, count: %d, cpu: %d\n", offset, count, e->cpu);
r->pbuf = pbuf;
r->eof = 0;
r->ret = -EIO; /* default */
r->status = 0;
r->offset = offset;
r->count = count;
r->readwrite = 1;
strncpy((char *)r->fname, path, PROCFS_NAME_MAX);
isp.msg = SCD_MSG_PROCFS_REQUEST;
isp.ref = 0;
isp.arg = virt_to_phys(r);
ret = mcctrl_ikc_send(osnum_to_os(e->osnum), 0, &isp);
if (ret < 0) {
goto out; /* error */
}
/* Wait for a reply. */
ret = -EIO; /* default exit code */
dprintk("now wait for a relpy\n");
/* Wait for the status field of the procfs_read structure set ready. */
if (wait_event_interruptible_timeout(procfsq, r->status != 0, HZ) == 0) {
kprintf("ERROR: mckernel_procfs_read: timeout (1 sec).\n");
goto out;
}
/* Wake up and check the result. */
dprintk("mckernel_procfs_read: woke up. ret: %d, eof: %d\n", r->ret, r->eof);
if (r->ret > 0) {
*ppos += r->ret;
}
ret = r->ret;
out:
if(kern_buffer)
free_pages((uintptr_t)kern_buffer, order);
if(r)
kfree((void *)r);
return ret;
static ssize_t mckernel_procfs_write(struct file *file,
const char __user *buf, size_t nbytes, loff_t *ppos)
{
return __mckernel_procfs_read_write(file,
(char __user *)buf, nbytes, ppos, 1);
}
static loff_t

View File

@@ -306,7 +306,7 @@ static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, u
error = -ENOENT;
printk("%s: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
goto out_no_unmap;
goto out_put_ppd;
}
req = &packet->req;
@@ -326,6 +326,9 @@ retry_alloc:
/* Prepare per-thread wait queue head */
wqhln->task = current;
/* Save the TID explicitly, because mcexec_syscall(), where the request
* will be matched, is in IRQ context and can't call task_pid_vnr() */
wqhln->rtid = task_pid_vnr(current);
wqhln->req = 0;
init_waitqueue_head(&wqhln->wq_syscall);
@@ -434,9 +437,11 @@ out:
ihk_device_unmap_virtual(ihk_os_to_dev(usrdata->os), resp, sizeof(*resp));
ihk_device_unmap_memory(ihk_os_to_dev(usrdata->os), phys, sizeof(*resp));
out_no_unmap:
out_put_ppd:
dprintk("%s: tid: %d, fault_addr: %lu, reason: %lu, error: %d\n",
__FUNCTION__, task_pid_vnr(current), fault_addr, reason, error);
mcctrl_put_per_proc_data(ppd);
return error;
}
@@ -574,6 +579,7 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
size_t pix;
#endif
struct mcctrl_per_proc_data *ppd;
int ret = 0;
dprintk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
@@ -584,7 +590,6 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ppd = mcctrl_get_per_proc_data(usrdata, vma->vm_mm->owner->pid);
}
if (!ppd) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
@@ -618,7 +623,8 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (error) {
printk("mcctrl:page fault error:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
goto put_and_out;
}
rva = (unsigned long)vmf->virtual_address & ~(pgsize - 1);
@@ -655,10 +661,15 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (error) {
printk("mcctrl:page fault:remap error:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
goto put_and_out;
}
return VM_FAULT_NOPAGE;
ret = VM_FAULT_NOPAGE;
put_and_out:
mcctrl_put_per_proc_data(ppd);
return ret;
}
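The rus_vm_fault() hunks above replace the early return VM_FAULT_SIGBUS statements with a single put_and_out exit, so the reference taken by mcctrl_get_per_proc_data() is always balanced by mcctrl_put_per_proc_data(). A minimal self-contained sketch of this goto-cleanup idiom (all names hypothetical):

    #include <errno.h>

    struct ref { int users; };

    static struct ref *get_ref(void)    /* hypothetical: takes a reference */
    {
        static struct ref r;
        r.users++;
        return &r;
    }

    static void put_ref(struct ref *r)  /* hypothetical: drops it again */
    {
        r->users--;
    }

    static int handle_fault(int should_fail)
    {
        struct ref *r = get_ref();
        int ret = 0;

        if (!r)
            return -ENOENT;             /* nothing acquired, plain return */

        if (should_fail) {
            ret = -EFAULT;              /* error: fall through to the put */
            goto put_and_out;
        }

        /* ... fault handling would go here ... */

    put_and_out:
        put_ref(r);                     /* single release point */
        return ret;
    }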
static struct vm_operations_struct rus_vmops = {
@@ -757,6 +768,8 @@ enum {
MF_IS_REMOVABLE = 0x0004,
MF_PREFETCH = 0x0008,
MF_ZEROFILL = 0x0010,
MF_REG_FILE = 0x1000,
MF_DEV_FILE = 0x2000,
MF_END
};
@@ -1623,6 +1636,7 @@ int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet)
dprintk("%s: pid: %d, rpgtable: 0x%lx updated\n",
__FUNCTION__, ppd->pid, ppd->rpgtable);
mcctrl_put_per_proc_data(ppd);
}
ret = clear_pte_range(sc->args[0], sc->args[1]);

View File

@@ -6,14 +6,15 @@ VPATH=@abs_srcdir@
TARGET=mcexec
@uncomment_if_ENABLE_MEMDUMP@TARGET+=eclair
LIBS=@LIBS@
IHKDIR ?= $(VPATH)/../../../ihk/linux/include/
all: $(TARGET)
mcexec: mcexec.c
$(CC) -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -fPIE -pie -lrt -pthread -o $@ $^ $(EXTRA_OBJS)
$(CC) -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -fPIE -pie -lrt -lnuma -pthread -o $@ $^ $(EXTRA_OBJS)
eclair: eclair.c
$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
$(CC) $(CFLAGS) -I${IHKDIR} -o $@ $^ $(LIBS)
clean:
$(RM) $(TARGET) *.o

View File

@@ -16,20 +16,8 @@
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
/* From ihk/linux/include/ihk/ihk_host_user.h */
#define PHYS_CHUNKS_DESC_SIZE 8192
struct dump_mem_chunk {
unsigned long addr;
unsigned long size;
};
typedef struct dump_mem_chunks_s {
int nr_chunks;
struct dump_mem_chunk chunks[];
} dump_mem_chunks_t;
/* ---------- */
#include <sys/ioctl.h>
#include <ihk/ihk_host_user.h>
#define CPU_TID_BASE 1000000
@@ -39,6 +27,10 @@ struct options {
char *kernel_path;
char *dump_path;
char *log_path;
int interactive;
int os_id;
int mcos_fd;
int print_idle;
}; /* struct options */
struct thread_info {
@@ -56,7 +48,7 @@ struct thread_info {
int tid;
int cpu;
int lcpu;
int padding;
int idle;
uintptr_t process;
uintptr_t clv;
uintptr_t x86_clv;
@@ -150,7 +142,21 @@ static int read_mem(uintptr_t va, void *buf, size_t size) {
}
return 1;
}
error = read_physmem(pa, buf, size);
if (opt.interactive) {
dumpargs_t args;
args.cmd = DUMP_READ;
args.start = pa;
args.size = size;
args.buf = buf;
error = ioctl(opt.mcos_fd, IHK_OS_DUMP, &args);
}
else {
error = read_physmem(pa, buf, size);
}
if (error) {
perror("read_mem:read_physmem");
return 1;
@@ -256,6 +262,7 @@ static int setup_threads(void) {
perror("num_processors");
return 1;
}
printf("%s: num_processors: %d\n", __FUNCTION__, num_processors);
error = read_symbol_64("locals", &locals);
if (error) {
@@ -278,64 +285,6 @@ static int setup_threads(void) {
ihk_mc_switch_context = lookup_symbol("ihk_mc_switch_context");
if (0) printf("ihk_mc_switch_context: %lx\n", ihk_mc_switch_context);
/* Set up idle threads first */
for (cpu = 0; cpu < num_processors; ++cpu) {
uintptr_t v;
uintptr_t thread;
uintptr_t proc;
int pid;
int tid;
struct thread_info *ti;
int status;
v = clv + (cpu * K(CPU_LOCAL_VAR_SIZE));
ti = malloc(sizeof(*ti));
if (!ti) {
perror("malloc");
return 1;
}
thread = v+K(IDLE_THREAD_OFFSET);
error = read_64(thread+K(PROC_OFFSET), &proc);
if (error) {
perror("proc");
return 1;
}
error = read_32(thread+K(STATUS_OFFSET), &status);
if (error) {
perror("status");
return 1;
}
error = read_32(proc+K(PID_OFFSET), &pid);
if (error) {
perror("pid");
return 1;
}
error = read_32(thread+K(TID_OFFSET), &tid);
if (error) {
perror("tid");
return 1;
}
ti->next = NULL;
ti->status = status;
ti->pid = pid;
ti->tid = tid;
ti->cpu = cpu;
ti->lcpu = cpu;
ti->process = thread;
ti->clv = v;
ti->x86_clv = locals + locals_span*cpu;
*titailp = ti;
titailp = &ti->next;
}
for (cpu = 0; cpu < num_processors; ++cpu) {
uintptr_t v;
uintptr_t head;
@@ -400,15 +349,19 @@ static int setup_threads(void) {
ti->status = status;
ti->pid = pid;
ti->tid = tid;
ti->cpu = (thread == current)? cpu: -1;
ti->cpu = (thread == current) ? cpu : -1;
ti->lcpu = cpu;
ti->process = thread;
ti->idle = 0;
ti->clv = v;
ti->x86_clv = locals + locals_span*cpu;
*titailp = ti;
titailp = &ti->next;
if (!curr_thread)
curr_thread = ti;
error = read_64(entry, &entry);
if (error) {
perror("process2");
@@ -417,8 +370,78 @@ static int setup_threads(void) {
}
}
/* Set up idle threads */
if (opt.print_idle) {
for (cpu = 0; cpu < num_processors; ++cpu) {
uintptr_t v;
uintptr_t thread;
uintptr_t proc;
int pid;
int tid;
struct thread_info *ti;
int status;
v = clv + (cpu * K(CPU_LOCAL_VAR_SIZE));
error = read_64(v+K(CURRENT_OFFSET), &current);
if (error) {
perror("current");
return 1;
}
ti = malloc(sizeof(*ti));
if (!ti) {
perror("malloc");
return 1;
}
thread = v+K(IDLE_THREAD_OFFSET);
error = read_64(thread+K(PROC_OFFSET), &proc);
if (error) {
perror("proc");
return 1;
}
error = read_32(thread+K(STATUS_OFFSET), &status);
if (error) {
perror("status");
return 1;
}
error = read_32(proc+K(PID_OFFSET), &pid);
if (error) {
perror("pid");
return 1;
}
error = read_32(thread+K(TID_OFFSET), &tid);
if (error) {
perror("tid");
return 1;
}
ti->next = NULL;
ti->status = status;
ti->pid = 1;
ti->tid = 2000000000 + tid;
ti->cpu = (thread == current) ? cpu : -1;
ti->lcpu = cpu;
ti->process = thread;
ti->idle = 1;
ti->clv = v;
ti->x86_clv = locals + locals_span*cpu;
*titailp = ti;
titailp = &ti->next;
if (!curr_thread)
curr_thread = ti;
}
}
if (!tihead) {
printf("thread not found. cpu mode forcibly\n");
printf("No threads found, forcing CPU mode.\n");
opt.cpu = 1;
}
@@ -459,6 +482,7 @@ static int setup_threads(void) {
ti->tid = CPU_TID_BASE + cpu;
ti->cpu = cpu;
ti->process = current;
ti->idle = 1;
ti->clv = v;
ti->x86_clv = locals + locals_span*cpu;
@@ -471,7 +495,9 @@ static int setup_threads(void) {
printf("thread not found\n");
return 1;
}
curr_thread = tihead;
if (!curr_thread)
curr_thread = tihead;
return 0;
} /* setup_threads() */
@@ -713,18 +739,21 @@ static void command(char *cmd, char *res) {
break;
}
//if (regs[17] > MAP_KERNEL) {}
pu8 = (void *)&regs;
for (i = 0; i < sizeof(regs)-4; ++i) {
rbp += sprintf(rbp, "%02x", pu8[i]);
}
}
}
/*
else if (!strcmp(p, "mffffffff80018a82,1")) {
rbp += sprintf(rbp, "b8");
}
else if (!strcmp(p, "mffffffff80018a82,9")) {
rbp += sprintf(rbp, "b8f2ffffff41564155");
}
*/
else if (!strncmp(p, "m", 1)) {
int n;
uintptr_t start;
@@ -820,33 +849,35 @@ static void command(char *cmd, char *res) {
break;
}
q = buf;
q += sprintf(q, "PID %d, ", ti->pid);
if (ti->status & PS_RUNNING) {
q += sprintf(q, "running on cpu%d", ti->cpu);
q += sprintf(q, "%srunning on cpu %d",
ti->idle ? "idle " : "", ti->lcpu);
}
else if (ti->status & (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE)) {
q += sprintf(q, "waiting on cpu%d", ti->lcpu);
q += sprintf(q, "%swaiting on cpu %d",
ti->idle ? "idle " : "", ti->lcpu);
}
else if (ti->status & PS_STOPPED) {
q += sprintf(q, "stopped on cpu%d", ti->lcpu);
q += sprintf(q, "%sstopped on cpu %d",
ti->idle ? "idle " : "", ti->lcpu);
}
else if (ti->status & PS_TRACED) {
q += sprintf(q, "traced on cpu%d", ti->lcpu);
q += sprintf(q, "%straced on cpu %d",
ti->idle ? "idle " : "", ti->lcpu);
}
else if (ti->status == CS_IDLE) {
q += sprintf(q, "cpu%d idle", ti->cpu);
q += sprintf(q, "cpu %d idle", ti->cpu);
}
else if (ti->status == CS_RUNNING) {
q += sprintf(q, "cpu%d running", ti->cpu);
q += sprintf(q, "cpu %d running", ti->cpu);
}
else if (ti->status == CS_RESERVED) {
q += sprintf(q, "cpu%d reserved", ti->cpu);
q += sprintf(q, "cpu %d reserved", ti->cpu);
}
else {
q += sprintf(q, "status=%#x", ti->status);
}
if (ti->tid != ti->pid) {
q += sprintf(q, ",pid=%d", ti->pid);
}
rbp += print_hex(rbp, buf);
}
} while (0);
@@ -859,11 +890,12 @@ static void options(int argc, char *argv[]) {
memset(&opt, 0, sizeof(opt));
opt.kernel_path = "./mckernel.img";
opt.dump_path = "./mcdump";
opt.mcos_fd = -1;
for (;;) {
int c;
c = getopt(argc, argv, "cd:hk:");
c = getopt(argc, argv, "ilcd:hk:o:");
if (c < 0) {
break;
}
@@ -881,12 +913,32 @@ static void options(int argc, char *argv[]) {
case 'd':
opt.dump_path = optarg;
break;
case 'i':
opt.interactive = 1;
break;
case 'o':
opt.os_id = atoi(optarg);
break;
case 'l':
opt.print_idle = 1;
break;
}
}
if (optind < argc) {
opt.help = 1;
}
if (opt.interactive) {
char fn[128];
sprintf(fn, "/dev/mcos%d", opt.os_id);
opt.mcos_fd = open(fn, O_RDONLY);
if (opt.mcos_fd < 0) {
perror("open");
exit(1);
}
}
return;
} /* options() */
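With the extended option string "ilcd:hk:o:", eclair can attach to a live McKernel instance instead of a dump file: -i opens /dev/mcos<N> (N taken from -o) and routes read_mem() through the IHK_OS_DUMP ioctl, while -l includes the idle threads in the thread listing. A plausible invocation, assuming /dev/mcos0 exists on the machine, would be: eclair -i -o 0 -l -k ./mckernel.img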
@@ -969,7 +1021,7 @@ int main(int argc, char *argv[]) {
uint8_t sum;
uint8_t check;
static char lbuf[1024];
static char rbuf[1024];
static char rbuf[8192];
static char cbuf[3];
char *lbp;
char *p;

View File

@@ -66,6 +66,8 @@
#include "../include/uprotocol.h"
#include <getopt.h>
#include "../config.h"
#include <numa.h>
#include <numaif.h>
//#define DEBUG
@@ -1623,21 +1625,50 @@ int main(int argc, char **argv)
/* Partitioned execution, obtain CPU set */
if (nr_processes > 0) {
struct get_cpu_set_arg cpu_set_arg;
int mcexec_linux_numa = 0;
cpu_set_arg.cpu_set = (void *)&desc->cpu_set;
cpu_set_arg.cpu_set_size = sizeof(desc->cpu_set);
cpu_set_arg.nr_processes = nr_processes;
cpu_set_arg.target_core = &target_core;
cpu_set_arg.mcexec_linux_numa = &mcexec_linux_numa;
if (ioctl(fd, MCEXEC_UP_GET_CPUSET, (void *)&cpu_set_arg) != 0) {
perror("getting CPU set for partitioned execution");
close(fd);
return 1;
}
desc->cpu = target_core;
/* This call may not succeed, but that is fine */
if (numa_run_on_node(mcexec_linux_numa) < 0) {
__dprint("%s: WARNING: couldn't bind to NUMA %d\n",
__FUNCTION__, mcexec_linux_numa);
}
#ifdef DEBUG
else {
cpu_set_t cpuset;
char affinity[BUFSIZ];
CPU_ZERO(&cpuset);
if ((sched_getaffinity(0, sizeof(cpu_set_t), &cpuset)) != 0) {
perror("Error sched_getaffinity");
exit(1);
}
affinity[0] = '\0';
for (i = 0; i < 512; i++) {
if (CPU_ISSET(i, &cpuset) == 1) {
sprintf(affinity, "%s %d", affinity, i);
}
}
__dprint("%s: PID: %d affinity: %s\n",
__FUNCTION__, getpid(), affinity);
}
#endif
}
if (ioctl(fd, MCEXEC_UP_PREPARE_IMAGE, (unsigned long)desc) != 0) {
perror("prepare");
close(fd);
@@ -1746,8 +1777,8 @@ do_generic_syscall(
/* Overlayfs /sys/X directory lseek() problem work around */
if (w->sr.number == __NR_lseek && ret == -EINVAL) {
char proc_path[512];
char path[512];
char proc_path[PATH_MAX];
char path[PATH_MAX];
struct stat sb;
sprintf(proc_path, "/proc/self/fd/%d", (int)w->sr.args[0]);
@@ -1756,6 +1787,7 @@ do_generic_syscall(
if (readlink(proc_path, path, sizeof(path)) < 0) {
fprintf(stderr, "%s: error: readlink() failed for %s\n",
__FUNCTION__, proc_path);
perror(": ");
goto out;
}

View File

@@ -26,9 +26,21 @@
#include <march.h>
#include <cls.h>
//#define DEBUG_PRINT_AP
#ifdef DEBUG_PRINT_AP
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#endif
int num_processors = 1;
static volatile int ap_stop = 1;
mcs_lock_node_t ap_syscall_semaphore;
static void ap_wait(void)
{
init_tick();
@@ -43,7 +55,11 @@ static void ap_wait(void)
arch_start_pvclock();
if (find_command_line("hidos")) {
mcs_lock_node_t mcs_node;
mcs_lock_lock_noirq(&ap_syscall_semaphore, &mcs_node);
init_host_syscall_channel();
mcs_lock_unlock_noirq(&ap_syscall_semaphore, &mcs_node);
}
pc_ap_init();
@@ -57,6 +73,7 @@ static void ap_wait(void)
void ap_start(void)
{
init_tick();
mcs_lock_init(&ap_syscall_semaphore);
ap_stop = 0;
sync_tick();
}
@@ -93,13 +110,13 @@ void ap_init(void)
if (cpu_info->hw_ids[i] == bsp_hw_id) {
continue;
}
kprintf("AP Booting: %d (HW ID: %d @ NUMA %d)\n", i,
dkprintf("AP Booting: %d (HW ID: %d @ NUMA %d)\n", i,
cpu_info->hw_ids[i], cpu_info->nodes[i]);
ihk_mc_boot_cpu(cpu_info->hw_ids[i], (unsigned long)ap_wait);
num_processors++;
}
kprintf("AP Booting: Done\n");
kprintf("BSP: booted %d AP CPUs\n", cpu_info->ncpus - 1);
}
#include <sysfs.h>

View File

@@ -126,7 +126,7 @@ int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxp
__FUNCTION__, fd, len, off, result.handle, result.maxprot);
obj->memobj.ops = &devobj_ops;
obj->memobj.flags = MF_HAS_PAGER;
obj->memobj.flags = MF_HAS_PAGER | MF_DEV_FILE;
obj->memobj.size = len;
obj->handle = result.handle;
obj->ref = 1;
@@ -181,19 +181,21 @@ static void devobj_release(struct memobj *memobj)
memobj_unlock(&obj->memobj);
if (free_obj) {
int error;
ihk_mc_user_context_t ctx;
if (!(free_obj->memobj.flags & MF_HOST_RELEASED)) {
int error;
ihk_mc_user_context_t ctx;
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_UNMAP;
ihk_mc_syscall_arg1(&ctx) = handle;
ihk_mc_syscall_arg2(&ctx) = 1;
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_UNMAP;
ihk_mc_syscall_arg1(&ctx) = handle;
ihk_mc_syscall_arg2(&ctx) = 1;
error = syscall_generic_forwarding(__NR_mmap, &ctx);
if (error) {
kprintf("devobj_release(%p %lx):"
"release failed. %d\n",
free_obj, handle, error);
/* through */
error = syscall_generic_forwarding(__NR_mmap, &ctx);
if (error) {
kprintf("devobj_release(%p %lx):"
"release failed. %d\n",
free_obj, handle, error);
/* through */
}
}
if (obj->pfn_table) {

View File

@@ -213,7 +213,7 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
memset(newobj, 0, sizeof(*newobj));
newobj->memobj.ops = &fileobj_ops;
newobj->memobj.flags = MF_HAS_PAGER;
newobj->memobj.flags = MF_HAS_PAGER | MF_REG_FILE;
newobj->handle = result.handle;
newobj->sref = 1;
newobj->cref = 1;
@@ -294,6 +294,9 @@ static void fileobj_release(struct memobj *memobj)
obj->sref -= free_sref;
free_handle = obj->handle;
memobj_unlock(&obj->memobj);
if (obj->memobj.flags & MF_HOST_RELEASED) {
free_sref = 0; // don't call syscall_generic_forwarding
}
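/* MF_HOST_RELEASED appears to be set by free_all_process_memory_range()
 * (see the process.c hunk further below) once the host-side process has
 * terminated, so skipping syscall_generic_forwarding() here avoids
 * sending a request to an mcexec that no longer exists. */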
if (free_obj) {
dkprintf("%s: release obj 0x%lx cref: %d, free_obj: 0x%lx, %s\n",
@@ -497,7 +500,10 @@ static int fileobj_get_page(struct memobj *memobj, off_t off,
if (!page) {
npages = 1 << p2align;
virt = ihk_mc_alloc_pages(npages, IHK_MC_AP_NOWAIT);
virt = ihk_mc_alloc_pages(npages, IHK_MC_AP_NOWAIT |
(to_memobj(obj)->flags & MF_ZEROFILL) ? IHK_MC_AP_USER : 0);
if (!virt) {
error = -ENOMEM;
kprintf("fileobj_get_page(%p,%lx,%x,%p):"

View File

@@ -248,9 +248,13 @@ static int cmpxchg_futex_value_locked(uint32_t __user *uaddr, uint32_t uval, uin
static int get_futex_value_locked(uint32_t *dest, uint32_t *from)
{
/* RIKEN: futexes are always on non-swappable pages */
*dest = getint_user((int *)from);
/*
* Officially we should call:
* return getint_user((int *)dest, (int *)from);
*
* but McKernel on x86 can just access user-space.
*/
*dest = *(volatile uint32_t *)from;
return 0;
}
@@ -670,25 +674,32 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
uint64_t timeout)
{
uint64_t time_remain = 0;
unsigned long irqstate;
struct thread *thread = cpu_local_var(current);
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using set_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
* wake it.
* queue_me() calls spin_unlock() upon completion, serializing
* access to the hash list and forcing a memory barrier.
*/
xchg4(&(cpu_local_var(current)->status), PS_INTERRUPTIBLE);
/* Indicate spin sleep */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
thread->spin_sleep = 1;
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
queue_me(q, hb);
if (!plist_node_empty(&q->list)) {
/* RIKEN: use mcos timers */
if (timeout) {
dkprintf("futex_wait_queue_me(): tid: %d schedule_timeout()\n", cpu_local_var(current)->tid);
time_remain = schedule_timeout(timeout);
}
else {
dkprintf("futex_wait_queue_me(): tid: %d schedule()\n", cpu_local_var(current)->tid);
schedule();
spin_sleep_or_schedule();
time_remain = 0;
}
@@ -697,6 +708,7 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
/* This does not need to be serialized */
cpu_local_var(current)->status = PS_RUNNING;
thread->spin_sleep = 0;
return time_remain;
}
@@ -743,14 +755,17 @@ static int futex_wait_setup(uint32_t __user *uaddr, uint32_t val, int fshared,
*/
q->key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q->key);
if ((ret != 0))
if (ret != 0)
return ret;
*hb = queue_lock(q);
ret = get_futex_value_locked(&uval, uaddr);
/* RIKEN: get_futex_value_locked() always returns 0 on mckernel */
if (ret) {
queue_unlock(q, *hb);
put_futex_key(fshared, &q->key);
return ret;
}
if (uval != val) {
queue_unlock(q, *hb);
@@ -776,8 +791,6 @@ static int futex_wait(uint32_t __user *uaddr, int fshared,
q.bitset = bitset;
q.requeue_pi_key = NULL;
/* RIKEN: futex_wait_queue_me() calls schedule_timeout() if timer is set */
retry:
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);

View File

@@ -125,7 +125,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,
up = virt_to_phys(up_v);
if (add_process_memory_range(vm, s, e, up, flags, NULL, 0,
PAGE_SHIFT) != 0) {
PAGE_SHIFT, NULL) != 0) {
ihk_mc_free_pages(up_v, range_npages);
kprintf("ERROR: adding memory range for ELF section %i\n", i);
goto err;
@@ -213,7 +213,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,
args_envs_p = virt_to_phys(args_envs);
if(add_process_memory_range(vm, addr, e, args_envs_p,
flags, NULL, 0, PAGE_SHIFT) != 0){
flags, NULL, 0, PAGE_SHIFT, NULL) != 0){
ihk_mc_free_pages(args_envs, ARGENV_PAGE_COUNT);
kprintf("ERROR: adding memory range for args/envs\n");
goto err;
@@ -434,9 +434,6 @@ static int process_msg_prepare_process(unsigned long rphys)
vm->region.map_end = vm->region.map_start;
memcpy(proc->rlimit, pn->rlimit, sizeof(struct rlimit) * MCK_RLIM_MAX);
/* TODO: Clear it at the proper timing */
cpu_local_var(scp).post_idx = 0;
if (prepare_process_ranges_args_envs(thread, pn, p, attr,
NULL, 0, NULL, 0) != 0) {
kprintf("error: preparing process ranges, args, envs, stack\n");
@@ -461,70 +458,6 @@ err:
return -ENOMEM;
}
static void process_msg_init(struct ikc_scd_init_param *pcp, struct syscall_params *lparam)
{
lparam->response_va = ihk_mc_alloc_pages(RESPONSE_PAGE_COUNT, 0);
lparam->response_pa = virt_to_phys(lparam->response_va);
pcp->request_page = 0;
pcp->doorbell_page = 0;
pcp->response_page = lparam->response_pa;
}
static void process_msg_init_acked(struct ihk_ikc_channel_desc *c, unsigned long pphys)
{
struct ikc_scd_init_param *param = phys_to_virt(pphys);
struct syscall_params *lparam;
enum ihk_mc_pt_attribute attr;
attr = PTATTR_NO_EXECUTE | PTATTR_WRITABLE | PTATTR_FOR_USER;
lparam = &cpu_local_var(scp);
if(cpu_local_var(syscall_channel2) == c)
lparam = &cpu_local_var(scp2);
lparam->request_rpa = param->request_page;
lparam->request_pa = ihk_mc_map_memory(NULL, param->request_page,
REQUEST_PAGE_COUNT * PAGE_SIZE);
if((lparam->request_va = ihk_mc_map_virtual(lparam->request_pa,
REQUEST_PAGE_COUNT,
attr)) == NULL){
// TODO:
panic("ENOMEM");
}
lparam->doorbell_rpa = param->doorbell_page;
lparam->doorbell_pa = ihk_mc_map_memory(NULL, param->doorbell_page,
DOORBELL_PAGE_COUNT *
PAGE_SIZE);
if((lparam->doorbell_va = ihk_mc_map_virtual(lparam->doorbell_pa,
DOORBELL_PAGE_COUNT,
attr)) == NULL){
// TODO:
panic("ENOMEM");
}
lparam->post_rpa = param->post_page;
lparam->post_pa = ihk_mc_map_memory(NULL, param->post_page,
PAGE_SIZE);
if((lparam->post_va = ihk_mc_map_virtual(lparam->post_pa, 1,
attr)) == NULL){
// TODO:
panic("ENOMEM");
}
lparam->post_fin = 1;
dkprintf("Syscall parameters: (%d)\n", ihk_mc_get_processor_id());
dkprintf(" Response: %lx, %p\n",
lparam->response_pa, lparam->response_va);
dkprintf(" Request : %lx, %lx, %p\n",
lparam->request_pa, lparam->request_rpa, lparam->request_va);
dkprintf(" Doorbell: %lx, %lx, %p\n",
lparam->doorbell_pa, lparam->doorbell_rpa, lparam->doorbell_va);
dkprintf(" Post: %lx, %lx, %p\n",
lparam->post_pa, lparam->post_rpa, lparam->post_va);
}
static void syscall_channel_send(struct ihk_ikc_channel_desc *c,
struct ikc_scd_packet *packet)
{
@@ -532,7 +465,7 @@ static void syscall_channel_send(struct ihk_ikc_channel_desc *c,
}
extern unsigned long do_kill(struct thread *, int, int, int, struct siginfo *, int ptracecont);
extern void process_procfs_request(unsigned long rarg);
extern void process_procfs_request(struct ikc_scd_packet *rpacket);
extern void terminate_host(int pid);
extern void debug_log(long);
@@ -559,7 +492,6 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
switch (packet->msg) {
case SCD_MSG_INIT_CHANNEL_ACKED:
dkprintf("SCD_MSG_INIT_CHANNEL_ACKED\n");
process_msg_init_acked(c, packet->arg);
ret = 0;
break;
@@ -640,7 +572,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
break;
case SCD_MSG_PROCFS_REQUEST:
process_procfs_request(packet->arg);
process_procfs_request(packet);
ret = 0;
break;
@@ -699,7 +631,6 @@ void init_host_syscall_channel(void)
get_this_cpu_local_var()->syscall_channel = param.channel;
process_msg_init(&cpu_local_var(iip), &cpu_local_var(scp));
pckt.msg = SCD_MSG_INIT_CHANNEL;
pckt.ref = ihk_mc_get_processor_id();
pckt.arg = virt_to_phys(&cpu_local_var(iip));
@@ -726,7 +657,6 @@ void init_host_syscall_channel2(void)
get_this_cpu_local_var()->syscall_channel2 = param.channel;
process_msg_init(&cpu_local_var(iip2), &cpu_local_var(scp2));
pckt.msg = SCD_MSG_INIT_CHANNEL;
pckt.ref = ihk_mc_get_processor_id();
pckt.arg = virt_to_phys(&cpu_local_var(iip2));

View File

@@ -56,11 +56,9 @@ struct cpu_local_var {
size_t runq_len;
struct ihk_ikc_channel_desc *syscall_channel;
struct syscall_params scp;
struct ikc_scd_init_param iip;
struct ihk_ikc_channel_desc *syscall_channel2;
struct syscall_params scp2;
struct ikc_scd_init_param iip2;
struct resource_set *resource_set;

View File

@@ -28,9 +28,9 @@ r;\
})
#define kfree(ptr) _kfree(ptr, __FILE__, __LINE__)
#define memcheck(ptr, msg) _memcheck(ptr, msg, __FILE__, __LINE__, 0)
void *_kmalloc(int size, enum ihk_mc_ap_flag flag, char *file, int line);
void *_kmalloc(int size, ihk_mc_ap_flag flag, char *file, int line);
void _kfree(void *ptr, char *file, int line);
void *__kmalloc(int size, enum ihk_mc_ap_flag flag);
void *__kmalloc(int size, ihk_mc_ap_flag flag);
void __kfree(void *ptr);
int _memcheck(void *ptr, char *msg, char *file, int line, int free);

View File

@@ -34,6 +34,9 @@ enum {
MF_IS_REMOVABLE = 0x0004,
MF_PREFETCH = 0x0008,
MF_ZEROFILL = 0x0010,
MF_REG_FILE = 0x1000,
MF_DEV_FILE = 0x2000,
MF_HOST_RELEASED = 0x80000000,
MF_END
};
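MF_REG_FILE and MF_DEV_FILE mirror the flags now set in fileobj_create() and devobj_create() above, presumably so generic memory-range code can tell regular-file and device-file mappings apart when deciding whether backing pages may be freed.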

View File

@@ -232,8 +232,6 @@ enum mpol_rebind_step {
#include <waitq.h>
#include <futex.h>
//#define TRACK_SYSCALLS
struct resource_set;
struct process_hash;
struct thread_hash;
@@ -244,6 +242,28 @@ struct process_vm;
struct vm_regions;
struct vm_range;
//#define TRACK_SYSCALLS
#ifdef TRACK_SYSCALLS
#define TRACK_SYSCALLS_MAX 300
#define __NR_track_syscalls 701
#define TRACK_SYSCALLS_CLEAR 0x01
#define TRACK_SYSCALLS_ON 0x02
#define TRACK_SYSCALLS_OFF 0x04
#define TRACK_SYSCALLS_PRINT 0x08
#define TRACK_SYSCALLS_PRINT_PROC 0x10
void track_syscalls_print_thread_stats(struct thread *thread);
void track_syscalls_print_proc_stats(struct process *proc);
void track_syscalls_accumulate_counters(struct thread *thread,
struct process *proc);
void track_syscalls_alloc_counters(struct thread *thread);
void track_syscalls_dealloc_thread_counters(struct thread *thread);
void track_syscalls_dealloc_proc_counters(struct process *proc);
#endif // TRACK_SYSCALLS
#define HASH_SIZE 73
struct resource_set {
@@ -539,6 +559,13 @@ struct process {
#define PP_COUNT 2
#define PP_STOP 3
struct mc_perf_event *monitoring_event;
#ifdef TRACK_SYSCALLS
mcs_lock_node_t st_lock;
uint64_t *syscall_times;
uint32_t *syscall_cnts;
uint64_t *offload_times;
uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS
};
void hold_thread(struct thread *ftn);
@@ -612,7 +639,7 @@ struct thread {
int in_syscall_offload;
#ifdef TRACK_SYSCALLS
int socc_enabled;
int track_syscalls;
uint64_t *syscall_times;
uint32_t *syscall_cnts;
uint64_t *offload_times;
@@ -716,9 +743,10 @@ void free_process_memory_ranges(struct process_vm *vm);
int populate_process_memory(struct process_vm *vm, void *start, size_t len);
int add_process_memory_range(struct process_vm *vm,
unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t objoff, int pgshift);
unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t offset,
int pgshift, struct vm_range **rp);
int remove_process_memory_range(struct process_vm *vm, unsigned long start,
unsigned long end, int *ro_freedp);
int split_process_memory_range(struct process_vm *vm,
@@ -758,9 +786,11 @@ extern enum ihk_mc_pt_attribute arch_vrflag_to_ptattr(unsigned long flag, uint64
enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep);
void schedule(void);
void spin_sleep_or_schedule(void);
void runq_add_thread(struct thread *thread, int cpu_id);
void runq_del_thread(struct thread *thread, int cpu_id);
int sched_wakeup_thread(struct thread *thread, int valid_states);
int sched_wakeup_thread_locked(struct thread *thread, int valid_states);
void sched_request_migrate(int cpu_id, struct thread *thread);
void check_need_resched(void);

View File

@@ -255,22 +255,6 @@ struct syscall_post {
unsigned long v[8];
};
struct syscall_params {
unsigned long request_rpa, request_pa;
struct syscall_request *request_va;
unsigned long response_pa;
struct syscall_response *response_va;
unsigned long doorbell_rpa, doorbell_pa;
unsigned long *doorbell_va;
unsigned int post_idx;
unsigned long post_rpa, post_pa;
struct syscall_post *post_va;
unsigned long post_fin;
struct syscall_post post_buf IHK_DMA_ALIGN;
};
#define SYSCALL_DECLARE(name) long sys_##name(int n, ihk_mc_user_context_t *ctx)
#define SYSCALL_HEADER struct syscall_request request IHK_DMA_ALIGN; \
request.number = n

View File

@@ -27,6 +27,8 @@ typedef int (*waitq_func_t)(struct waitq_entry *wait, unsigned mode,
int default_wake_function(struct waitq_entry *wait, unsigned mode, int flags,
void *key);
int locked_wake_function(struct waitq_entry *wait, unsigned mode, int flags,
void *key);
typedef struct waitq {
ihk_spinlock_t lock;
@@ -57,6 +59,13 @@ typedef struct waitq_entry {
.link = { &(name).link, &(name).link } \
}
#define DECLARE_WAITQ_ENTRY_LOCKED(name, tsk) \
waitq_entry_t name = { \
.private = tsk, \
.func = locked_wake_function, \
.link = { &(name).link, &(name).link } \
}
extern void waitq_init(waitq_t *waitq);
extern void waitq_init_entry(waitq_entry_t *entry, struct thread *proc);
extern int waitq_active(waitq_t *waitq);
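DECLARE_WAITQ_ENTRY_LOCKED binds the entry to the new locked_wake_function instead of default_wake_function. Together with sched_wakeup_thread_locked() in the process.c hunk below, this presumably lets do_migrate() wake the migrating thread while the destination runq lock is still held, without re-acquiring that lock in the wake path.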

View File

@@ -336,11 +336,8 @@ static void post_init(void)
}
if (find_command_line("hidos")) {
extern ihk_spinlock_t syscall_lock;
init_host_syscall_channel();
init_host_syscall_channel2();
ihk_mc_spinlock_init(&syscall_lock);
}
arch_setup_vdso();

View File

@@ -49,7 +49,7 @@
#endif
static unsigned long pa_start, pa_end;
static struct ihk_mc_numa_node *memory_nodes = NULL;
static struct ihk_mc_numa_node memory_nodes[512];
extern void unhandled_page_fault(struct thread *, void *, void *);
extern int interrupt_from_user(void *);
@@ -65,12 +65,12 @@ extern void early_alloc_invalidate(void);
static char *memdebug = NULL;
static void *___kmalloc(int size, enum ihk_mc_ap_flag flag);
static void *___kmalloc(int size, ihk_mc_ap_flag flag);
static void ___kfree(void *ptr);
static void *___ihk_mc_alloc_aligned_pages(int npages,
int p2align, enum ihk_mc_ap_flag flag);
static void *___ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag);
static void *___ihk_mc_alloc_aligned_pages_node(int npages,
int p2align, ihk_mc_ap_flag flag, int node);
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag);
static void ___ihk_mc_free_pages(void *p, int npages);
/*
@@ -151,14 +151,15 @@ struct pagealloc_track_entry *__pagealloc_track_find_entry(
}
/* Top level routines called from macros */
void *_ihk_mc_alloc_aligned_pages(int npages, int p2align,
enum ihk_mc_ap_flag flag, char *file, int line)
void *_ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
ihk_mc_ap_flag flag, int node, char *file, int line)
{
unsigned long irqflags;
struct pagealloc_track_entry *entry;
struct pagealloc_track_addr_entry *addr_entry;
int hash, addr_hash;
void *r = ___ihk_mc_alloc_aligned_pages(npages, p2align, flag);
void *r = ___ihk_mc_alloc_aligned_pages_node(npages,
p2align, flag, node);
if (!memdebug || !pagealloc_track_initialized)
return r;
@@ -230,12 +231,6 @@ out:
return r;
}
void *_ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag,
char *file, int line)
{
return _ihk_mc_alloc_aligned_pages(npages, PAGE_P2ALIGN, flag, file, line);
}
void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line)
{
unsigned long irqflags;
@@ -449,18 +444,18 @@ void pagealloc_memcheck(void)
/* Actual allocation routines */
static void *___ihk_mc_alloc_aligned_pages(int npages, int p2align,
enum ihk_mc_ap_flag flag)
static void *___ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
ihk_mc_ap_flag flag, int node)
{
if (pa_ops)
return pa_ops->alloc_page(npages, p2align, flag);
return pa_ops->alloc_page(npages, p2align, flag, node);
else
return early_alloc_pages(npages);
}
static void *___ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag)
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag)
{
return ___ihk_mc_alloc_aligned_pages(npages, PAGE_P2ALIGN, flag);
return ___ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1);
}
static void ___ihk_mc_free_pages(void *p, int npages)
@@ -495,8 +490,8 @@ static void reserve_pages(struct ihk_page_allocator_desc *pa_allocator,
}
extern int cpu_local_var_initialized;
static void *allocate_aligned_pages(int npages, int p2align,
enum ihk_mc_ap_flag flag)
static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
ihk_mc_ap_flag flag, int pref_node)
{
unsigned long pa = 0;
int i, node;
@@ -509,15 +504,31 @@ static void *allocate_aligned_pages(int npages, int p2align,
goto distance_based;
/* User requested policy? */
if (!(flag & IHK_MC_AP_USER)) {
goto distance_based;
}
node = ihk_mc_get_numa_id();
if (!memory_nodes[node].nodes_by_distance)
goto order_based;
switch (cpu_local_var(current)->vm->numa_mem_policy) {
case MPOL_BIND:
case MPOL_PREFERRED:
for_each_set_bit(node,
cpu_local_var(current)->proc->vm->numa_mask,
ihk_mc_get_nr_numa_nodes()) {
/* Look at nodes in the order of distance but consider
* only the ones requested in user policy */
for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
/* Not part of user requested policy? */
if (!test_bit(memory_nodes[node].nodes_by_distance[i].id,
cpu_local_var(current)->proc->vm->numa_mask)) {
continue;
}
list_for_each_entry(pa_allocator,
&memory_nodes[node].allocators, list) {
&memory_nodes[memory_nodes[node].
nodes_by_distance[i].id].allocators, list) {
pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
if (pa) {
@@ -542,8 +553,13 @@ static void *allocate_aligned_pages(int npages, int p2align,
break;
}
if (pa)
if (pa) {
return phys_to_virt(pa);
}
else {
dkprintf("%s: couldn't fulfill user policy for %d pages\n",
__FUNCTION__, npages);
}
distance_based:
node = ihk_mc_get_numa_id();
@@ -602,12 +618,7 @@ order_based:
return NULL;
}
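The MPOL_BIND/MPOL_PREFERRED path above walks nodes_by_distance[] and skips any node outside the process's numa_mask, so policy-restricted allocations still favor the closest eligible node. A minimal self-contained sketch of that selection loop (mask_test() and pick_node() are illustrative stand-ins for the kernel helpers):

    /* Stand-in for test_bit() over a single-word node mask. */
    static int mask_test(unsigned long mask, int bit)
    {
        return (int)((mask >> bit) & 1UL);
    }

    /* Pick the nearest NUMA node also allowed by the user policy mask;
     * nodes_by_distance[] is assumed sorted from closest to farthest. */
    static int pick_node(const int *nodes_by_distance, int nr_nodes,
                         unsigned long allowed_mask)
    {
        int i;

        for (i = 0; i < nr_nodes; ++i) {
            int node = nodes_by_distance[i];

            if (!mask_test(allowed_mask, node))
                continue;               /* outside the MPOL_BIND set */

            return node;                /* closest eligible node */
        }

        return -1;                      /* caller falls back to distance order */
    }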
static void *allocate_pages(int npages, enum ihk_mc_ap_flag flag)
{
return allocate_aligned_pages(npages, PAGE_P2ALIGN, flag);
}
static void __free_pages_in_allocator(void *va, int npages)
static void __mckernel_free_pages_in_allocator(void *va, int npages)
{
int i;
unsigned long pa_start = virt_to_phys(va);
@@ -630,7 +641,7 @@ static void __free_pages_in_allocator(void *va, int npages)
}
static void free_pages(void *va, int npages)
static void mckernel_free_pages(void *va, int npages)
{
struct list_head *pendings = &cpu_local_var(pending_free_pages);
struct page *page;
@@ -638,7 +649,8 @@ static void free_pages(void *va, int npages)
page = phys_to_page(virt_to_phys(va));
if (page) {
if (page->mode != PM_NONE) {
panic("free_pages:not PM_NONE");
kprintf("%s: WARNING: page phys 0x%lx is not PM_NONE",
__FUNCTION__, page->phys);
}
if (pendings->next != NULL) {
page->mode = PM_PENDING_FREE;
@@ -648,7 +660,7 @@ static void free_pages(void *va, int npages)
}
}
__free_pages_in_allocator(va, npages);
__mckernel_free_pages_in_allocator(va, npages);
}
void begin_free_pages_pending(void) {
@@ -677,7 +689,7 @@ void finish_free_pages_pending(void)
}
page->mode = PM_NONE;
list_del(&page->list);
__free_pages_in_allocator(phys_to_virt(page_to_phys(page)),
__mckernel_free_pages_in_allocator(phys_to_virt(page_to_phys(page)),
page->offset);
}
@@ -686,8 +698,8 @@ void finish_free_pages_pending(void)
}
static struct ihk_mc_pa_ops allocator = {
.alloc_page = allocate_aligned_pages,
.free_page = free_pages,
.alloc_page = mckernel_allocate_aligned_pages_node,
.free_page = mckernel_free_pages,
};
void sbox_write(int offset, unsigned int value);
@@ -948,12 +960,14 @@ out:
}
static struct ihk_page_allocator_desc *page_allocator_init(uint64_t start,
uint64_t end, int initial)
uint64_t end)
{
struct ihk_page_allocator_desc *pa_allocator;
unsigned long page_map_pa, pages;
void *page_map;
unsigned int i;
extern char _end[];
unsigned long phys_end = virt_to_phys(_end);
start &= PAGE_MASK;
pa_start = (start + PAGE_SIZE - 1) & PAGE_MASK;
@@ -966,7 +980,12 @@ static struct ihk_page_allocator_desc *page_allocator_init(uint64_t start,
*/
page_map_pa = 0x100000;
#else
page_map_pa = initial ? virt_to_phys(get_last_early_heap()) : pa_start;
if (pa_start <= phys_end && phys_end <= pa_end) {
page_map_pa = virt_to_phys(get_last_early_heap());
}
else {
page_map_pa = pa_start;
}
#endif
page_map = phys_to_virt(page_map_pa);
@@ -997,14 +1016,16 @@ static struct ihk_page_allocator_desc *page_allocator_init(uint64_t start,
static void numa_init(void)
{
int i, j;
memory_nodes = early_alloc_pages((sizeof(*memory_nodes) *
ihk_mc_get_nr_numa_nodes() + PAGE_SIZE - 1)
>> PAGE_SHIFT);
for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
int linux_numa_id, type;
ihk_mc_get_numa_node(i, &linux_numa_id, &type);
if (ihk_mc_get_numa_node(i, &linux_numa_id, &type) != 0) {
kprintf("%s: error: obtaining NUMA info for node %d\n",
__FUNCTION__, i);
panic("");
}
memory_nodes[i].id = i;
memory_nodes[i].linux_numa_id = linux_numa_id;
memory_nodes[i].type = type;
@@ -1022,7 +1043,7 @@ static void numa_init(void)
ihk_mc_get_memory_chunk(j, &start, &end, &numa_id);
allocator = page_allocator_init(start, end, (j == 0));
allocator = page_allocator_init(start, end);
list_add_tail(&allocator->list, &memory_nodes[numa_id].allocators);
kprintf("Physical memory: 0x%lx - 0x%lx, %lu bytes, %d pages available @ NUMA: %d\n",
@@ -1472,7 +1493,7 @@ struct kmalloc_track_entry *__kmalloc_track_find_entry(
}
/* Top level routines called from macro */
void *_kmalloc(int size, enum ihk_mc_ap_flag flag, char *file, int line)
void *_kmalloc(int size, ihk_mc_ap_flag flag, char *file, int line)
{
unsigned long irqflags;
struct kmalloc_track_entry *entry;
@@ -1662,7 +1683,7 @@ void kmalloc_memcheck(void)
}
/* Redirection routines registered in alloc structure */
void *__kmalloc(int size, enum ihk_mc_ap_flag flag)
void *__kmalloc(int size, ihk_mc_ap_flag flag)
{
return kmalloc(size, flag);
}
@@ -1760,7 +1781,7 @@ void kmalloc_consolidate_free_list(void)
#define KMALLOC_MIN_MASK (KMALLOC_MIN_SIZE - 1)
/* Actual low-level allocation routines */
static void *___kmalloc(int size, enum ihk_mc_ap_flag flag)
static void *___kmalloc(int size, ihk_mc_ap_flag flag)
{
struct kmalloc_header *chunk_iter;
struct kmalloc_header *chunk = NULL;

View File

@@ -101,6 +101,13 @@ init_process(struct process *proc, struct process *parent)
waitq_init(&proc->waitpid_q);
ihk_atomic_set(&proc->refcount, 2);
proc->monitoring_event = NULL;
#ifdef TRACK_SYSCALLS
mcs_lock_init(&proc->st_lock);
proc->syscall_times = NULL;
proc->syscall_cnts = NULL;
proc->offload_times = NULL;
proc->offload_cnts = NULL;
#endif
}
void
@@ -468,6 +475,9 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
ihk_mc_spinlock_init(&thread->spin_sleep_lock);
thread->spin_sleep = 0;
#ifdef TRACK_SYSCALLS
thread->track_syscalls = org->track_syscalls;
#endif
return thread;
@@ -1018,21 +1028,18 @@ enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fa
}
int add_process_memory_range(struct process_vm *vm,
unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t offset,
int pgshift)
unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t offset,
int pgshift, struct vm_range **rp)
{
struct vm_range *range;
int rc;
#if 0
extern void __host_update_process_range(struct thread *process,
struct vm_range *range);
#endif
if ((start < vm->region.user_start)
|| (vm->region.user_end < end)) {
kprintf("range(%#lx - %#lx) is not in user avail(%#lx - %#lx)\n",
kprintf("%s: error: range %lx - %lx is not in user available area\n",
__FUNCTION__,
start, end, vm->region.user_start,
vm->region.user_end);
return -EINVAL;
@@ -1040,9 +1047,10 @@ int add_process_memory_range(struct process_vm *vm,
range = kmalloc(sizeof(struct vm_range), IHK_MC_AP_NOWAIT);
if (!range) {
kprintf("ERROR: allocating pages for range\n");
kprintf("%s: ERROR: allocating pages for range\n", __FUNCTION__);
return -ENOMEM;
}
INIT_LIST_HEAD(&range->list);
range->start = start;
range->end = end;
@@ -1051,48 +1059,34 @@ int add_process_memory_range(struct process_vm *vm,
range->objoff = offset;
range->pgshift = pgshift;
if(range->flag & VR_DEMAND_PAGING) {
dkprintf("range: 0x%lX - 0x%lX => physicall memory area is allocated on demand (%ld) [%lx]\n",
range->start, range->end, range->end - range->start,
range->flag);
} else {
dkprintf("range: 0x%lX - 0x%lX (%ld) [%lx]\n",
range->start, range->end, range->end - range->start,
range->flag);
}
rc = 0;
if (0) {
/* dummy */
}
else if (phys == NOPHYS) {
/* nothing to map */
}
else if (flag & VR_REMOTE) {
rc = 0;
if (phys == NOPHYS) {
/* Nothing to map */
}
else if (flag & VR_REMOTE) {
rc = update_process_page_table(vm, range, phys, IHK_PTA_REMOTE);
} else if (flag & VR_IO_NOCACHE) {
}
else if (flag & VR_IO_NOCACHE) {
rc = update_process_page_table(vm, range, phys, PTATTR_UNCACHABLE);
} else if(flag & VR_DEMAND_PAGING){
//demand paging no need to update process table now
dkprintf("demand paging do not update process page table\n");
rc = 0;
} else if ((range->flag & VR_PROT_MASK) == VR_PROT_NONE) {
}
else if (flag & VR_DEMAND_PAGING) {
dkprintf("%s: range: 0x%lx - 0x%lx is demand paging\n",
__FUNCTION__, range->start, range->end);
rc = 0;
} else {
}
else if ((range->flag & VR_PROT_MASK) == VR_PROT_NONE) {
rc = 0;
}
else {
rc = update_process_page_table(vm, range, phys, 0);
}
if(rc != 0){
kprintf("ERROR: preparing page tables\n");
if (rc != 0) {
kprintf("%s: ERROR: preparing page tables\n", __FUNCTION__);
kfree(range);
return rc;
}
#if 0 // disable __host_update_process_range() in add_process_memory_range(), because it has no effect on the actual mapping on the MICs side.
if (!(flag & VR_REMOTE)) {
__host_update_process_range(process, range);
}
#endif
insert_vm_range_list(vm, range);
/* Clear content! */
@@ -1101,6 +1095,11 @@ int add_process_memory_range(struct process_vm *vm,
memset((void*)phys_to_virt(phys), 0, end - start);
}
/* Return range object if requested */
if (rp) {
*rp = range;
}
return 0;
}
@@ -1852,6 +1851,7 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
unsigned long minsz;
unsigned long at_rand;
struct process *proc = thread->proc;
unsigned long __flag;
/* create stack range */
end = STACK_TOP(&thread->vm->region);
@@ -1870,12 +1870,15 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
vrflag |= VR_MAXPROT_READ | VR_MAXPROT_WRITE | VR_MAXPROT_EXEC;
#define NOPHYS ((uintptr_t)-1)
if ((rc = add_process_memory_range(thread->vm, start, end, NOPHYS,
vrflag, NULL, 0, PAGE_SHIFT)) != 0) {
vrflag, NULL, 0, PAGE_SHIFT, NULL)) != 0) {
return rc;
}
__flag = (size >= 16777216) ? IHK_MC_AP_USER : 0;
/* map physical pages for initial stack frame */
stack = ihk_mc_alloc_pages(minsz >> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
stack = ihk_mc_alloc_pages(minsz >> PAGE_SHIFT,
IHK_MC_AP_NOWAIT | __flag);
if (!stack) {
return -ENOMEM;
}
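(16777216 bytes is 16 MiB, so only initial stacks at least that large take the IHK_MC_AP_USER path; this appears to let big stack allocations honor the user's NUMA policy, consistent with the allocator changes above.)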
@@ -1989,7 +1992,7 @@ unsigned long extend_process_region(struct process_vm *vm,
}
if((rc = add_process_memory_range(vm, old_aligned_end,
aligned_end, virt_to_phys(p), flag,
LARGE_PAGE_SHIFT)) != 0){
LARGE_PAGE_SHIFT, NULL)) != 0){
ihk_mc_free_pages(p, (aligned_end - old_aligned_end) >> PAGE_SHIFT);
return end;
}
@@ -2019,7 +2022,7 @@ unsigned long extend_process_region(struct process_vm *vm,
if((rc = add_process_memory_range(vm, aligned_end,
aligned_new_end, virt_to_phys((void *)p_aligned),
flag, LARGE_PAGE_SHIFT)) != 0){
flag, LARGE_PAGE_SHIFT, NULL)) != 0){
ihk_mc_free_pages(p, (aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT);
return end;
}
@@ -2038,15 +2041,16 @@ unsigned long extend_process_region(struct process_vm *vm,
p=0;
}else{
p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT,
IHK_MC_AP_NOWAIT | IHK_MC_AP_USER);
if (!p) {
return end;
}
}
if((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
(p==0?0:virt_to_phys(p)), flag, NULL, 0,
PAGE_SHIFT)) != 0){
if ((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
(p == 0 ? 0 : virt_to_phys(p)), flag, NULL, 0,
PAGE_SHIFT, NULL)) != 0) {
ihk_mc_free_pages(p, (aligned_new_end - aligned_end) >> PAGE_SHIFT);
return end;
}
@@ -2161,6 +2165,10 @@ release_process(struct process *proc)
}
if (proc->tids) kfree(proc->tids);
#ifdef TRACK_SYSCALLS
track_syscalls_print_proc_stats(proc);
track_syscalls_dealloc_proc_counters(proc);
#endif // TRACK_SYSCALLS
kfree(proc);
}
@@ -2178,6 +2186,9 @@ free_all_process_memory_range(struct process_vm *vm)
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
list_for_each_entry_safe(range, next, &vm->vm_range_list, list) {
if (range->memobj) {
range->memobj->flags |= MF_HOST_RELEASED;
}
error = free_process_memory_range(vm, range);
if (error) {
ekprintf("free_process_memory(%p):"
@@ -2344,6 +2355,11 @@ void release_thread(struct thread *thread)
vm = thread->vm;
#ifdef TRACK_SYSCALLS
track_syscalls_accumulate_counters(thread, thread->proc);
//track_syscalls_print_thread_stats(thread);
track_syscalls_dealloc_thread_counters(thread);
#endif // TRACK_SYSCALLS
procfs_delete_thread(thread);
destroy_thread(thread);
@@ -2352,7 +2368,7 @@ void release_thread(struct thread *thread)
void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
unsigned int flags;
unsigned long flags;
flags = ihk_mc_spinlock_lock(lock);
CPU_SET(cpu, cpu_set);
ihk_mc_spinlock_unlock(lock, flags);
@@ -2360,7 +2376,7 @@ void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
unsigned int flags;
unsigned long flags;
flags = ihk_mc_spinlock_lock(lock);
CPU_CLR(cpu, cpu_set);
ihk_mc_spinlock_unlock(lock, flags);
@@ -2369,7 +2385,7 @@ void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
void cpu_clear_and_set(int c_cpu, int s_cpu,
cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
unsigned int flags;
unsigned long flags;
flags = ihk_mc_spinlock_lock(lock);
CPU_CLR(c_cpu, cpu_set);
CPU_SET(s_cpu, cpu_set);
@@ -2394,7 +2410,9 @@ static void idle(void)
cpu_enable_interrupt();
while (1) {
cpu_local_var(current)->status = PS_STOPPED;
schedule();
cpu_local_var(current)->status = PS_RUNNING;
cpu_disable_interrupt();
/* See if we need to migrate a process somewhere */
@@ -2440,7 +2458,9 @@ static void idle(void)
v->status == CPU_STATUS_RESERVED) {
/* No work to do? Consolidate the kmalloc free list */
kmalloc_consolidate_free_list();
cpu_local_var(current)->status = PS_INTERRUPTIBLE;
cpu_safe_halt();
cpu_local_var(current)->status = PS_RUNNING;
}
else {
cpu_enable_interrupt();
@@ -2637,13 +2657,13 @@ static void do_migrate(void)
&req->thread->vm->address_space->cpu_set,
&req->thread->vm->address_space->cpu_set_lock);
dkprintf("do_migrate(): migrated TID %d from CPU %d to CPU %d\n",
req->thread->tid, old_cpu_id, cpu_id);
dkprintf("%s: migrated TID %d from CPU %d to CPU %d\n",
__FUNCTION__, req->thread->tid, old_cpu_id, cpu_id);
v->flags |= CPU_FLAG_NEED_RESCHED;
ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
waitq_wakeup(&req->wq);
double_rq_unlock(cur_v, v, irqstate);
continue;
ack:
waitq_wakeup(&req->wq);
}
@@ -2670,6 +2690,65 @@ set_timer()
}
}
/*
* NOTE: it is assumed that a wait-queue (or futex queue) is
* set before calling this function.
* NOTE: one must set thread->spin_sleep to 1 before evaluating
* the wait condition to avoid lost wake-ups.
*/
void spin_sleep_or_schedule(void)
{
struct thread *thread = cpu_local_var(current);
struct cpu_local_var *v;
int do_schedule = 0;
int woken = 0;
long irqstate;
/* Try to spin sleep */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
if (thread->spin_sleep == 0) {
dkprintf("%s: caught a lost wake-up!\n", __FUNCTION__);
}
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
for (;;) {
/* Check if we need to reschedule */
irqstate =
ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
v = get_this_cpu_local_var();
if (v->flags & CPU_FLAG_NEED_RESCHED || v->runq_len > 1) {
do_schedule = 1;
}
ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
/* Check if we were woken up */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
if (thread->spin_sleep == 0) {
woken = 1;
}
/* Indicate that we are not spinning any more */
if (do_schedule) {
thread->spin_sleep = 0;
}
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
if (woken) {
return;
}
if (do_schedule) {
break;
}
cpu_pause();
}
schedule();
}
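spin_sleep_or_schedule() and the wake-up side cooperate through thread->spin_sleep: the sleeper publishes its intent before testing the wait condition, and __sched_wakeup_thread() below clears the flag, so a wake-up racing the condition test is observed as spin_sleep == 0 (the "caught a lost wake-up" case above). The following toy user-space model of that handshake uses C11 atomics and is not McKernel code:

    #include <sched.h>
    #include <stdatomic.h>

    static atomic_int spin_sleep;       /* models thread->spin_sleep */
    static atomic_int condition;        /* models the wait condition */

    static void sleeper(void)
    {
        atomic_store(&spin_sleep, 1);       /* 1: advertise intent to sleep */

        if (atomic_load(&condition))        /* 2: test condition afterwards */
            return;                         /* already satisfied */

        while (atomic_load(&spin_sleep))    /* 3: spin until the waker clears */
            sched_yield();                  /* stand-in for cpu_pause() */
    }

    static void waker(void)
    {
        atomic_store(&condition, 1);        /* make the condition true... */
        atomic_store(&spin_sleep, 0);       /* ...then clear spin_sleep */
    }

If the waker runs between steps 1 and 3, the sleeper still exits the loop because spin_sleep is already 0; publishing intent before the test is what makes the wake-up impossible to lose.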
void schedule(void)
{
struct cpu_local_var *v;
@@ -2832,39 +2911,38 @@ void check_need_resched(void)
}
}
int
sched_wakeup_thread(struct thread *thread, int valid_states)
int __sched_wakeup_thread(struct thread *thread,
int valid_states, int runq_locked)
{
int status;
int spin_slept = 0;
unsigned long irqstate;
struct cpu_local_var *v = get_cpu_local_var(thread->cpu_id);
struct process *proc = thread->proc;
struct mcs_rwlock_node updatelock;
dkprintf("sched_wakeup_process,proc->pid=%d,valid_states=%08x,proc->status=%08x,proc->cpu_id=%d,my cpu_id=%d\n",
proc->pid, valid_states, thread->status, thread->cpu_id, ihk_mc_get_processor_id());
dkprintf("%s: proc->pid=%d, valid_states=%08x, "
"proc->status=%08x, proc->cpu_id=%d,my cpu_id=%d\n",
__FUNCTION__,
proc->pid, valid_states, thread->status,
thread->cpu_id, ihk_mc_get_processor_id());
irqstate = ihk_mc_spinlock_lock(&(thread->spin_sleep_lock));
if (thread->spin_sleep > 0) {
dkprintf("sched_wakeup_process() spin wakeup: cpu_id: %d\n",
thread->cpu_id);
if (thread->spin_sleep == 1) {
dkprintf("%s: spin wakeup: cpu_id: %d\n",
__FUNCTION__, thread->cpu_id);
spin_slept = 1;
status = 0;
}
--thread->spin_sleep;
thread->spin_sleep = 0;
ihk_mc_spinlock_unlock(&(thread->spin_sleep_lock), irqstate);
if (spin_slept) {
return status;
if (!runq_locked) {
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
}
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
if (thread->status & valid_states) {
mcs_rwlock_writer_lock_noirq(&proc->update_lock, &updatelock);
if(proc->status != PS_EXITED)
if (proc->status != PS_EXITED)
proc->status = PS_RUNNING;
mcs_rwlock_writer_unlock_noirq(&proc->update_lock, &updatelock);
xchg4((int *)(&thread->status), PS_RUNNING);
@@ -2874,18 +2952,32 @@ sched_wakeup_thread(struct thread *thread, int valid_states)
status = -EINVAL;
}
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
if (!runq_locked) {
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
}
if (!status && (thread->cpu_id != ihk_mc_get_processor_id())) {
dkprintf("sched_wakeup_process,issuing IPI,thread->cpu_id=%d\n",
thread->cpu_id);
ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(thread->cpu_id)->apic_id,
0xd1);
dkprintf("%s: issuing IPI, thread->cpu_id=%d\n",
__FUNCTION__, thread->cpu_id);
ihk_mc_interrupt_cpu(
get_x86_cpu_local_variable(thread->cpu_id)->apic_id,
0xd1);
}
return status;
}
int sched_wakeup_thread_locked(struct thread *thread, int valid_states)
{
return __sched_wakeup_thread(thread, valid_states, 1);
}
int sched_wakeup_thread(struct thread *thread, int valid_states)
{
return __sched_wakeup_thread(thread, valid_states, 0);
}
/*
* 1. Add current process to waitq
* 2. Queue migration request into the target CPU's queue
@@ -2909,7 +3001,7 @@ void sched_request_migrate(int cpu_id, struct thread *thread)
struct cpu_local_var *v = get_cpu_local_var(cpu_id);
struct migrate_request req = { .thread = thread };
unsigned long irqstate;
DECLARE_WAITQ_ENTRY(entry, cpu_local_var(current));
DECLARE_WAITQ_ENTRY_LOCKED(entry, cpu_local_var(current));
waitq_init(&req.wq);
waitq_prepare_to_wait(&req.wq, &entry, PS_UNINTERRUPTIBLE);
@@ -2926,6 +3018,8 @@ void sched_request_migrate(int cpu_id, struct thread *thread)
if (cpu_id != ihk_mc_get_processor_id())
ihk_mc_interrupt_cpu(/* Kick scheduler */
get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
dkprintf("%s: tid: %d -> cpu: %d\n",
__FUNCTION__, thread->tid, cpu_id);
schedule();
waitq_finish_wait(&req.wq, &entry);

View File

@@ -76,11 +76,11 @@ procfs_delete_thread(struct thread *thread)
*
* \param rarg returned argument
*/
void
process_procfs_request(unsigned long rarg)
void process_procfs_request(struct ikc_scd_packet *rpacket)
{
unsigned long rarg = rpacket->arg;
unsigned long parg, pbuf;
struct thread *thread = NULL;
struct thread *thread = NULL;
struct process *proc = NULL;
struct process_vm *vm = NULL;
struct procfs_read *r;
@@ -161,7 +161,7 @@ process_procfs_request(unsigned long rarg)
*/
ret = sscanf(p, "%d/", &pid);
if (ret == 1) {
struct mcs_rwlock_node tlock;
struct mcs_rwlock_node_irqsave tlock;
int tids;
struct thread *thread1 = NULL;
@@ -178,7 +178,7 @@ process_procfs_request(unsigned long rarg)
else
tid = pid;
mcs_rwlock_reader_lock_noirq(&proc->threads_lock, &tlock);
mcs_rwlock_reader_lock(&proc->threads_lock, &tlock);
list_for_each_entry(thread, &proc->threads_list, siblings_list){
if(thread->tid == tid)
break;
@@ -188,15 +188,15 @@ process_procfs_request(unsigned long rarg)
if(thread == NULL){
kprintf("process_procfs_request: no such tid %d-%d\n", pid, tid);
if(tids){
mcs_rwlock_reader_unlock(&proc->threads_lock, &tlock);
process_unlock(proc, &lock);
mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &tlock);
goto end;
}
thread = thread1;
}
if(thread)
hold_thread(thread);
mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &tlock);
mcs_rwlock_reader_unlock(&proc->threads_lock, &tlock);
hold_process(proc);
vm = proc->vm;
if(vm)
@@ -633,6 +633,7 @@ dataunavail:
packet.msg = SCD_MSG_PROCFS_ANSWER;
packet.arg = rarg;
packet.pid = rpacket->pid;
ret = ihk_ikc_send(syscall_channel, &packet, 0);
if (ret < 0) {

View File

@@ -94,8 +94,8 @@ static long (*syscall_table[])(int, ihk_mc_user_context_t *) = {
#define MCKERNEL_UNUSED __attribute__ ((unused))
static char *syscall_name[] MCKERNEL_UNUSED = {
#define DECLARATOR(number,name) [number] = #name,
#define SYSCALL_HANDLED(number,name) DECLARATOR(number,sys_##name)
#define SYSCALL_DELEGATED(number,name) DECLARATOR(number,sys_##name)
#define SYSCALL_HANDLED(number,name) DECLARATOR(number,#name)
#define SYSCALL_DELEGATED(number,name) DECLARATOR(number,#name)
#include <syscall_list.h>
#undef DECLARATOR
#undef SYSCALL_HANDLED
@@ -132,24 +132,20 @@ static void do_mod_exit(int status);
#ifdef TRACK_SYSCALLS
#define SOCC_CLEAR 1
#define SOCC_ON 2
#define SOCC_OFF 4
#define SOCC_PRINT 8
void print_syscall_stats(struct thread *thread)
void track_syscalls_print_thread_stats(struct thread *thread)
{
int i;
unsigned long flags;
flags = kprintf_lock();
for (i = 0; i < 300; ++i) {
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (!thread->syscall_cnts[i] &&
!thread->offload_cnts[i]) continue;
//__kprintf("(%20s): sys.cnt: %3lu (%15lukC)\n",
__kprintf("(%3d,%20s): sys.cnt: %5lu (%10lukC), offl.cnt: %5lu (%10lukC)\n",
__kprintf("TID: %4d (%3d,%20s): sys: %6u %6lukC offl: %6u %6lukC\n",
thread->tid,
i,
syscall_name[i],
thread->syscall_cnts[i],
@@ -166,38 +162,150 @@ void print_syscall_stats(struct thread *thread)
kprintf_unlock(flags);
}
void alloc_syscall_counters(struct thread *thread)
void track_syscalls_print_proc_stats(struct process *proc)
{
thread->syscall_times = kmalloc(sizeof(*thread->syscall_times) * 300, IHK_MC_AP_NOWAIT);
thread->syscall_cnts = kmalloc(sizeof(*thread->syscall_cnts) * 300, IHK_MC_AP_NOWAIT);
thread->offload_times = kmalloc(sizeof(*thread->offload_times) * 300, IHK_MC_AP_NOWAIT);
thread->offload_cnts = kmalloc(sizeof(*thread->offload_cnts) * 300, IHK_MC_AP_NOWAIT);
int i;
unsigned long flags;
flags = kprintf_lock();
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (!proc->syscall_cnts[i] &&
!proc->offload_cnts[i]) continue;
//__kprintf("(%20s): sys.cnt: %3lu (%15lukC)\n",
__kprintf("PID: %4d (%3d,%20s): sys: %6u %6lukC offl: %6u %6lukC\n",
proc->pid,
i,
syscall_name[i],
proc->syscall_cnts[i],
(proc->syscall_times[i] /
(proc->syscall_cnts[i] ? proc->syscall_cnts[i] : 1))
/ 1000,
proc->offload_cnts[i],
(proc->offload_times[i] /
(proc->offload_cnts[i] ? proc->offload_cnts[i] : 1))
/ 1000
);
}
kprintf_unlock(flags);
}
void track_syscalls_accumulate_counters(struct thread *thread,
struct process *proc)
{
int i;
struct mcs_lock_node mcs_node;
mcs_lock_lock(&proc->st_lock, &mcs_node);
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (thread->syscall_cnts[i]) {
proc->syscall_times[i] += thread->syscall_times[i];
proc->syscall_cnts[i] += thread->syscall_cnts[i];
}
if (thread->offload_cnts[i]) {
proc->offload_times[i] += thread->offload_times[i];
proc->offload_cnts[i] += thread->offload_cnts[i];
}
}
mcs_lock_unlock(&proc->st_lock, &mcs_node);
}
void track_syscalls_alloc_counters(struct thread *thread)
{
struct process *proc = thread->proc;
struct mcs_lock_node mcs_node;
thread->syscall_times = kmalloc(sizeof(*thread->syscall_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
thread->syscall_cnts = kmalloc(sizeof(*thread->syscall_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
thread->offload_times = kmalloc(sizeof(*thread->offload_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
thread->offload_cnts = kmalloc(sizeof(*thread->offload_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
if (!thread->syscall_times ||
!thread->syscall_cnts ||
!thread->offload_times ||
!thread->offload_cnts) {
kprintf("ERROR: allocating counters\n");
kprintf("%s: ERROR: allocating thread private counters\n",
__FUNCTION__);
panic("");
}
memset(thread->syscall_times, 0, sizeof(*thread->syscall_times) * 300);
memset(thread->syscall_cnts, 0, sizeof(*thread->syscall_cnts) * 300);
memset(thread->offload_times, 0, sizeof(*thread->offload_times) * 300);
memset(thread->offload_cnts, 0, sizeof(*thread->offload_cnts) * 300);
memset(thread->syscall_times, 0, sizeof(*thread->syscall_times) *
TRACK_SYSCALLS_MAX);
memset(thread->syscall_cnts, 0, sizeof(*thread->syscall_cnts) *
TRACK_SYSCALLS_MAX);
memset(thread->offload_times, 0, sizeof(*thread->offload_times) *
TRACK_SYSCALLS_MAX);
memset(thread->offload_cnts, 0, sizeof(*thread->offload_cnts) *
TRACK_SYSCALLS_MAX);
mcs_lock_lock(&proc->st_lock, &mcs_node);
if (!proc->syscall_times) {
proc->syscall_times = kmalloc(sizeof(*proc->syscall_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
proc->syscall_cnts = kmalloc(sizeof(*proc->syscall_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
proc->offload_times = kmalloc(sizeof(*proc->offload_times) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
proc->offload_cnts = kmalloc(sizeof(*proc->offload_cnts) *
TRACK_SYSCALLS_MAX, IHK_MC_AP_NOWAIT);
if (!proc->syscall_times ||
!proc->syscall_cnts ||
!proc->offload_times ||
!proc->offload_cnts) {
kprintf("%s: ERROR: allocating process private counters\n",
__FUNCTION__);
panic("");
}
memset(proc->syscall_times, 0, sizeof(*proc->syscall_times) *
TRACK_SYSCALLS_MAX);
memset(proc->syscall_cnts, 0, sizeof(*proc->syscall_cnts) *
TRACK_SYSCALLS_MAX);
memset(proc->offload_times, 0, sizeof(*proc->offload_times) *
TRACK_SYSCALLS_MAX);
memset(proc->offload_cnts, 0, sizeof(*proc->offload_cnts) *
TRACK_SYSCALLS_MAX);
}
mcs_lock_unlock(&proc->st_lock, &mcs_node);
}
SYSCALL_DECLARE(syscall_offload_clr_cntrs)
void track_syscalls_dealloc_thread_counters(struct thread *thread)
{
kfree(thread->syscall_times);
kfree(thread->syscall_cnts);
kfree(thread->offload_times);
kfree(thread->offload_cnts);
}
void track_syscalls_dealloc_proc_counters(struct process *proc)
{
kfree(proc->syscall_times);
kfree(proc->syscall_cnts);
kfree(proc->offload_times);
kfree(proc->offload_cnts);
}
int do_track_syscalls(int flag)
{
int flag = (int)ihk_mc_syscall_arg0(ctx);
struct thread *thread = cpu_local_var(current);
int i;
if (flag & SOCC_PRINT)
print_syscall_stats(thread);
if (flag & TRACK_SYSCALLS_PRINT)
track_syscalls_print_thread_stats(thread);
if (flag & SOCC_CLEAR) {
for (i = 0; i < 300; ++i) {
if (flag & TRACK_SYSCALLS_PRINT_PROC)
track_syscalls_print_proc_stats(thread->proc);
if (flag & TRACK_SYSCALLS_CLEAR) {
for (i = 0; i < TRACK_SYSCALLS_MAX; ++i) {
if (!thread->syscall_cnts[i] &&
!thread->offload_cnts[i]) continue;
@@ -208,15 +316,21 @@ SYSCALL_DECLARE(syscall_offload_clr_cntrs)
}
}
if (flag & SOCC_ON) {
thread->socc_enabled = 1;
if (flag & TRACK_SYSCALLS_ON) {
thread->track_syscalls = 1;
}
else if (flag & SOCC_OFF) {
thread->socc_enabled = 0;
else if (flag & TRACK_SYSCALLS_OFF) {
thread->track_syscalls = 0;
}
return 0;
}
SYSCALL_DECLARE(track_syscalls)
{
int flag = (int)ihk_mc_syscall_arg0(ctx);
return do_track_syscalls(flag);
}
#endif // TRACK_SYSCALLS
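With the tracker now exposed as a regular system call, it can be driven from user space. A sketch, assuming the TRACK_SYSCALLS_* flags keep the old SOCC_* values (CLEAR 1, ON 2, OFF 4, PRINT 8) and that __NR_track_syscalls is 701, as the hunk near the end of this file suggests:

    /* User-space sketch: enable tracking, run a workload, dump stats.
     * The syscall number and flag values are assumptions (see above). */
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_track_syscalls  701
    #define TRACK_SYSCALLS_ON      2
    #define TRACK_SYSCALLS_PRINT   8

    int main(void)
    {
        syscall(__NR_track_syscalls, TRACK_SYSCALLS_ON);
        /* ... workload to be profiled ... */
        syscall(__NR_track_syscalls, TRACK_SYSCALLS_PRINT);
        return 0;
    }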
static void send_syscall(struct syscall_request *req, int cpu, int pid, struct syscall_response *res)
@@ -267,16 +381,12 @@ static void send_syscall(struct syscall_request *req, int cpu, int pid, struct s
#endif
}
ihk_spinlock_t syscall_lock;
long do_syscall(struct syscall_request *req, int cpu, int pid)
{
struct syscall_response res;
struct syscall_request req2 IHK_DMA_ALIGN;
int error;
long rc;
int islock = 0;
unsigned long irqstate;
struct thread *thread = cpu_local_var(current);
struct process *proc = thread->proc;
#ifdef TRACK_SYSCALLS
@@ -288,7 +398,6 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
ihk_mc_get_processor_id(),
req->number);
irqstate = 0; /* for avoidance of warning */
barrier();
if(req->number != __NR_exit_group){
@@ -299,11 +408,6 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
++thread->in_syscall_offload;
}
if(req->number == __NR_exit_group ||
req->number == __NR_kill){ // interrupt syscall
islock = 1;
irqstate = ihk_mc_spinlock_lock(&syscall_lock);
}
/* The current thread is the requester and any thread from
* the pool may serve the request */
req->rtid = cpu_local_var(current)->tid;
@@ -394,27 +498,25 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
__FUNCTION__, req->number, res.ret);
rc = res.ret;
if(islock){
ihk_mc_spinlock_unlock(&syscall_lock, irqstate);
}
if(req->number != __NR_exit_group){
--thread->in_syscall_offload;
}
#ifdef TRACK_SYSCALLS
if (req->number < 300) {
if (req->number < TRACK_SYSCALLS_MAX) {
if (!cpu_local_var(current)->offload_cnts) {
alloc_syscall_counters(cpu_local_var(current));
track_syscalls_alloc_counters(cpu_local_var(current));
}
if (cpu_local_var(current)->socc_enabled) {
if (cpu_local_var(current)->track_syscalls) {
cpu_local_var(current)->offload_times[req->number] +=
(rdtsc() - t_s);
cpu_local_var(current)->offload_cnts[req->number]++;
cpu_local_var(current)->offload_cnts[req->number] += 1;
}
}
else {
dkprintf("offload syscall > 300?? : %d\n", req->number);
dkprintf("%s: offload syscall > %d ?? : %d\n",
__FUNCTION__, TRACK_SYSCALLS_MAX, req->number);
}
#endif // TRACK_SYSCALLS
@@ -855,7 +957,6 @@ terminate(int rc, int sig)
mcs_rwlock_writer_unlock(&proc->threads_lock, &lock);
vm = proc->vm;
free_all_process_memory_range(vm);
if (proc->saved_cmdline) {
kfree(proc->saved_cmdline);
@@ -1135,6 +1236,7 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
struct vm_regions *region = &thread->vm->region;
intptr_t addr = addr0;
size_t len = len0;
size_t populate_len;
off_t off;
int error;
intptr_t npages;
@@ -1151,6 +1253,7 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
struct process *proc = thread->proc;
struct mckfd *fdp = NULL;
int pgshift;
struct vm_range *range = NULL;
dkprintf("do_mmap(%lx,%lx,%x,%x,%d,%lx)\n",
addr0, len0, prot, flags, fd, off0);
@@ -1234,16 +1337,9 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
vrflags |= (flags & MAP_LOCKED)? VR_LOCKED: 0;
vrflags |= VR_DEMAND_PAGING;
if (flags & MAP_ANONYMOUS) {
if (!anon_on_demand) {
populated_mapping = 1;
}
#ifdef USE_NOCACHE_MMAP
#define X_MAP_NOCACHE MAP_32BIT
else if (flags & X_MAP_NOCACHE) {
if (!anon_on_demand && (flags & MAP_PRIVATE)) {
vrflags &= ~VR_DEMAND_PAGING;
vrflags |= VR_IO_NOCACHE;
}
#endif
}
if (flags & (MAP_POPULATE | MAP_LOCKED)) {
@@ -1252,6 +1348,9 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
/* XXX: Intel MPI 128MB mapping.. */
if (len == 134217728) {
dkprintf("%s: %ld bytes mapping -> no prefault\n",
__FUNCTION__, len);
vrflags |= VR_DEMAND_PAGING;
populated_mapping = 0;
}
@@ -1309,19 +1408,39 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
goto out;
}
}
/* Prepopulated ANONYMOUS mapping */
else if (!(vrflags & VR_DEMAND_PAGING)
&& ((vrflags & VR_PROT_MASK) != VR_PROT_NONE)) {
npages = len >> PAGE_SHIFT;
p = ihk_mc_alloc_aligned_pages(npages, p2align, IHK_MC_AP_NOWAIT);
/* Small allocations mostly benefit from the closest RAM,
 * otherwise follow the user-requested policy */
unsigned long __flag = (len >= 2097152) ? IHK_MC_AP_USER : 0;
p = ihk_mc_alloc_aligned_pages(npages, p2align,
IHK_MC_AP_NOWAIT | __flag);
if (p == NULL) {
ekprintf("do_mmap:allocate_pages(%d,%d) failed.\n",
npages, p2align);
error = -ENOMEM;
goto out;
dkprintf("%s: warning: failed to allocate %d contiguous pages "
" (bytes: %lu, pgshift: %d), enabling demand paging\n",
__FUNCTION__,
npages, npages * PAGE_SIZE, p2align);
/* Give demand paging a chance */
vrflags |= VR_DEMAND_PAGING;
populated_mapping = 0;
error = zeroobj_create(&memobj);
if (error) {
ekprintf("%s: zeroobj_create failed, error: %d\n",
__FUNCTION__, error);
goto out;
}
}
else {
dkprintf("%s: 0x%x:%lu MAP_ANONYMOUS "
"allocated %d pages, p2align: %lx\n",
__FUNCTION__, addr, len, npages, p2align);
phys = virt_to_phys(p);
}
dkprintf("%s: 0x%x:%lu allocated %d pages, p2align: %lx\n",
__FUNCTION__, addr, len, npages, p2align);
phys = virt_to_phys(p);
}
else if (flags & MAP_SHARED) {
memset(&ads, 0, sizeof(ads));
@@ -1354,7 +1473,7 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
vrflags |= VRFLAG_PROT_TO_MAXPROT(PROT_TO_VR_FLAG(maxprot));
error = add_process_memory_range(thread->vm, addr, addr+len, phys,
vrflags, memobj, off, pgshift);
vrflags, memobj, off, pgshift, &range);
if (error) {
kprintf("%s: add_process_memory_range failed for 0x%lx:%lu"
" flags: %lx, vrflags: %lx, pgshift: %d, error: %d\n",
@@ -1363,12 +1482,18 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
goto out;
}
memobj_lock(memobj);
if (memobj->status == MEMOBJ_TO_BE_PREFETCHED) {
memobj->status = MEMOBJ_READY;
populated_mapping = 1;
/* Determine pre-populated size */
populate_len = len;
if (!(flags & MAP_ANONYMOUS)) {
memobj_lock(memobj);
if (memobj->status == MEMOBJ_TO_BE_PREFETCHED) {
memobj->status = MEMOBJ_READY;
populated_mapping = 1;
populate_len = memobj->size;
}
memobj_unlock(memobj);
}
memobj_unlock(memobj);
error = 0;
p = NULL;
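The populate_len split above matters when a file mapping is longer than the backing object: prefaulting beyond the object's size would only instantiate empty pages. A small illustration of the rule, with made-up sizes (a 10-page mapping of a 3-page prefetch-marked file populates 3 pages):

    /* Illustration of the populate_len rule; parameters are made up. */
    static size_t populate_len_example(int anonymous, int to_be_prefetched,
                                       size_t len, size_t memobj_size)
    {
        size_t populate_len = len;          /* default: whole mapping */
        if (!anonymous && to_be_prefetched)
            populate_len = memobj_size;     /* cap prefault at object size */
        return populate_len;
    }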
@@ -1382,7 +1507,9 @@ out:
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
if (!error && populated_mapping && !((vrflags & VR_PROT_MASK) == VR_PROT_NONE)) {
error = populate_process_memory(thread->vm, (void *)addr, len);
error = populate_process_memory(thread->vm,
(void *)addr, populate_len);
if (error) {
ekprintf("%s: WARNING: populate_process_memory(): "
"vm: %p, addr: %p, len: %d (flags: %s%s) failed %d\n",
@@ -1895,6 +2022,7 @@ SYSCALL_DECLARE(execve)
if (ret != 0) {
dkprintf("execve(): ERROR: host failed to load elf header, errno: %d\n",
ret);
ihk_mc_free_pages(desc, 1);
return -ret;
}
@@ -1918,6 +2046,7 @@ SYSCALL_DECLARE(execve)
kprintf("ERROR: no argv for executable: %s?\n", kfilename? kfilename: "");
if(kfilename)
kfree(kfilename);
ihk_mc_free_pages(desc, 1);
return -EINVAL;
}
@@ -1986,6 +2115,10 @@ SYSCALL_DECLARE(execve)
dkprintf("execve(): switching to new process\n");
proc->execed = 1;
ihk_mc_free_pages(desc, 1);
kfree(argv_flat);
kfree(envp_flat);
/* Lock run queue because enter_user_mode expects to release it */
cpu_local_var(runq_irqstate) =
ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
@@ -3563,9 +3696,28 @@ do_sigsuspend(struct thread *thread, const sigset_t *set)
thread->sigmask.__val[0] = wset;
thread->sigevent = 1;
for(;;){
while(thread->sigevent == 0)
cpu_pause();
for (;;) {
while (thread->sigevent == 0) {
int do_schedule = 0;
struct cpu_local_var *v;
long runq_irqstate;
runq_irqstate =
ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
v = get_this_cpu_local_var();
if (v->flags & CPU_FLAG_NEED_RESCHED) {
do_schedule = 1;
}
ihk_mc_spinlock_unlock(&v->runq_lock, runq_irqstate);
if (do_schedule) {
schedule();
}
else {
cpu_pause();
}
}
lock = &thread->sigcommon->lock;
head = &thread->sigcommon->sigpending;
@@ -4318,7 +4470,7 @@ SYSCALL_DECLARE(shmat)
memobj_ref(&obj->memobj);
error = add_process_memory_range(vm, addr, addr+len, -1,
vrflags, &obj->memobj, 0, obj->pgshift);
vrflags, &obj->memobj, 0, obj->pgshift, NULL);
if (error) {
if (!(prot & PROT_WRITE)) {
(void)set_host_vma(addr, len, PROT_READ|PROT_WRITE);
@@ -7078,7 +7230,7 @@ SYSCALL_DECLARE(mremap)
error = add_process_memory_range(thread->vm, newstart, newend, -1,
range->flag, range->memobj,
range->objoff + (oldstart - range->start),
range->pgshift);
range->pgshift, NULL);
if (error) {
ekprintf("sys_mremap(%#lx,%#lx,%#lx,%#x,%#lx):"
"add failed. %d\n",
@@ -8485,6 +8637,7 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
dkprintf("\n");
#ifdef TRACK_SYSCALLS
if (num == __NR_clone) cpu_local_var(current)->track_syscalls = 1;
t_s = rdtsc();
#endif // TRACK_SYSCALLS
@@ -8509,18 +8662,20 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
}
#ifdef TRACK_SYSCALLS
if (num < 300) {
if (num < TRACK_SYSCALLS_MAX) {
if (!cpu_local_var(current)->syscall_cnts) {
alloc_syscall_counters(cpu_local_var(current));
track_syscalls_alloc_counters(cpu_local_var(current));
}
if (cpu_local_var(current)->socc_enabled) {
if (cpu_local_var(current)->track_syscalls) {
cpu_local_var(current)->syscall_times[num] += (rdtsc() - t_s);
cpu_local_var(current)->syscall_cnts[num]++;
}
}
else {
if (num != 701)
kprintf("syscall > 300?? : %d\n", num);
if (num != __NR_track_syscalls) {
dkprintf("%s: syscall > %d ?? : %d\n",
__FUNCTION__, TRACK_SYSCALLS_MAX, num);
}
}
#endif // TRACK_SYSCALLS

View File

@@ -54,136 +54,75 @@ void init_timers(void)
}
uint64_t schedule_timeout(uint64_t timeout)
{
struct waitq_entry my_wait;
struct timer my_timer;
{
struct thread *thread = cpu_local_var(current);
int irqstate;
int spin_sleep;
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
dkprintf("schedule_timeout() spin sleep timeout: %lu\n", timeout);
spin_sleep = ++thread->spin_sleep;
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
long irqstate;
/* Spin sleep.. */
for (;;) {
int need_schedule;
struct cpu_local_var *v = get_this_cpu_local_var();
uint64_t t_s = rdtsc();
uint64_t t_e;
int spin_over = 0;
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
/* Woken up by someone? */
if (thread->spin_sleep < 1) {
if (thread->spin_sleep == 0) {
t_e = rdtsc();
spin_over = 1;
if ((t_e - t_s) < timeout) {
timeout -= (t_e - t_s);
}
else {
timeout = 1;
}
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
break;
}
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
if (!spin_over) {
t_s = rdtsc();
int need_schedule;
struct cpu_local_var *v = get_this_cpu_local_var();
int irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
need_schedule = v->runq_len > 1 ? 1 : 0;
/* Give a chance to another thread (if any) in case the core is
* oversubscribed, but make sure we will be re-scheduled */
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
need_schedule = v->runq_len > 1 ? 1 : 0;
if (need_schedule) {
xchg4(&(cpu_local_var(current)->status), PS_RUNNING);
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
schedule();
/* Give a chance to another thread (if any) in case the core is
* oversubscribed, but make sure we will be re-scheduled */
if (need_schedule) {
xchg4(&(cpu_local_var(current)->status), PS_RUNNING);
schedule();
xchg4(&(cpu_local_var(current)->status),
PS_INTERRUPTIBLE);
}
else {
/* Spin wait */
while ((rdtsc() - t_s) < LOOP_TIMEOUT) {
cpu_pause();
}
if (timeout < LOOP_TIMEOUT) {
timeout = 0;
spin_over = 1;
}
else {
timeout -= LOOP_TIMEOUT;
}
}
/* Recheck if woken */
continue;
}
else {
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
}
if (spin_over) {
dkprintf("schedule_timeout() spin woken up, timeout: %lu\n",
timeout);
/* Give a chance to another thread (if any) in case we timed out,
* but make sure we will be re-scheduled */
if (timeout == 0) {
int need_schedule;
struct cpu_local_var *v = get_this_cpu_local_var();
int irqstate =
ihk_mc_spinlock_lock(&(v->runq_lock));
need_schedule = v->runq_len > 1 ? 1 : 0;
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
/* Spin wait */
while ((rdtsc() - t_s) < LOOP_TIMEOUT) {
cpu_pause();
}
if (need_schedule) {
xchg4(&(cpu_local_var(current)->status), PS_RUNNING);
schedule();
xchg4(&(cpu_local_var(current)->status),
PS_INTERRUPTIBLE);
}
}
/* Time out? */
if (timeout < LOOP_TIMEOUT) {
timeout = 0;
/* We are not sleeping any more */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
if (spin_sleep == thread->spin_sleep) {
--thread->spin_sleep;
}
thread->spin_sleep = 0;
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
return timeout;
break;
}
else {
timeout -= LOOP_TIMEOUT;
}
}
/* Init waitq and wait entry for this timer */
my_timer.timeout = (timeout < LOOP_TIMEOUT) ? LOOP_TIMEOUT : timeout;
my_timer.thread = cpu_local_var(current);
waitq_init(&my_timer.processes);
waitq_init_entry(&my_wait, cpu_local_var(current));
/* Add ourself to the timer queue */
ihk_mc_spinlock_lock_noirq(&timers_lock);
list_add_tail(&my_timer.list, &timers);
dkprintf("schedule_timeout() sleep timeout: %lu\n", my_timer.timeout);
/* Add ourself to the waitqueue and sleep */
waitq_prepare_to_wait(&my_timer.processes, &my_wait, PS_INTERRUPTIBLE);
ihk_mc_spinlock_unlock_noirq(&timers_lock);
schedule();
waitq_finish_wait(&my_timer.processes, &my_wait);
ihk_mc_spinlock_lock_noirq(&timers_lock);
/* Woken up by someone else rather than the timeout? */
if (my_timer.timeout) {
list_del(&my_timer.list);
}
ihk_mc_spinlock_unlock_noirq(&timers_lock);
dkprintf("schedule_timeout() woken up, timeout: %lu\n",
my_timer.timeout);
return my_timer.timeout;
return timeout;
}
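The rewritten schedule_timeout() treats thread->spin_sleep reaching 0 as its wake-up condition. A hedged sketch of the matching waker side, assuming the sleeper raised spin_sleep before entering the loop; the helper name is illustrative:

    /* Sketch of the waker pairing with the spin-sleep loop above.
     * Assumes spin_sleep was set positive by the sleeper. */
    static void wake_spin_sleeper(struct thread *thread)
    {
        long irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
        if (thread->spin_sleep > 0)
            thread->spin_sleep = 0;
        ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
    }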

View File

@@ -22,6 +22,13 @@ default_wake_function(waitq_entry_t *entry, unsigned mode,
return sched_wakeup_thread(entry->private, PS_NORMAL);
}
int
locked_wake_function(waitq_entry_t *entry, unsigned mode,
int flags, void *key)
{
return sched_wakeup_thread_locked(entry->private, PS_NORMAL);
}
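locked_wake_function() presumably exists for callers that already hold the scheduler lock, where going through sched_wakeup_thread() would self-deadlock. A sketch of selecting it on a wait entry; note that the func member is an assumption about the waitq entry layout, not confirmed by this diff:

    /* Sketch: pick the locked wake path for a wait entry when the
     * caller already holds the runq lock. entry.func is an assumed
     * field name; only the wake functions are visible in this diff. */
    static void prepare_locked_waiter(waitq_t *wq)
    {
        waitq_entry_t entry;
        waitq_init_entry(&entry, cpu_local_var(current));
        entry.func = locked_wake_function;  /* instead of default_wake_function */
        /* ... enqueue on wq and sleep ... */
    }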
void
waitq_init(waitq_t *waitq)
{

View File

@@ -34,18 +34,25 @@ enum ihk_mc_gma_type {
IHK_MC_RESERVED_AREA_END,
};
extern unsigned long bootstrap_mem_end;
enum ihk_mc_ma_type {
IHK_MC_MA_AVAILABLE,
IHK_MC_MA_RESERVED,
IHK_MC_MA_SPECIAL,
};
enum ihk_mc_ap_flag {
IHK_MC_AP_FLAG,
IHK_MC_AP_CRITICAL, /* panic on no memory space */
IHK_MC_AP_NOWAIT, /* error return on no memory space */
IHK_MC_AP_WAIT /* wait on no memory space */
};
typedef unsigned long ihk_mc_ap_flag;
/* Panic on no memory space */
#define IHK_MC_AP_CRITICAL 0x000001
/* Error return on no memory space */
#define IHK_MC_AP_NOWAIT 0x000002
/* Wait on no memory space */
#define IHK_MC_AP_WAIT 0x000004
#define IHK_MC_AP_USER 0x001000
#define IHK_MC_AP_BANDWIDTH 0x010000
#define IHK_MC_AP_LATENCY 0x020000
enum ihk_mc_pt_prepare_flag {
IHK_MC_PT_FIRST_LEVEL,
@@ -79,10 +86,10 @@ void ihk_mc_reserve_arch_pages(struct ihk_page_allocator_desc *pa_allocator,
unsigned long, unsigned long, int));
struct ihk_mc_pa_ops {
void *(*alloc_page)(int, int, enum ihk_mc_ap_flag);
void *(*alloc_page)(int, int, ihk_mc_ap_flag, int node);
void (*free_page)(void *, int);
void *(*alloc)(int, enum ihk_mc_ap_flag);
void *(*alloc)(int, ihk_mc_ap_flag);
void (*free)(void *);
};
@@ -103,17 +110,20 @@ void ihk_mc_map_micpa(unsigned long host_pa, unsigned long* mic_pa);
int ihk_mc_free_micpa(unsigned long mic_pa);
void ihk_mc_clean_micpa(void);
void *_ihk_mc_alloc_aligned_pages(int npages, int p2align,
enum ihk_mc_ap_flag flag, char *file, int line);
#define ihk_mc_alloc_aligned_pages(npages, p2align, flag) ({\
void *r = _ihk_mc_alloc_aligned_pages(npages, p2align, flag, __FILE__, __LINE__);\
void *_ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
ihk_mc_ap_flag flag, int node, char *file, int line);
#define ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node) ({\
void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node, __FILE__, __LINE__);\
r;\
})
#define ihk_mc_alloc_aligned_pages(npages, p2align, flag) ({\
void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, -1, __FILE__, __LINE__);\
r;\
})
void *_ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag,
char *file, int line);
#define ihk_mc_alloc_pages(npages, flag) ({\
void *r = _ihk_mc_alloc_pages(npages, flag, __FILE__, __LINE__);\
void *r = _ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1, __FILE__, __LINE__);\
r;\
})
@@ -160,7 +170,7 @@ int visit_pte_range(page_table_t pt, void *start, void *end, int pgshift,
int move_pte_range(page_table_t pt, struct process_vm *vm,
void *src, void *dest, size_t size);
struct page_table *ihk_mc_pt_create(enum ihk_mc_ap_flag ap_flag);
struct page_table *ihk_mc_pt_create(ihk_mc_ap_flag ap_flag);
/* XXX: proper use of struct page_table and page_table_t is unknown */
void ihk_mc_pt_destroy(struct page_table *pt);
void ihk_mc_load_page_table(struct page_table *pt);
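Since ap_flag is now a plain bitmask rather than an enum, an allocation mode can be OR-ed with policy hints, and the _node variants take an explicit NUMA node (-1 meaning no preference, as the compatibility macro shows). A usage sketch:

    /* Sketch: non-blocking allocation with the user memory-policy hint,
     * requested on NUMA node 0. */
    static int alloc_user_pages_example(int npages, int p2align, void **out)
    {
        void *p = ihk_mc_alloc_aligned_pages_node(npages, p2align,
                IHK_MC_AP_NOWAIT | IHK_MC_AP_USER, 0);
        if (!p)
            return -ENOMEM;  /* IHK_MC_AP_NOWAIT: NULL instead of blocking */
        *out = p;
        return 0;
    }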

View File

@@ -35,8 +35,8 @@ void *phys_to_virt(unsigned long p);
int copy_from_user(void *dst, const void *src, size_t siz);
int strlen_user(const char *s);
int strcpy_from_user(char *dst, const char *src);
long getlong_user(const long *p);
int getint_user(const int *p);
long getlong_user(long *dest, const long *p);
int getint_user(int *dest, const int *p);
int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t siz);
int copy_to_user(void *dst, const void *src, size_t siz);
int setlong_user(long *dst, long data);
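The getlong_user()/getint_user() signature change moves the fetched value into an output parameter so that the return value can carry a status unambiguously; the old form could not distinguish a legitimately negative stored value from an error. A sketch of the new calling convention:

    /* Sketch: error-checked fetch of a long from user memory. */
    static int read_user_long_example(const long *user_ptr, long *out)
    {
        long val;
        int ret = getlong_user(&val, user_ptr);  /* status and value separate */
        if (ret < 0)
            return ret;  /* e.g. fault on the user address */
        *out = val;
        return 0;
    }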

View File

@@ -19,7 +19,6 @@
#include <memory.h>
#include <bitops.h>
void *allocate_pages(int npages, enum ihk_mc_ap_flag flag);
void free_pages(void *, int npages);
#define MAP_INDEX(n) ((n) >> 6)

View File

@@ -275,13 +275,21 @@ int flatten_strings_from_user(int nr_strings, char *first, char **strings, char
long *_flat;
char *p;
long r;
int n;
int n, ret;
/* How many strings do we have? */
if (nr_strings == -1) {
for (nr_strings = 0; (r = getlong_user((void *)(strings + nr_strings))) > 0; ++nr_strings);
if(r < 0)
return r;
nr_strings = 0;
for (;;) {
ret = getlong_user(&r, (void *)(strings + nr_strings));
if (ret < 0)
return ret;
if (r == 0)
break;
++nr_strings;
}
}
/* Count full length */
@@ -295,13 +303,19 @@ int flatten_strings_from_user(int nr_strings, char *first, char **strings, char
}
for (string_i = 0; string_i < nr_strings; ++string_i) {
char *userp = (char *)getlong_user((void *)(strings + string_i));
int len = strlen_user(userp);
char *userp;
int len;
ret = getlong_user((long *)&userp, (void *)(strings + string_i));
if (ret < 0)
return ret;
len = strlen_user(userp);
if(len < 0)
return len;
// Pointer + actual value
full_len += sizeof(char *) + len + 1;
full_len += sizeof(char *) + len + 1;
}
full_len = (full_len + sizeof(long) - 1) & ~(sizeof(long) - 1);
@@ -326,8 +340,13 @@ int flatten_strings_from_user(int nr_strings, char *first, char **strings, char
}
for (string_i = 0; string_i < nr_strings; ++string_i) {
char *userp = (char *)getlong_user((void *)(strings + string_i));
char *userp;
_flat[n++] = p - (char *)_flat;
ret = getlong_user((long *)&userp, (void *)(strings + string_i));
if (ret < 0)
return ret;
strcpy_from_user(p, userp);
p = strchr(p, '\0') + 1;
}
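Taken together, these hunks convert every user-pointer fetch in the flattener to the checked form, so a fault anywhere in the argv/envp walk now propagates out as a negative return instead of being silently misread. A sketch of a caller; passing NULL for first and the shape of the out-parameter are assumptions, since only the start of the signature is visible in the hunk header:

    /* Sketch: flatten a NULL-terminated user argv in one call.
     * nr_strings == -1 asks the function to count by walking to NULL. */
    static int flatten_argv_example(char **user_argv, char **flat_out)
    {
        int ret = flatten_strings_from_user(-1, NULL, user_argv, flat_out);
        if (ret < 0)
            return ret;  /* fault during the user-memory walk */
        return ret;
    }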