Merge remote-tracking branch 'remotes/origin/ikc2'
Conflicts: executer/kernel/mcctrl/syscall.c (resolved)
@@ -33,6 +33,7 @@
#include <linux/file.h>
#include <linux/version.h>
#include <linux/semaphore.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/io.h>
@@ -81,7 +82,6 @@ static long mcexec_prepare_image(ihk_os_t os,
void *args, *envs;
long ret = 0;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
unsigned long flags;
struct mcctrl_per_proc_data *ppd = NULL;
if (copy_from_user(&desc, udesc,
@@ -124,52 +124,48 @@ static long mcexec_prepare_image(ihk_os_t os,
|
||||
}
|
||||
|
||||
pdesc->args = (void*)virt_to_phys(args);
|
||||
printk("args: 0x%lX\n", (unsigned long)pdesc->args);
|
||||
printk("argc: %ld\n", *(long *)args);
|
||||
dprintk("args: 0x%lX\n", (unsigned long)pdesc->args);
|
||||
dprintk("argc: %ld\n", *(long *)args);
|
||||
pdesc->envs = (void*)virt_to_phys(envs);
|
||||
printk("envs: 0x%lX\n", (unsigned long)pdesc->envs);
|
||||
printk("envc: %ld\n", *(long *)envs);
|
||||
dprintk("envs: 0x%lX\n", (unsigned long)pdesc->envs);
|
||||
dprintk("envc: %ld\n", *(long *)envs);
|
||||
|
||||
isp.msg = SCD_MSG_PREPARE_PROCESS;
|
||||
isp.ref = pdesc->cpu;
|
||||
isp.arg = virt_to_phys(pdesc);
|
||||
|
||||
printk("# of sections: %d\n", pdesc->num_sections);
|
||||
printk("%p (%lx)\n", pdesc, isp.arg);
|
||||
dprintk("# of sections: %d\n", pdesc->num_sections);
|
||||
dprintk("%p (%lx)\n", pdesc, isp.arg);
|
||||
|
||||
pdesc->status = 0;
|
||||
mcctrl_ikc_send(os, pdesc->cpu, &isp);
|
||||
|
||||
wait_event_interruptible(usrdata->wq_prepare, pdesc->status);
|
||||
while (wait_event_interruptible(usrdata->wq_prepare, pdesc->status) != 0);
|
||||
|
||||
if(pdesc->err < 0){
|
||||
ret = pdesc->err;
|
||||
goto free_out;
|
||||
}
|
||||
|
||||
ppd = kmalloc(sizeof(*ppd), GFP_ATOMIC);
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
if (!ppd) {
|
||||
printk("ERROR: allocating per process data\n");
|
||||
ret = -ENOMEM;
|
||||
printk("ERROR: no per process data for PID %d\n", task_tgid_vnr(current));
|
||||
ret = -EINVAL;
|
||||
goto free_out;
|
||||
}
|
||||
|
||||
ppd->pid = pdesc->pid;
|
||||
/* Update rpgtable */
|
||||
ppd->rpgtable = pdesc->rpgtable;
|
||||
|
||||
flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
|
||||
list_add_tail(&ppd->list, &usrdata->per_proc_list);
|
||||
ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);
|
||||
|
||||
dprintk("pid %d, rpgtable: 0x%lx added\n",
|
||||
ppd->pid, ppd->rpgtable);
|
||||
|
||||
if (copy_to_user(udesc, pdesc, sizeof(struct program_load_desc) +
|
||||
sizeof(struct program_image_section) * desc.num_sections)) {
|
||||
ret = -EFAULT;
|
||||
goto free_out;
|
||||
}
|
||||
|
||||
dprintk("%s: pid %d, rpgtable: 0x%lx added\n",
|
||||
__FUNCTION__, ppd->pid, ppd->rpgtable);
|
||||
|
||||
ret = 0;
|
||||
|
||||
free_out:
|
||||
@@ -417,19 +413,200 @@ static long mcexec_get_cpu(ihk_os_t os)
|
||||
return info->n_cpus;
|
||||
}
|
||||
|
||||
int mcexec_syscall(struct mcctrl_channel *c, int pid, unsigned long arg)
|
||||
int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
|
||||
struct mcctrl_per_proc_data *ppd)
|
||||
{
|
||||
struct mcctrl_per_proc_data *ppd_iter;
|
||||
int hash = (pid & MCCTRL_PER_PROC_DATA_HASH_MASK);
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
|
||||
/* Check if data for this thread exists and add if not */
|
||||
write_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
|
||||
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
|
||||
if (ppd_iter->pid == pid) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
list_add_tail(&ppd->hash, &ud->per_proc_data_hash[hash]);
|
||||
|
||||
out:
|
||||
write_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int mcctrl_delete_per_proc_data(struct mcctrl_usrdata *ud, int pid)
|
||||
{
|
||||
struct mcctrl_per_proc_data *ppd_iter, *ppd = NULL;
|
||||
int hash = (pid & MCCTRL_PER_PROC_DATA_HASH_MASK);
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
|
||||
write_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
|
||||
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
|
||||
if (ppd_iter->pid == pid) {
|
||||
ppd = ppd_iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!ppd) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_del(&ppd->hash);
|
||||
|
||||
out:
|
||||
write_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
|
||||
struct mcctrl_usrdata *ud, int pid)
|
||||
{
|
||||
struct mcctrl_per_proc_data *ppd_iter, *ppd = NULL;
|
||||
int hash = (pid & MCCTRL_PER_PROC_DATA_HASH_MASK);
|
||||
unsigned long flags;
|
||||
|
||||
/* Check if data for this process exists and return it */
|
||||
read_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
|
||||
|
||||
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
|
||||
if (ppd_iter->pid == pid) {
|
||||
ppd = ppd_iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
read_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);
|
||||
return ppd;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called indirectly from the IKC message handler.
|
||||
*/
|
||||
int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet)
|
||||
{
|
||||
struct wait_queue_head_list_node *wqhln = NULL;
|
||||
struct wait_queue_head_list_node *wqhln_iter;
|
||||
struct wait_queue_head_list_node *wqhln_alloc = NULL;
|
||||
int pid = packet->pid;
|
||||
unsigned long flags;
|
||||
struct mcctrl_per_proc_data *ppd;
|
||||
|
||||
/* Look up per-process structure */
|
||||
ppd = mcctrl_get_per_proc_data(ud, pid);
|
||||
|
||||
if (unlikely(!ppd)) {
|
||||
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
|
||||
__FUNCTION__, task_tgid_vnr(current));
|
||||
return 0;
|
||||
}
|
||||
|
||||
dprintk("%s: (packet_handler) rtid: %d, ttid: %d, sys nr: %d\n",
|
||||
__FUNCTION__,
|
||||
packet->req.rtid,
|
||||
packet->req.ttid,
|
||||
packet->req.number);
|
||||
/*
|
||||
* Three scenarios are possible:
|
||||
* - Find the designated thread if req->ttid is specified.
|
||||
* - Find any available thread if req->ttid is zero.
|
||||
* - Add a request element if no threads are available.
|
||||
*/
|
||||
flags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
|
||||
|
||||
/* Is this a request for a specific thread? See if it's waiting */
|
||||
if (unlikely(packet->req.ttid)) {
|
||||
list_for_each_entry(wqhln_iter, &ppd->wq_list_exact, list) {
|
||||
if (packet->req.ttid != task_pid_vnr(wqhln_iter->task))
|
||||
continue;
|
||||
|
||||
/* Look up per-process wait queue head with pid */
|
||||
flags = ihk_ikc_spinlock_lock(&c->wq_list_lock);
|
||||
list_for_each_entry(wqhln_iter, &c->wq_list, list) {
|
||||
if (wqhln_iter->pid == pid) {
|
||||
wqhln = wqhln_iter;
|
||||
break;
|
||||
}
|
||||
if (!wqhln) {
|
||||
printk("%s: WARNING: no target thread found for exact request??\n",
|
||||
__FUNCTION__);
|
||||
}
|
||||
}
|
||||
/* Is there any thread available? */
|
||||
else {
|
||||
list_for_each_entry(wqhln_iter, &ppd->wq_list, list) {
|
||||
if (wqhln_iter->task && !wqhln_iter->req) {
|
||||
wqhln = wqhln_iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If no match found, add request to pending request list */
|
||||
if (unlikely(!wqhln)) {
|
||||
retry_alloc:
|
||||
wqhln_alloc = kmalloc(sizeof(*wqhln), GFP_ATOMIC);
|
||||
if (!wqhln_alloc) {
|
||||
printk("WARNING: coudln't alloc wait queue head, retrying..\n");
|
||||
goto retry_alloc;
|
||||
}
|
||||
|
||||
wqhln = wqhln_alloc;
|
||||
wqhln->req = 0;
|
||||
wqhln->task = NULL;
|
||||
init_waitqueue_head(&wqhln->wq_syscall);
|
||||
list_add_tail(&wqhln->list, &ppd->wq_req_list);
|
||||
}
|
||||
|
||||
wqhln->packet = packet;
|
||||
wqhln->req = 1;
|
||||
wake_up(&wqhln->wq_syscall);
|
||||
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from an mcexec thread via ioctl().
|
||||
*/
|
||||
int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
|
||||
{
|
||||
struct ikc_scd_packet *packet;
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
|
||||
struct wait_queue_head_list_node *wqhln = NULL;
|
||||
struct wait_queue_head_list_node *wqhln_iter;
|
||||
int ret = 0;
|
||||
unsigned long irqflags;
|
||||
struct mcctrl_per_proc_data *ppd;
|
||||
|
||||
/* Look up per-process structure */
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
|
||||
if (unlikely(!ppd)) {
|
||||
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
|
||||
__FUNCTION__, task_tgid_vnr(current));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
packet = (struct ikc_scd_packet *)mcctrl_get_per_thread_data(ppd, current);
|
||||
if (packet) {
|
||||
printk("%s: ERROR: packet %p is already registered for thread %d\n",
|
||||
__FUNCTION__, packet, task_pid_vnr(current));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
retry:
|
||||
/* Prepare per-thread wait queue head or find a valid request */
|
||||
irqflags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
|
||||
/* First see if there is a valid request already that is not yet taken */
|
||||
list_for_each_entry(wqhln_iter, &ppd->wq_req_list, list) {
|
||||
if (wqhln_iter->task == NULL && wqhln_iter->req) {
|
||||
wqhln = wqhln_iter;
|
||||
wqhln->task = current;
|
||||
list_del(&wqhln->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!wqhln) {
|
||||
@@ -440,180 +617,86 @@ retry_alloc:
|
||||
goto retry_alloc;
|
||||
}
|
||||
|
||||
wqhln->pid = pid;
|
||||
wqhln->task = current;
|
||||
wqhln->req = 0;
|
||||
init_waitqueue_head(&wqhln->wq_syscall);
|
||||
list_add_tail(&wqhln->list, &c->wq_list);
|
||||
|
||||
/* Wait for a request.. */
|
||||
list_add(&wqhln->list, &ppd->wq_list);
|
||||
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);
|
||||
|
||||
ret = wait_event_interruptible(wqhln->wq_syscall, wqhln->req);
|
||||
|
||||
/* Remove per-thread wait queue head */
|
||||
irqflags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
|
||||
list_del(&wqhln->list);
|
||||
}
|
||||
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);
|
||||
|
||||
wqhln->req = 1;
|
||||
wake_up(&wqhln->wq_syscall);
|
||||
ihk_ikc_spinlock_unlock(&c->wq_list_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef DO_USER_MODE
|
||||
// static int remaining_job, base_cpu, job_pos;
|
||||
#endif
|
||||
|
||||
// extern int num_channels;
|
||||
// extern int mcctrl_dma_abort;
|
||||
|
||||
int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
|
||||
{
|
||||
struct syscall_wait_desc swd;
|
||||
struct mcctrl_channel *c;
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
|
||||
struct wait_queue_head_list_node *wqhln;
|
||||
struct wait_queue_head_list_node *wqhln_iter;
|
||||
int ret = 0;
|
||||
unsigned long irqflags;
|
||||
#ifndef DO_USER_MODE
|
||||
unsigned long s, w, d;
|
||||
#endif
|
||||
|
||||
//printk("mcexec_wait_syscall swd=%p req=%p size=%d\n", &swd, req, sizeof(swd.cpu));
|
||||
if (copy_from_user(&swd, req, sizeof(swd))) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (swd.cpu >= usrdata->num_channels)
|
||||
return -EINVAL;
|
||||
|
||||
c = get_peer_channel(usrdata, current);
|
||||
if (c) {
|
||||
printk("mcexec_wait_syscall:already registered. task %p ch %p\n",
|
||||
current, c);
|
||||
return -EBUSY;
|
||||
}
|
||||
c = usrdata->channels + swd.cpu;
|
||||
|
||||
#ifdef DO_USER_MODE
|
||||
retry:
|
||||
/* Prepare per-process wait queue head */
|
||||
retry_alloc:
|
||||
wqhln = kmalloc(sizeof(*wqhln), GFP_KERNEL);
|
||||
if (!wqhln) {
|
||||
printk("WARNING: coudln't alloc wait queue head, retrying..\n");
|
||||
goto retry_alloc;
|
||||
}
|
||||
|
||||
wqhln->pid = swd.pid;
|
||||
wqhln->req = 0;
|
||||
init_waitqueue_head(&wqhln->wq_syscall);
|
||||
|
||||
irqflags = ihk_ikc_spinlock_lock(&c->wq_list_lock);
|
||||
/* First see if there is one wait queue already */
|
||||
list_for_each_entry(wqhln_iter, &c->wq_list, list) {
|
||||
if (wqhln_iter->pid == task_tgid_vnr(current)) {
|
||||
kfree(wqhln);
|
||||
wqhln = wqhln_iter;
|
||||
list_del(&wqhln->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
list_add_tail(&wqhln->list, &c->wq_list);
|
||||
ihk_ikc_spinlock_unlock(&c->wq_list_lock, irqflags);
|
||||
|
||||
ret = wait_event_interruptible(wqhln->wq_syscall, wqhln->req);
|
||||
|
||||
|
||||
/* Remove per-process wait queue head */
|
||||
irqflags = ihk_ikc_spinlock_lock(&c->wq_list_lock);
|
||||
list_del(&wqhln->list);
|
||||
ihk_ikc_spinlock_unlock(&c->wq_list_lock, irqflags);
|
||||
if (ret && !wqhln->req) {
|
||||
kfree(wqhln);
|
||||
wqhln = NULL;
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
packet = wqhln->packet;
|
||||
kfree(wqhln);
|
||||
wqhln = NULL;
|
||||
|
||||
if (c->param.request_va->number == 61 &&
|
||||
c->param.request_va->args[0] == swd.pid) {
|
||||
dprintk("%s: tid: %d request from CPU %d\n",
|
||||
__FUNCTION__, task_pid_vnr(current), packet->ref);
|
||||
|
||||
dprintk("pid: %d, tid: %d: SC %d, swd.cpu: %d, WARNING: wait4() for self?\n",
|
||||
task_tgid_vnr(current),
|
||||
task_pid_vnr(current),
|
||||
c->param.request_va->number,
|
||||
swd.cpu);
|
||||
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
#if 1
|
||||
mb();
|
||||
if (!c->param.request_va->valid) {
|
||||
printk("mcexec_wait_syscall:stray wakeup\n");
|
||||
if (!packet->req.valid) {
|
||||
printk("%s: ERROR: stray wakeup pid: %d, tid: %d: SC %lu\n",
|
||||
__FUNCTION__,
|
||||
task_tgid_vnr(current),
|
||||
task_pid_vnr(current),
|
||||
packet->req.number);
|
||||
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
|
||||
(usrdata->channels + packet->ref)->c);
|
||||
goto retry;
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
while (1) {
|
||||
c = usrdata->channels + swd.cpu;
|
||||
ihk_get_tsc(s);
|
||||
if (!usrdata->remaining_job) {
|
||||
while (!(*c->param.doorbell_va)) {
|
||||
mb();
|
||||
cpu_relax();
|
||||
ihk_get_tsc(w);
|
||||
if (w > s + 1024UL * 1024 * 1024 * 10) {
|
||||
return -EINTR;
|
||||
}
|
||||
}
|
||||
d = (*c->param.doorbell_va) - 1;
|
||||
*c->param.doorbell_va = 0;
|
||||
|
||||
if (d < 0 || d >= usrdata->num_channels) {
|
||||
d = 0;
|
||||
}
|
||||
usrdata->base_cpu = d;
|
||||
usrdata->job_pos = 0;
|
||||
usrdata->remaining_job = 1;
|
||||
} else {
|
||||
usrdata->job_pos++;
|
||||
}
|
||||
|
||||
for (; usrdata->job_pos < usrdata->num_channels; usrdata->job_pos++) {
|
||||
if (base_cpu + job_pos >= num_channels) {
|
||||
c = usrdata->channels +
|
||||
(usrdata->base_cpu + usrdata->job_pos - usrdata->num_channels);
|
||||
} else {
|
||||
c = usrdata->channels + usrdata->base_cpu + usrdata->job_pos;
|
||||
}
|
||||
if (!c) {
|
||||
continue;
|
||||
}
|
||||
if (c->param.request_va &&
|
||||
c->param.request_va->valid) {
|
||||
#endif
|
||||
c->param.request_va->valid = 0; /* ack */
|
||||
dprintk("SC #%lx, %lx\n",
|
||||
c->param.request_va->number,
|
||||
c->param.request_va->args[0]);
|
||||
register_peer_channel(usrdata, current, c);
|
||||
if (__do_in_kernel_syscall(os, c, c->param.request_va)) {
|
||||
if (copy_to_user(&req->sr, c->param.request_va,
|
||||
sizeof(struct syscall_request))) {
|
||||
deregister_peer_channel(usrdata, current, c);
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
deregister_peer_channel(usrdata, current, c);
|
||||
#ifdef DO_USER_MODE
|
||||
goto retry;
|
||||
#endif
|
||||
#ifndef DO_USER_MODE
|
||||
if (usrdata->mcctrl_dma_abort) {
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
}
|
||||
usrdata->remaining_job = 0;
|
||||
packet->req.valid = 0; /* ack */
|
||||
dprintk("%s: system call: %d, args[0]: %lu, args[1]: %lu, args[2]: %lu, "
|
||||
"args[3]: %lu, args[4]: %lu, args[5]: %lu\n",
|
||||
__FUNCTION__,
|
||||
packet->req.number,
|
||||
packet->req.args[0],
|
||||
packet->req.args[1],
|
||||
packet->req.args[2],
|
||||
packet->req.args[3],
|
||||
packet->req.args[4],
|
||||
packet->req.args[5]);
|
||||
|
||||
if (mcctrl_add_per_thread_data(ppd, current, packet) < 0) {
|
||||
kprintf("%s: error adding per-thread data\n", __FUNCTION__);
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
if (__do_in_kernel_syscall(os, packet)) {
|
||||
if (copy_to_user(&req->sr, &packet->req,
|
||||
sizeof(struct syscall_request))) {
|
||||
|
||||
if (mcctrl_delete_per_thread_data(ppd, current) < 0) {
|
||||
kprintf("%s: error deleting per-thread data\n", __FUNCTION__);
|
||||
return -EINVAL;
|
||||
}
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
|
||||
(usrdata->channels + packet->ref)->c);
|
||||
|
||||
if (mcctrl_delete_per_thread_data(ppd, current) < 0) {
|
||||
kprintf("%s: error deleting per-thread data\n", __FUNCTION__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
goto retry;
|
||||
}
|
||||
|
||||
long mcexec_pin_region(ihk_os_t os, unsigned long *__user arg)
|
||||
@@ -696,33 +779,6 @@ long mcexec_load_syscall(ihk_os_t os, struct syscall_load_desc *__user arg)
|
||||
#endif
|
||||
|
||||
ihk_device_unmap_memory(ihk_os_to_dev(os), phys, desc.size);
|
||||
|
||||
/*
|
||||
ihk_dma_channel_t channel;
|
||||
struct ihk_dma_request request;
|
||||
unsigned long dma_status = 0;
|
||||
|
||||
channel = ihk_device_get_dma_channel(ihk_os_to_dev(os), 0);
|
||||
if (!channel) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.src_os = os;
|
||||
request.src_phys = desc.src;
|
||||
request.dest_os = NULL;
|
||||
request.dest_phys = desc.dest;
|
||||
request.size = desc.size;
|
||||
request.notify = (void *)virt_to_phys(&dma_status);
|
||||
request.priv = (void *)1;
|
||||
|
||||
ihk_dma_request(channel, &request);
|
||||
|
||||
while (!dma_status) {
|
||||
mb();
|
||||
udelay(1);
|
||||
}
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -730,74 +786,60 @@ long mcexec_load_syscall(ihk_os_t os, struct syscall_load_desc *__user arg)
|
||||
long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
|
||||
{
|
||||
struct syscall_ret_desc ret;
|
||||
struct mcctrl_channel *mc;
|
||||
struct ikc_scd_packet *packet;
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
|
||||
#if 0
|
||||
ihk_dma_channel_t channel;
|
||||
struct ihk_dma_request request;
|
||||
|
||||
channel = ihk_device_get_dma_channel(ihk_os_to_dev(os), 0);
|
||||
if (!channel) {
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
struct mcctrl_per_proc_data *ppd;
|
||||
|
||||
if (copy_from_user(&ret, arg, sizeof(struct syscall_ret_desc))) {
|
||||
return -EFAULT;
|
||||
}
|
||||
mc = usrdata->channels + ret.cpu;
|
||||
if (!mc) {
|
||||
|
||||
/* Look up per-process structure */
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
if (!ppd) {
|
||||
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
|
||||
__FUNCTION__, task_tgid_vnr(current));
|
||||
return -EINVAL;
|
||||
}
|
||||
deregister_peer_channel(usrdata, current, mc);
|
||||
|
||||
mc->param.response_va->ret = ret.ret;
|
||||
packet = (struct ikc_scd_packet *)mcctrl_get_per_thread_data(ppd, current);
|
||||
if (!packet) {
|
||||
kprintf("%s: ERROR: no packet registered for TID %d\n",
|
||||
__FUNCTION__, task_pid_vnr(current));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mcctrl_delete_per_thread_data(ppd, current);
|
||||
|
||||
if (ret.size > 0) {
|
||||
/* Host => Accel. Write is fast. */
|
||||
unsigned long phys;
|
||||
void *rpm;
|
||||
|
||||
phys = ihk_device_map_memory(ihk_os_to_dev(os), ret.dest,
|
||||
ret.size);
|
||||
phys = ihk_device_map_memory(ihk_os_to_dev(os), ret.dest, ret.size);
|
||||
#ifdef CONFIG_MIC
|
||||
rpm = ioremap_wc(phys, ret.size);
|
||||
#else
|
||||
rpm = ihk_device_map_virtual(ihk_os_to_dev(os), phys,
|
||||
ret.size, NULL, 0);
|
||||
#endif
|
||||
|
||||
if (copy_from_user(rpm, (void *__user)ret.src, ret.size)) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mb();
|
||||
mc->param.response_va->status = 1;
|
||||
|
||||
#ifdef CONFIG_MIC
|
||||
iounmap(rpm);
|
||||
#else
|
||||
ihk_device_unmap_virtual(ihk_os_to_dev(os), rpm, ret.size);
|
||||
#endif
|
||||
ihk_device_unmap_memory(ihk_os_to_dev(os), phys, ret.size);
|
||||
}
|
||||
|
||||
/*
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.src_os = NULL;
|
||||
request.src_phys = ret.src;
|
||||
request.dest_os = os;
|
||||
request.dest_phys = ret.dest;
|
||||
request.size = ret.size;
|
||||
request.notify_os = os;
|
||||
request.notify = (void *)mc->param.response_rpa;
|
||||
request.priv = (void *)1;
|
||||
|
||||
ihk_dma_request(channel, &request);
|
||||
*/
|
||||
} else {
|
||||
mb();
|
||||
mc->param.response_va->status = 1;
|
||||
}
|
||||
__return_syscall(os, packet, ret.ret, task_pid_vnr(current));
|
||||
|
||||
/* Free packet */
|
||||
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
|
||||
(usrdata->channels + packet->ref)->c);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -862,14 +904,53 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
|
||||
int retval;
|
||||
int os_ind = ihk_host_os_get_index(os);
|
||||
char *pathbuf, *fullpath;
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
|
||||
struct mcctrl_per_proc_data *ppd = NULL;
|
||||
int i;
|
||||
|
||||
if (os_ind < 0) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
|
||||
if (!ppd) {
|
||||
ppd = kmalloc(sizeof(*ppd), GFP_KERNEL);
|
||||
if (!ppd) {
|
||||
printk("ERROR: allocating per process data\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ppd->pid = task_tgid_vnr(current);
|
||||
/*
|
||||
* XXX: rpgtable will be updated in __do_in_kernel_syscall()
|
||||
* under case __NR_munmap
|
||||
*/
|
||||
INIT_LIST_HEAD(&ppd->wq_list);
|
||||
INIT_LIST_HEAD(&ppd->wq_req_list);
|
||||
INIT_LIST_HEAD(&ppd->wq_list_exact);
|
||||
spin_lock_init(&ppd->wq_list_lock);
|
||||
|
||||
for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; ++i) {
|
||||
INIT_LIST_HEAD(&ppd->per_thread_data_hash[i]);
|
||||
rwlock_init(&ppd->per_thread_data_hash_lock[i]);
|
||||
}
|
||||
|
||||
if (mcctrl_add_per_proc_data(usrdata, ppd->pid, ppd) < 0) {
|
||||
printk("%s: error adding per process data\n", __FUNCTION__);
|
||||
retval = EINVAL;
|
||||
goto out_free_ppd;
|
||||
}
|
||||
}
|
||||
else {
|
||||
/* Only deallocate in case of an error if we added it above */
|
||||
ppd = NULL;
|
||||
}
|
||||
|
||||
pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
|
||||
if (!pathbuf) {
|
||||
return ENOMEM;
|
||||
retval = ENOMEM;
|
||||
goto out_error_drop_ppd;
|
||||
}
|
||||
|
||||
file = open_exec(filename);
|
||||
@@ -901,7 +982,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Add new exec file to the list */
|
||||
mcef->os = os;
|
||||
mcef->pid = task_tgid_vnr(current);
|
||||
@@ -918,12 +999,15 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
|
||||
kfree(pathbuf);
|
||||
|
||||
return 0;
|
||||
|
||||
|
||||
out_put_file:
|
||||
fput(file);
|
||||
|
||||
out_error_free:
|
||||
kfree(pathbuf);
|
||||
out_error_drop_ppd:
|
||||
if (ppd) mcctrl_delete_per_proc_data(usrdata, ppd->pid);
|
||||
out_free_ppd:
|
||||
if (ppd) kfree(ppd);
|
||||
return -retval;
|
||||
}
|
||||
|
||||
@@ -933,6 +1017,23 @@ int mcexec_close_exec(ihk_os_t os)
|
||||
struct mckernel_exec_file *mcef = NULL;
|
||||
int found = 0;
|
||||
int os_ind = ihk_host_os_get_index(os);
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
|
||||
struct mcctrl_per_proc_data *ppd = NULL;
|
||||
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
|
||||
if (ppd) {
|
||||
mcctrl_delete_per_proc_data(usrdata, ppd->pid);
|
||||
|
||||
dprintk("pid: %d, tid: %d: rpgtable for %d (0x%lx) removed\n",
|
||||
task_tgid_vnr(current), current->pid, ppd->pid, ppd->rpgtable);
|
||||
|
||||
kfree(ppd);
|
||||
}
|
||||
else {
|
||||
printk("WARNING: no per process data for pid %d ?\n",
|
||||
task_tgid_vnr(current));
|
||||
}
|
||||
|
||||
if (os_ind < 0) {
|
||||
return EINVAL;
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include "mcctrl.h"
|
||||
#ifdef ATTACHED_MIC
|
||||
#include <sysdeps/mic/mic/micconst.h>
|
||||
@@ -40,16 +41,18 @@
|
||||
|
||||
void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err);
|
||||
static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ihk_ikc_channel_desc *c);
|
||||
int mcexec_syscall(struct mcctrl_channel *c, int pid, unsigned long arg);
|
||||
int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet);
|
||||
void sig_done(unsigned long arg, int err);
|
||||
|
||||
/* XXX: this runs in atomic context! */
|
||||
static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
|
||||
void *__packet, void *__os)
|
||||
{
|
||||
struct ikc_scd_packet *pisp = __packet;
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(__os);
|
||||
int msg = pisp->msg;
|
||||
|
||||
switch (pisp->msg) {
|
||||
switch (msg) {
|
||||
case SCD_MSG_INIT_CHANNEL:
|
||||
mcctrl_ikc_init(__os, pisp->ref, pisp->arg, c);
|
||||
break;
|
||||
@@ -63,7 +66,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
|
||||
break;
|
||||
|
||||
case SCD_MSG_SYSCALL_ONESIDE:
|
||||
mcexec_syscall(usrdata->channels + pisp->ref, pisp->pid, pisp->arg);
|
||||
mcexec_syscall(usrdata, pisp);
|
||||
break;
|
||||
|
||||
case SCD_MSG_PROCFS_ANSWER:
|
||||
@@ -88,11 +91,8 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
|
||||
break;
|
||||
|
||||
case SCD_MSG_PROCFS_TID_CREATE:
|
||||
add_tid_entry(ihk_host_os_get_index(__os), pisp->pid, pisp->arg);
|
||||
break;
|
||||
|
||||
case SCD_MSG_PROCFS_TID_DELETE:
|
||||
delete_tid_entry(ihk_host_os_get_index(__os), pisp->pid, pisp->arg);
|
||||
procfsm_packet_handler(__os, pisp->msg, pisp->pid, pisp->arg);
|
||||
break;
|
||||
|
||||
case SCD_MSG_GET_VDSO_INFO:
|
||||
@@ -110,6 +110,14 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
|
||||
pisp->err, pisp->arg);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
 * SCD_MSG_SYSCALL_ONESIDE holds on to the packet and frees it in
 * mcexec_ret_syscall(); for the rest, free it here.
 */
|
||||
if (msg != SCD_MSG_SYSCALL_ONESIDE) {
|
||||
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)__packet, c);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -146,8 +154,6 @@ int mcctrl_ikc_set_recv_cpu(ihk_os_t os, int cpu)
|
||||
|
||||
ihk_ikc_channel_set_cpu(usrdata->channels[cpu].c,
|
||||
ihk_ikc_get_processor_id());
|
||||
kprintf("Setting the target to %d\n",
|
||||
ihk_ikc_get_processor_id());
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -193,12 +199,13 @@ static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ih
|
||||
#endif
|
||||
|
||||
pmc->param.request_va =
|
||||
(void *)__get_free_pages(GFP_KERNEL,
|
||||
(void *)__get_free_pages(in_interrupt() ? GFP_ATOMIC : GFP_KERNEL,
|
||||
REQUEST_SHIFT - PAGE_SHIFT);
|
||||
pmc->param.request_pa = virt_to_phys(pmc->param.request_va);
|
||||
pmc->param.doorbell_va = usrdata->mcctrl_doorbell_va;
|
||||
pmc->param.doorbell_pa = usrdata->mcctrl_doorbell_pa;
|
||||
pmc->param.post_va = (void *)__get_free_page(GFP_KERNEL);
|
||||
pmc->param.post_va = (void *)__get_free_page(in_interrupt() ?
|
||||
GFP_ATOMIC : GFP_KERNEL);
|
||||
pmc->param.post_pa = virt_to_phys(pmc->param.post_va);
|
||||
memset(pmc->param.doorbell_va, 0, PAGE_SIZE);
|
||||
memset(pmc->param.request_va, 0, PAGE_SIZE);
|
||||
@@ -218,8 +225,9 @@ static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ih
|
||||
PAGE_SIZE, NULL, 0);
|
||||
#endif
|
||||
|
||||
pmc->dma_buf = (void *)__get_free_pages(GFP_KERNEL,
|
||||
DMA_PIN_SHIFT - PAGE_SHIFT);
|
||||
pmc->dma_buf = (void *)__get_free_pages(in_interrupt() ?
|
||||
GFP_ATOMIC : GFP_KERNEL,
|
||||
DMA_PIN_SHIFT - PAGE_SHIFT);
|
||||
|
||||
rpm->request_page = pmc->param.request_pa;
|
||||
rpm->doorbell_page = pmc->param.doorbell_pa;
|
||||
@@ -265,9 +273,6 @@ static int connect_handler(struct ihk_ikc_channel_info *param)
|
||||
}
|
||||
param->packet_handler = syscall_packet_handler;
|
||||
|
||||
INIT_LIST_HEAD(&usrdata->channels[cpu].wq_list);
|
||||
spin_lock_init(&usrdata->channels[cpu].wq_list_lock);
|
||||
|
||||
usrdata->channels[cpu].c = c;
|
||||
kprintf("syscall: MC CPU %d connected. c=%p\n", cpu, c);
|
||||
|
||||
@@ -286,9 +291,6 @@ static int connect_handler2(struct ihk_ikc_channel_info *param)
|
||||
|
||||
param->packet_handler = syscall_packet_handler;
|
||||
|
||||
INIT_LIST_HEAD(&usrdata->channels[cpu].wq_list);
|
||||
spin_lock_init(&usrdata->channels[cpu].wq_list_lock);
|
||||
|
||||
usrdata->channels[cpu].c = c;
|
||||
kprintf("syscall: MC CPU %d connected. c=%p\n", cpu, c);
|
||||
|
||||
@@ -315,7 +317,7 @@ int prepare_ikc_channels(ihk_os_t os)
|
||||
{
|
||||
struct ihk_cpu_info *info;
|
||||
struct mcctrl_usrdata *usrdata;
|
||||
int error;
|
||||
int i;
|
||||
|
||||
usrdata = kzalloc(sizeof(struct mcctrl_usrdata), GFP_KERNEL);
|
||||
usrdata->mcctrl_doorbell_va = (void *)__get_free_page(GFP_KERNEL);
|
||||
@@ -347,17 +349,14 @@ int prepare_ikc_channels(ihk_os_t os)
|
||||
memcpy(&usrdata->listen_param2, &listen_param2, sizeof listen_param2);
|
||||
ihk_ikc_listen_port(os, &usrdata->listen_param2);
|
||||
|
||||
INIT_LIST_HEAD(&usrdata->per_proc_list);
|
||||
spin_lock_init(&usrdata->per_proc_list_lock);
|
||||
for (i = 0; i < MCCTRL_PER_PROC_DATA_HASH_SIZE; ++i) {
|
||||
INIT_LIST_HEAD(&usrdata->per_proc_data_hash[i]);
|
||||
rwlock_init(&usrdata->per_proc_data_hash_lock[i]);
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&usrdata->cpu_topology_list);
|
||||
INIT_LIST_HEAD(&usrdata->node_topology_list);
|
||||
|
||||
error = init_peer_channel_registry(usrdata);
|
||||
if (error) {
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -396,7 +395,6 @@ void destroy_ikc_channels(ihk_os_t os)
|
||||
}
|
||||
free_page((unsigned long)usrdata->mcctrl_doorbell_va);
|
||||
|
||||
destroy_peer_channel_registry(usrdata);
|
||||
kfree(usrdata->channels);
|
||||
kfree(usrdata);
|
||||
}
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
#include <ikc/master.h>
|
||||
#include <ihk/msr.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/rwlock.h>
|
||||
#include <linux/threads.h>
|
||||
#include "sysfs.h"
|
||||
|
||||
@@ -48,6 +49,7 @@
|
||||
#define SCD_MSG_PREPARE_PROCESS_ACKED 0x2
|
||||
#define SCD_MSG_PREPARE_PROCESS_NACKED 0x7
|
||||
#define SCD_MSG_SCHEDULE_PROCESS 0x3
|
||||
#define SCD_MSG_WAKE_UP_SYSCALL_THREAD 0x14
|
||||
|
||||
#define SCD_MSG_INIT_CHANNEL 0x5
|
||||
#define SCD_MSG_INIT_CHANNEL_ACKED 0x6
|
||||
@@ -110,8 +112,9 @@ struct ikc_scd_packet {
|
||||
int ref;
|
||||
int osnum;
|
||||
int pid;
|
||||
int padding;
|
||||
unsigned long arg;
|
||||
struct syscall_request req;
|
||||
unsigned long resp_pa;
|
||||
};
|
||||
|
||||
/* for SCD_MSG_SYSFS_* */
|
||||
@@ -120,7 +123,13 @@ struct ikc_scd_packet {
|
||||
long sysfs_arg2;
|
||||
long sysfs_arg3;
|
||||
};
|
||||
|
||||
/* SCD_MSG_SCHEDULE_THREAD */
|
||||
struct {
|
||||
int ttid;
|
||||
};
|
||||
};
|
||||
char padding[12];
|
||||
};
|
||||
|
||||
struct mcctrl_priv {
|
||||
@@ -154,8 +163,11 @@ struct syscall_params {
|
||||
struct wait_queue_head_list_node {
|
||||
struct list_head list;
|
||||
wait_queue_head_t wq_syscall;
|
||||
int pid;
|
||||
struct task_struct *task;
|
||||
/* Denotes an exclusive wait for requester TID rtid */
|
||||
int rtid;
|
||||
int req;
|
||||
struct ikc_scd_packet *packet;
|
||||
};
|
||||
|
||||
struct mcctrl_channel {
|
||||
@@ -163,15 +175,30 @@ struct mcctrl_channel {
|
||||
struct syscall_params param;
|
||||
struct ikc_scd_init_param init;
|
||||
void *dma_buf;
|
||||
|
||||
struct list_head wq_list;
|
||||
ihk_spinlock_t wq_list_lock;
|
||||
};
|
||||
|
||||
struct mcctrl_per_thread_data {
|
||||
struct list_head hash;
|
||||
struct task_struct *task;
|
||||
void *data;
|
||||
};
|
||||
|
||||
#define MCCTRL_PER_THREAD_DATA_HASH_SHIFT 8
|
||||
#define MCCTRL_PER_THREAD_DATA_HASH_SIZE (1 << MCCTRL_PER_THREAD_DATA_HASH_SHIFT)
|
||||
#define MCCTRL_PER_THREAD_DATA_HASH_MASK (MCCTRL_PER_THREAD_DATA_HASH_SIZE - 1)
|
||||
|
||||
struct mcctrl_per_proc_data {
|
||||
struct list_head list;
|
||||
struct list_head hash;
|
||||
int pid;
|
||||
unsigned long rpgtable; /* per process, not per OS */
|
||||
|
||||
struct list_head wq_list;
|
||||
struct list_head wq_req_list;
|
||||
struct list_head wq_list_exact;
|
||||
ihk_spinlock_t wq_list_lock;
|
||||
|
||||
struct list_head per_thread_data_hash[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
|
||||
rwlock_t per_thread_data_hash_lock[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
|
||||
};
|
||||
|
||||
struct sysfsm_req {
|
||||
@@ -230,6 +257,10 @@ struct node_topology {
|
||||
|
||||
#define CPU_LONGS (((NR_CPUS) + (BITS_PER_LONG) - 1) / (BITS_PER_LONG))
|
||||
|
||||
#define MCCTRL_PER_PROC_DATA_HASH_SHIFT 7
|
||||
#define MCCTRL_PER_PROC_DATA_HASH_SIZE (1 << MCCTRL_PER_PROC_DATA_HASH_SHIFT)
|
||||
#define MCCTRL_PER_PROC_DATA_HASH_MASK (MCCTRL_PER_PROC_DATA_HASH_SIZE - 1)
|
||||
|
||||
struct mcctrl_usrdata {
|
||||
struct ihk_ikc_listen_param listen_param;
|
||||
struct ihk_ikc_listen_param listen_param2;
|
||||
@@ -245,8 +276,9 @@ struct mcctrl_usrdata {
|
||||
unsigned long last_thread_exec;
|
||||
wait_queue_head_t wq_prepare;
|
||||
|
||||
struct list_head per_proc_list;
|
||||
ihk_spinlock_t per_proc_list_lock;
|
||||
struct list_head per_proc_data_hash[MCCTRL_PER_PROC_DATA_HASH_SIZE];
|
||||
rwlock_t per_proc_data_hash_lock[MCCTRL_PER_PROC_DATA_HASH_SIZE];
|
||||
|
||||
void **keys;
|
||||
struct sysfsm_data sysfsm_data;
|
||||
unsigned long cpu_online[CPU_LONGS];
|
||||
@@ -273,12 +305,22 @@ int mcctrl_ikc_is_valid_thread(ihk_os_t os, int cpu);
|
||||
ihk_os_t osnum_to_os(int n);
|
||||
|
||||
/* syscall.c */
|
||||
int init_peer_channel_registry(struct mcctrl_usrdata *ud);
|
||||
void destroy_peer_channel_registry(struct mcctrl_usrdata *ud);
|
||||
int register_peer_channel(struct mcctrl_usrdata *ud, void *key, struct mcctrl_channel *ch);
|
||||
int deregister_peer_channel(struct mcctrl_usrdata *ud, void *key, struct mcctrl_channel *ch);
|
||||
struct mcctrl_channel *get_peer_channel(struct mcctrl_usrdata *ud, void *key);
|
||||
int __do_in_kernel_syscall(ihk_os_t os, struct mcctrl_channel *c, struct syscall_request *sc);
|
||||
int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet);
|
||||
int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
|
||||
struct mcctrl_per_proc_data *ppd);
|
||||
int mcctrl_delete_per_proc_data(struct mcctrl_usrdata *ud, int pid);
|
||||
inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
|
||||
struct mcctrl_usrdata *ud, int pid);
|
||||
|
||||
int mcctrl_add_per_thread_data(struct mcctrl_per_proc_data* ppd,
|
||||
struct task_struct *task, void *data);
|
||||
int mcctrl_delete_per_thread_data(struct mcctrl_per_proc_data* ppd,
|
||||
struct task_struct *task);
|
||||
inline struct mcctrl_per_thread_data *mcctrl_get_per_thread_data(
|
||||
struct mcctrl_per_proc_data *ppd, struct task_struct *task);
|
||||
|
||||
void __return_syscall(ihk_os_t os, struct ikc_scd_packet *packet,
|
||||
long ret, int stid);
|
||||
|
||||
#define PROCFS_NAME_MAX 1000
|
||||
|
||||
@@ -301,6 +343,7 @@ struct procfs_file {
|
||||
};
|
||||
|
||||
void procfs_answer(unsigned int arg, int err);
|
||||
int procfsm_packet_handler(void *os, int msg, int pid, unsigned long arg);
|
||||
void add_tid_entry(int osnum, int pid, int tid);
|
||||
void add_pid_entry(int osnum, int pid);
|
||||
void delete_tid_entry(int osnum, int pid, int tid);
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/resource.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include "mcctrl.h"
|
||||
#include <linux/version.h>
|
||||
#include <linux/semaphore.h>
|
||||
@@ -713,6 +714,57 @@ mckernel_procfs_lseek(struct file *file, loff_t offset, int orig)
|
||||
return file->f_pos;
|
||||
}
|
||||
|
||||
struct procfs_work {
|
||||
void *os;
|
||||
int msg;
|
||||
int pid;
|
||||
unsigned long arg;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
static void procfsm_work_main(struct work_struct *work0)
|
||||
{
|
||||
struct procfs_work *work = container_of(work0, struct procfs_work, work);
|
||||
|
||||
switch (work->msg) {
|
||||
case SCD_MSG_PROCFS_TID_CREATE:
|
||||
add_tid_entry(ihk_host_os_get_index(work->os), work->pid, work->arg);
|
||||
break;
|
||||
|
||||
case SCD_MSG_PROCFS_TID_DELETE:
|
||||
delete_tid_entry(ihk_host_os_get_index(work->os), work->pid, work->arg);
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("%s: unknown work: msg: %d, pid: %d, arg: %lu)\n",
|
||||
__FUNCTION__, work->msg, work->pid, work->arg);
|
||||
break;
|
||||
}
|
||||
|
||||
kfree(work);
|
||||
return;
|
||||
}
|
||||
|
||||
int procfsm_packet_handler(void *os, int msg, int pid, unsigned long arg)
|
||||
{
|
||||
struct procfs_work *work = NULL;
|
||||
|
||||
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work) {
|
||||
printk("%s: kzalloc failed\n", __FUNCTION__);
|
||||
return -1;
|
||||
}
|
||||
|
||||
work->os = os;
|
||||
work->msg = msg;
|
||||
work->pid = pid;
|
||||
work->arg = arg;
|
||||
INIT_WORK(&work->work, &procfsm_work_main);
|
||||
|
||||
schedule_work(&work->work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations mckernel_forward_ro = {
|
||||
.llseek = mckernel_procfs_lseek,
|
||||
.read = mckernel_procfs_read,
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
#include <linux/cred.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mount.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/delay.h>
|
||||
@@ -84,88 +85,96 @@ static void print_dma_lastreq(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
int init_peer_channel_registry(struct mcctrl_usrdata *ud)
|
||||
int mcctrl_add_per_thread_data(struct mcctrl_per_proc_data* ppd,
|
||||
struct task_struct *task, void *data)
|
||||
{
|
||||
ud->keys = kzalloc(sizeof(void *) * ud->num_channels, GFP_KERNEL);
|
||||
if (!ud->keys) {
|
||||
printk("Error: cannot allocate usrdata.keys[].\n");
|
||||
return -ENOMEM;
|
||||
struct mcctrl_per_thread_data *ptd_iter, *ptd = NULL;
|
||||
struct mcctrl_per_thread_data *ptd_alloc = NULL;
|
||||
int hash = (((uint64_t)task >> 4) & MCCTRL_PER_THREAD_DATA_HASH_MASK);
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
|
||||
ptd_alloc = kmalloc(sizeof(*ptd), GFP_ATOMIC);
|
||||
if (!ptd_alloc) {
|
||||
kprintf("%s: error allocate per thread data\n", __FUNCTION__);
|
||||
ret = -ENOMEM;
|
||||
goto out_noalloc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void destroy_peer_channel_registry(struct mcctrl_usrdata *ud)
|
||||
{
|
||||
kfree(ud->keys);
|
||||
ud->keys = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
int register_peer_channel(struct mcctrl_usrdata *ud, void *key, struct mcctrl_channel *ch)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
cpu = ch - ud->channels;
|
||||
if ((cpu < 0) || (ud->num_channels <= cpu)) {
|
||||
printk("register_peer_channel(%p,%p,%p):"
|
||||
"not a syscall channel. cpu=%d\n",
|
||||
ud, key, ch, cpu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ud->keys[cpu] != NULL) {
|
||||
printk("register_peer_channel(%p,%p,%p):"
|
||||
"already registered. cpu=%d\n",
|
||||
ud, key, ch, cpu);
|
||||
/*
|
||||
* When mcexec receives a signal,
|
||||
* it may be finished without doing deregister_peer_channel().
|
||||
* Therefore a substitute registration is necessary.
|
||||
*/
|
||||
#if 0
|
||||
return -EBUSY;
|
||||
#endif
|
||||
}
|
||||
|
||||
ud->keys[cpu] = key;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int deregister_peer_channel(struct mcctrl_usrdata *ud, void *key, struct mcctrl_channel *ch)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
cpu = ch - ud->channels;
|
||||
if ((cpu < 0) || (ud->num_channels <= cpu)) {
|
||||
printk("deregister_peer_channel(%p,%p,%p):"
|
||||
"not a syscall channel. cpu=%d\n",
|
||||
ud, key, ch, cpu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ud->keys[cpu] && (ud->keys[cpu] != key)) {
|
||||
printk("deregister_peer_channel(%p,%p,%p):"
|
||||
"not registered. cpu=%d\n",
|
||||
ud, key, ch, cpu);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
ud->keys[cpu] = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct mcctrl_channel *get_peer_channel(struct mcctrl_usrdata *ud, void *key)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for (cpu = 0; cpu < ud->num_channels; ++cpu) {
|
||||
if (ud->keys[cpu] == key) {
|
||||
return &ud->channels[cpu];
|
||||
/* Check if data for this thread exists and add if not */
|
||||
write_lock_irqsave(&ppd->per_thread_data_hash_lock[hash], flags);
|
||||
list_for_each_entry(ptd_iter, &ppd->per_thread_data_hash[hash], hash) {
|
||||
if (ptd_iter->task == task) {
|
||||
ptd = ptd_iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
if (unlikely(ptd)) {
|
||||
ret = -EBUSY;
|
||||
kfree(ptd_alloc);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ptd = ptd_alloc;
|
||||
ptd->task = task;
|
||||
ptd->data = data;
|
||||
list_add_tail(&ptd->hash, &ppd->per_thread_data_hash[hash]);
|
||||
|
||||
out:
|
||||
write_unlock_irqrestore(&ppd->per_thread_data_hash_lock[hash], flags);
|
||||
out_noalloc:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int mcctrl_delete_per_thread_data(struct mcctrl_per_proc_data* ppd,
|
||||
struct task_struct *task)
|
||||
{
|
||||
struct mcctrl_per_thread_data *ptd_iter, *ptd = NULL;
|
||||
int hash = (((uint64_t)task >> 4) & MCCTRL_PER_THREAD_DATA_HASH_MASK);
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
|
||||
/* Check if data for this thread exists and delete it */
|
||||
write_lock_irqsave(&ppd->per_thread_data_hash_lock[hash], flags);
|
||||
list_for_each_entry(ptd_iter, &ppd->per_thread_data_hash[hash], hash) {
|
||||
if (ptd_iter->task == task) {
|
||||
ptd = ptd_iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!ptd) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_del(&ptd->hash);
|
||||
kfree(ptd);
|
||||
|
||||
out:
|
||||
write_unlock_irqrestore(&ppd->per_thread_data_hash_lock[hash], flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct mcctrl_per_thread_data *mcctrl_get_per_thread_data(struct mcctrl_per_proc_data *ppd, struct task_struct *task)
|
||||
{
|
||||
struct mcctrl_per_thread_data *ptd_iter, *ptd = NULL;
|
||||
int hash = (((uint64_t)task >> 4) & MCCTRL_PER_THREAD_DATA_HASH_MASK);
|
||||
unsigned long flags;
|
||||
|
||||
/* Check if data for this thread exists and return it */
|
||||
read_lock_irqsave(&ppd->per_thread_data_hash_lock[hash], flags);
|
||||
|
||||
list_for_each_entry(ptd_iter, &ppd->per_thread_data_hash[hash], hash) {
|
||||
if (ptd_iter->task == task) {
|
||||
ptd = ptd_iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
read_unlock_irqrestore(&ppd->per_thread_data_hash_lock[hash], flags);
|
||||
return ptd ? ptd->data : NULL;
|
||||
}
|
||||
|
||||
#if 1 /* x86 depend, host OS side */
|
||||
@@ -232,80 +241,156 @@ out:
|
||||
}
|
||||
#endif
|
||||
|
||||
static int __notify_syscall_requester(ihk_os_t os, struct ikc_scd_packet *packet,
|
||||
struct syscall_response *res)
|
||||
{
|
||||
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
|
||||
struct ihk_ikc_channel_desc *c = (usrdata->channels + packet->ref)->c;
|
||||
struct ikc_scd_packet r_packet;
|
||||
int ret = 0;
|
||||
|
||||
/* If spinning, no need for IKC message */
|
||||
if (__sync_bool_compare_and_swap(&res->req_thread_status,
|
||||
IHK_SCD_REQ_THREAD_SPINNING,
|
||||
IHK_SCD_REQ_THREAD_TO_BE_WOKEN)) {
|
||||
dprintk("%s: no need to send IKC message for PID %d\n",
|
||||
__FUNCTION__, packet->pid);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* The thread is not spinning any more, make sure it's descheduled */
|
||||
if (!__sync_bool_compare_and_swap(&res->req_thread_status,
|
||||
IHK_SCD_REQ_THREAD_DESCHEDULED,
|
||||
IHK_SCD_REQ_THREAD_TO_BE_WOKEN)) {
|
||||
printk("%s: WARNING: inconsistent requester status, "
|
||||
"pid: %d, req status: %lu, syscall nr: %lu\n",
|
||||
__FUNCTION__, packet->pid,
|
||||
res->req_thread_status, packet->req.number);
|
||||
dump_stack();
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
r_packet.msg = SCD_MSG_WAKE_UP_SYSCALL_THREAD;
|
||||
r_packet.ttid = packet->req.rtid;
|
||||
ret = ihk_ikc_send(c, &r_packet, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, uint64_t reason)
|
||||
{
|
||||
struct mcctrl_channel *channel;
|
||||
struct ikc_scd_packet *packet;
|
||||
struct syscall_request *req;
|
||||
struct syscall_response *resp;
|
||||
int error;
|
||||
struct wait_queue_head_list_node *wqhln;
|
||||
unsigned long irqflags;
|
||||
struct mcctrl_per_proc_data *ppd;
|
||||
unsigned long phys;
|
||||
|
||||
dprintk("remote_page_fault(%p,%p,%llx)\n", usrdata, fault_addr, reason);
|
||||
dprintk("%s: tid: %d, fault_addr: %lu, reason: %lu\n",
|
||||
__FUNCTION__, task_pid_vnr(current), fault_addr, reason);
|
||||
|
||||
/* Look up per-process structure */
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
|
||||
channel = get_peer_channel(usrdata, current);
|
||||
if (!channel) {
|
||||
error = -ENOENT;
|
||||
printk("remote_page_fault(%p,%p,%llx):channel not found. %d\n",
|
||||
usrdata, fault_addr, reason, error);
|
||||
goto out;
|
||||
if (!ppd) {
|
||||
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
|
||||
__FUNCTION__, task_tgid_vnr(current));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
req = channel->param.request_va;
|
||||
resp = channel->param.response_va;
|
||||
packet = (struct ikc_scd_packet *)mcctrl_get_per_thread_data(ppd, current);
|
||||
if (!packet) {
|
||||
error = -ENOENT;
|
||||
printk("%s: no packet registered for TID %d\n",
|
||||
__FUNCTION__, task_pid_vnr(current));
|
||||
goto out_no_unmap;
|
||||
}
|
||||
|
||||
/* request page fault */
|
||||
req = &packet->req;
|
||||
|
||||
/* Map response structure */
|
||||
phys = ihk_device_map_memory(ihk_os_to_dev(usrdata->os),
|
||||
packet->resp_pa, sizeof(*resp));
|
||||
resp = ihk_device_map_virtual(ihk_os_to_dev(usrdata->os),
|
||||
phys, sizeof(*resp), NULL, 0);
|
||||
|
||||
retry_alloc:
|
||||
wqhln = kmalloc(sizeof(*wqhln), GFP_ATOMIC);
|
||||
if (!wqhln) {
|
||||
printk("WARNING: coudln't alloc wait queue head, retrying..\n");
|
||||
goto retry_alloc;
|
||||
}
|
||||
|
||||
/* Prepare per-thread wait queue head */
|
||||
wqhln->task = current;
|
||||
wqhln->req = 0;
|
||||
init_waitqueue_head(&wqhln->wq_syscall);
|
||||
|
||||
irqflags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
|
||||
/* Add to exact list */
|
||||
list_add_tail(&wqhln->list, &ppd->wq_list_exact);
|
||||
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);
|
||||
|
||||
/* Request page fault */
|
||||
resp->ret = -EFAULT;
|
||||
resp->fault_address = (unsigned long)fault_addr;
|
||||
resp->fault_reason = reason;
|
||||
resp->stid = task_pid_vnr(current);
|
||||
|
||||
#define STATUS_PAGER_COMPLETED 1
|
||||
#define STATUS_PAGE_FAULT 3
|
||||
req->valid = 0;
|
||||
|
||||
if (__notify_syscall_requester(usrdata->os, packet, resp) < 0) {
|
||||
printk("%s: WARNING: failed to notify PID %d\n",
|
||||
__FUNCTION__, packet->pid);
|
||||
}
|
||||
|
||||
mb();
|
||||
resp->status = STATUS_PAGE_FAULT;
|
||||
|
||||
for (;;) {
|
||||
struct wait_queue_head_list_node *wqhln;
|
||||
struct wait_queue_head_list_node *wqhln_iter;
|
||||
unsigned long irqflags;
|
||||
|
||||
retry_alloc:
|
||||
wqhln = kmalloc(sizeof(*wqhln), GFP_KERNEL);
|
||||
if (!wqhln) {
|
||||
printk("WARNING: coudln't alloc wait queue head, retrying..\n");
|
||||
goto retry_alloc;
|
||||
}
|
||||
|
||||
/* Prepare per-process wait queue head */
|
||||
wqhln->pid = task_tgid_vnr(current);
|
||||
wqhln->req = 0;
|
||||
init_waitqueue_head(&wqhln->wq_syscall);
|
||||
|
||||
irqflags = ihk_ikc_spinlock_lock(&channel->wq_list_lock);
|
||||
/* First see if there is a wait queue already */
|
||||
list_for_each_entry(wqhln_iter, &channel->wq_list, list) {
|
||||
if (wqhln_iter->pid == task_tgid_vnr(current)) {
|
||||
kfree(wqhln);
|
||||
wqhln = wqhln_iter;
|
||||
list_del(&wqhln->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
list_add_tail(&wqhln->list, &channel->wq_list);
|
||||
ihk_ikc_spinlock_unlock(&channel->wq_list_lock, irqflags);
|
||||
|
||||
dprintk("%s: tid: %d, fault_addr: %p SLEEPING\n",
|
||||
__FUNCTION__, task_pid_vnr(current), fault_addr);
|
||||
/* wait for response */
|
||||
error = wait_event_interruptible(wqhln->wq_syscall, wqhln->req);
|
||||
|
||||
/* Remove per-process wait queue head */
|
||||
irqflags = ihk_ikc_spinlock_lock(&channel->wq_list_lock);
|
||||
|
||||
/* Remove per-thread wait queue head */
|
||||
irqflags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
|
||||
list_del(&wqhln->list);
|
||||
ihk_ikc_spinlock_unlock(&channel->wq_list_lock, irqflags);
|
||||
kfree(wqhln);
|
||||
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);
|
||||
|
||||
dprintk("%s: tid: %d, fault_addr: %p WOKEN UP\n",
|
||||
__FUNCTION__, task_pid_vnr(current), fault_addr);
|
||||
|
||||
if (error) {
|
||||
kfree(wqhln);
|
||||
printk("remote_page_fault:interrupted. %d\n", error);
|
||||
goto out;
|
||||
}
|
||||
else {
|
||||
/* Update packet reference */
|
||||
packet = wqhln->packet;
|
||||
req = &packet->req;
|
||||
{
|
||||
unsigned long phys2;
|
||||
struct syscall_response *resp2;
|
||||
phys2 = ihk_device_map_memory(ihk_os_to_dev(usrdata->os),
|
||||
packet->resp_pa, sizeof(*resp));
|
||||
resp2 = ihk_device_map_virtual(ihk_os_to_dev(usrdata->os),
|
||||
phys2, sizeof(*resp), NULL, 0);
|
||||
|
||||
if (resp != resp2) {
|
||||
resp = resp2;
|
||||
phys = phys2;
|
||||
printk("%s: updated new remote PA for resp\n", __FUNCTION__);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!req->valid) {
|
||||
printk("remote_page_fault:not valid\n");
|
||||
}
|
||||
@@ -321,23 +406,37 @@ retry_alloc:
|
||||
#define PAGER_REQ_RESUME 0x0101
|
||||
else if (req->args[0] != PAGER_REQ_RESUME) {
|
||||
resp->ret = pager_call(usrdata->os, (void *)req);
|
||||
|
||||
if (__notify_syscall_requester(usrdata->os, packet, resp) < 0) {
|
||||
printk("%s: WARNING: failed to notify PID %d\n",
|
||||
__FUNCTION__, packet->pid);
|
||||
}
|
||||
|
||||
mb();
|
||||
resp->status = STATUS_PAGER_COMPLETED;
|
||||
continue;
|
||||
break;
|
||||
//continue;
|
||||
}
|
||||
else {
|
||||
error = req->args[1];
|
||||
if (error) {
|
||||
printk("remote_page_fault:response %d\n", error);
|
||||
kfree(wqhln);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
kfree(wqhln);
|
||||
error = 0;
|
||||
out:
|
||||
dprintk("remote_page_fault(%p,%p,%llx): %d\n", usrdata, fault_addr, reason, error);
|
||||
ihk_device_unmap_virtual(ihk_os_to_dev(usrdata->os), resp, sizeof(*resp));
|
||||
ihk_device_unmap_memory(ihk_os_to_dev(usrdata->os), phys, sizeof(*resp));
|
||||
|
||||
out_no_unmap:
|
||||
dprintk("%s: tid: %d, fault_addr: %lu, reason: %lu, error: %d\n",
|
||||
__FUNCTION__, task_pid_vnr(current), fault_addr, reason, error);
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -389,8 +488,9 @@ static int rus_page_hash_insert(struct page *page)
|
||||
{
|
||||
int ret = 0;
|
||||
struct rus_page *rp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock(&rus_page_hash_lock);
|
||||
spin_lock_irqsave(&rus_page_hash_lock, flags);
|
||||
|
||||
rp = _rus_page_hash_lookup(page);
|
||||
if (!rp) {
|
||||
@@ -417,7 +517,7 @@ static int rus_page_hash_insert(struct page *page)
|
||||
|
||||
|
||||
out:
|
||||
spin_unlock(&rus_page_hash_lock);
|
||||
spin_unlock_irqrestore(&rus_page_hash_lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -426,8 +526,9 @@ void rus_page_hash_put_pages(void)
|
||||
int i;
|
||||
struct rus_page *rp_iter;
|
||||
struct rus_page *rp_iter_next;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock(&rus_page_hash_lock);
|
||||
spin_lock_irqsave(&rus_page_hash_lock, flags);
|
||||
|
||||
for (i = 0; i < RUS_PAGE_HASH_SIZE; ++i) {
|
||||
|
||||
@@ -440,7 +541,7 @@ void rus_page_hash_put_pages(void)
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&rus_page_hash_lock);
|
||||
spin_unlock_irqrestore(&rus_page_hash_lock, flags);
|
||||
}
|
||||
|
||||
|
||||
@@ -472,27 +573,22 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
#if USE_VM_INSERT_PFN
|
||||
size_t pix;
|
||||
#endif
|
||||
struct mcctrl_per_proc_data *ppd, *ppd_iter;
|
||||
unsigned long flags;
|
||||
struct mcctrl_per_proc_data *ppd;
|
||||
|
||||
dprintk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
|
||||
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
|
||||
|
||||
ppd = NULL;
|
||||
flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
|
||||
|
||||
list_for_each_entry(ppd_iter, &usrdata->per_proc_list, list) {
|
||||
if (ppd_iter->pid == task_tgid_vnr(current) ||
|
||||
ppd_iter->pid == vma->vm_mm->owner->pid) {
|
||||
ppd = ppd_iter;
|
||||
break;
|
||||
}
|
||||
/* Look up per-process structure */
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
|
||||
if (!ppd) {
|
||||
ppd = mcctrl_get_per_proc_data(usrdata, vma->vm_mm->owner->pid);
|
||||
}
|
||||
ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);
|
||||
|
||||
|
||||
if (!ppd) {
|
||||
printk("ERROR: no per process data for pid %d\n", task_tgid_vnr(current));
|
||||
return VM_FAULT_SIGBUS;
|
||||
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
|
||||
__FUNCTION__, task_tgid_vnr(current));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (try = 1; ; ++try) {
|
||||
@@ -626,237 +722,6 @@ reserve_user_space_common(struct mcctrl_usrdata *usrdata, unsigned long start, u
|
||||
return start;
|
||||
}
|
||||
|
||||
//unsigned long last_thread_exec = 0;
|
||||
|
||||
#ifndef DO_USER_MODE
|
||||
static struct {
|
||||
long (*do_sys_open)(int, const char __user *, int, int);
|
||||
long (*sys_lseek)(unsigned int, off_t, unsigned int);
|
||||
long (*sys_read)(unsigned int, char __user *, size_t);
|
||||
long (*sys_write)(unsigned int, const char __user *, size_t);
|
||||
} syscalls;
|
||||
|
||||
void
|
||||
mcctrl_syscall_init(void)
|
||||
{
|
||||
printk("mcctrl_syscall_init\n");
|
||||
syscalls.do_sys_open = (void *)kallsyms_lookup_name("do_sys_open");
|
||||
syscalls.sys_lseek = (void *)kallsyms_lookup_name("sys_lseek");
|
||||
syscalls.sys_read = (void *)kallsyms_lookup_name("sys_read");
|
||||
syscalls.sys_write = (void *)kallsyms_lookup_name("sys_write");
|
||||
printk("syscalls.do_sys_open=%lx\n", (long)syscalls.do_sys_open);
|
||||
printk("syscalls.sys_lseek=%lx\n", (long)syscalls.sys_lseek);
|
||||
printk("syscalls.sys_read=%lx\n", (long)syscalls.sys_read);
|
||||
printk("syscalls.sys_write=%lx\n", (long)syscalls.sys_write);
|
||||
}

static int do_async_copy(ihk_os_t os, unsigned long dest, unsigned long src,
unsigned long size, unsigned int inbound)
{
struct ihk_dma_request request;
ihk_dma_channel_t channel;
unsigned long asize = ALIGN_WAIT_BUF(size);

channel = ihk_device_get_dma_channel(ihk_os_to_dev(os), 0);
if (!channel) {
return -EINVAL;
}

memset(&request, 0, sizeof(request));
request.src_os = inbound ? os : NULL;
request.src_phys = src;
request.dest_os = inbound ? NULL : os;
request.dest_phys = dest;
request.size = size;
request.notify = (void *)(inbound ? dest + asize : src + asize);
request.priv = (void *)1;

*(unsigned long *)phys_to_virt((unsigned long)request.notify) = 0;
#ifdef SC_DEBUG
last_request = request;
#endif

ihk_dma_request(channel, &request);

return 0;
}

//int mcctrl_dma_abort;

static void async_wait(ihk_os_t os, unsigned char *p, int size)
{
int asize = ALIGN_WAIT_BUF(size);
unsigned long long s, w;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);

rdtscll(s);
while (!p[asize]) {
mb();
cpu_relax();
rdtscll(w);
if (w > s + 1024UL * 1024 * 1024 * 10) {
printk("DMA Timed out : %p (%p + %d) => %d\n",
p + asize, p, size, p[asize]);
#ifdef SC_DEBUG
print_dma_lastreq();
#endif
usrdata->mcctrl_dma_abort = 1;
return;
}
}
}

static void clear_wait(unsigned char *p, int size)
{
//int asize = ALIGN_WAIT_BUF(size);
p[size] = 0;
}
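/*
 * Illustrative sketch only: these three helpers implement a simple DMA
 * completion flag. clear_wait() zeroes the notify byte placed just past the
 * payload, do_async_copy() programs the IHK DMA engine to set that byte when
 * the transfer finishes, and async_wait() spins on it with a rough TSC-based
 * timeout. An inbound transfer into a hypothetical kernel buffer buf of
 * length len (names are illustrative) would follow the same pattern as the
 * read/write cases below:
 *
 *	clear_wait(buf, len);
 *	do_async_copy(os, virt_to_phys(buf), remote_phys, len, 1);
 *	async_wait(os, buf, len);
 */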

static unsigned long translate_remote_va(struct mcctrl_channel *c,
unsigned long rva)
{
int i, n;
struct syscall_post *p;

p = c->param.post_va;

n = (int)p->v[0];
if (n < 0 || n >= PAGE_SIZE / sizeof(struct syscall_post)) {
return -EINVAL;
}
for (i = 0; i < n; i++) {
if (p[i + 1].v[0] != 1) {
continue;
}
if (rva >= p[i + 1].v[1] && rva < p[i + 1].v[2]) {
return p[i + 1].v[3] + (rva - p[i + 1].v[1]);
}
}

return -EFAULT;
}
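/*
 * Illustrative reading of the loop above: p->v[0] holds the number of
 * translation entries, and each following entry maps the remote virtual
 * range [v[1], v[2]) to a base address v[3], so a remote address rva inside
 * an entry translates to v[3] + (rva - v[1]). For example, an entry with
 * v[1] = 0x1000, v[2] = 0x3000 and v[3] = 0x80000 would translate
 * rva = 0x1800 to 0x80800.
 */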

//extern struct mcctrl_channel *channels;

#if 0
int __do_in_kernel_syscall(ihk_os_t os, struct mcctrl_channel *c,
struct syscall_request *sc)
{
int ret;
mm_segment_t fs;
unsigned long pa;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);

switch (sc->number) {
case 0: /* read */
case 1024:
if (sc->number & 1024) {
sc->args[1] = translate_remote_va(c, sc->args[1]);
if ((long)sc->args[1] < 0) {
__return_syscall(c, -EFAULT);
return 0;
}
}

clear_wait(c->dma_buf, sc->args[2]);
fs = get_fs();
set_fs(KERNEL_DS);
ret = syscalls.sys_read(sc->args[0], c->dma_buf, sc->args[2]);
if (ret > 0) {
do_async_copy(os, sc->args[1], virt_to_phys(c->dma_buf),
sc->args[2], 0);
set_fs(fs);

async_wait(os, c->dma_buf, sc->args[2]);
}
__return_syscall(c, ret);
return 0;

case 1: /* write */
case 1025:
if (sc->number & 1024) {
sc->args[1] = translate_remote_va(c, sc->args[1]);
if ((long)sc->args[1] < 0) {
__return_syscall(c, -EFAULT);
return 0;
}
}

clear_wait(c->dma_buf, sc->args[2]);
do_async_copy(os, virt_to_phys(c->dma_buf), sc->args[1],
sc->args[2], 1);
fs = get_fs();
set_fs(KERNEL_DS);
async_wait(os, c->dma_buf, sc->args[2]);

ret = syscalls.sys_write(sc->args[0], c->dma_buf, sc->args[2]);
set_fs(fs);

__return_syscall(c, ret);
return 0;

case 2: /* open */
case 1026:
if (sc->number & 1024) {
sc->args[0] = translate_remote_va(c, sc->args[0]);
if ((long)sc->args[0] < 0) {
__return_syscall(c, -EFAULT);
return 0;
}
}

clear_wait(c->dma_buf, 256);
do_async_copy(os, virt_to_phys(c->dma_buf), sc->args[0],
256, 1);
fs = get_fs();
set_fs(KERNEL_DS);
async_wait(os, c->dma_buf, 256);

ret = syscalls.do_sys_open(AT_FDCWD, c->dma_buf, sc->args[1],
sc->args[2]);
set_fs(fs);

__return_syscall(c, ret);
return 0;

case 3: /* Close */
ret = sys_close(sc->args[0]);
__return_syscall(c, ret);
return 0;

case 8: /* lseek */
ret = syscalls.sys_lseek(sc->args[0], sc->args[1], sc->args[2]);
__return_syscall(c, ret);
return 0;

case 56: /* Clone */
usrdata->last_thread_exec++;
if (mcctrl_ikc_is_valid_thread(usrdata->last_thread_exec)) {
printk("Clone notification: %lx\n", sc->args[0]);
if (channels[usrdata->last_thread_exec].param.post_va) {
memcpy(usrdata->channels[usrdata->last_thread_exec].param.post_va,
c->param.post_va, PAGE_SIZE);
}
mcctrl_ikc_send_msg(usrdata->last_thread_exec,
SCD_MSG_SCHEDULE_PROCESS,
usrdata->last_thread_exec, sc->args[0]);
}

__return_syscall(c, 0);
return 0;

default:
if (sc->number & 1024) {
__return_syscall(c, -EFAULT);
return 0;
} else {
return -ENOSYS;
}
}
}
#endif
#endif /* !DO_USER_MODE */

struct pager {
struct list_head list;
struct inode * inode;
@@ -967,7 +832,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)

up(&pager_sem);

newpager = kzalloc(sizeof(*newpager), GFP_KERNEL);
newpager = kzalloc(sizeof(*newpager), GFP_ATOMIC);
if (!newpager) {
error = -ENOMEM;
printk("pager_req_create(%d,%lx):kzalloc failed. %d\n", fd, (long)result_pa, error);
@@ -1223,7 +1088,7 @@ static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off,
uintptr_t phys;

dprintk("pager_req_map(%p,%d,%lx,%lx,%lx)\n", os, fd, len, off, result_rpa);
pager = kzalloc(sizeof(*pager), GFP_KERNEL);
pager = kzalloc(sizeof(*pager), GFP_ATOMIC);
if (!pager) {
error = -ENOMEM;
printk("pager_req_map(%p,%d,%lx,%lx,%lx):kzalloc failed. %d\n", os, fd, len, off, result_rpa, error);
@@ -1475,11 +1340,31 @@ static long pager_call(ihk_os_t os, struct syscall_request *req)
return ret;
}

static void __return_syscall(struct mcctrl_channel *c, int ret)
void __return_syscall(ihk_os_t os, struct ikc_scd_packet *packet,
long ret, int stid)
{
c->param.response_va->ret = ret;
unsigned long phys;
struct syscall_response *res;

phys = ihk_device_map_memory(ihk_os_to_dev(os),
packet->resp_pa, sizeof(*res));
res = ihk_device_map_virtual(ihk_os_to_dev(os),
phys, sizeof(*res), NULL, 0);

/* Map response structure and notify offloading thread */
res->ret = ret;
res->stid = stid;

if (__notify_syscall_requester(os, packet, res) < 0) {
printk("%s: WARNING: failed to notify PID %d\n",
__FUNCTION__, packet->pid);
}

mb();
c->param.response_va->status = 1;
res->status = 1;

ihk_device_unmap_virtual(ihk_os_to_dev(os), res, sizeof(*res));
ihk_device_unmap_memory(ihk_os_to_dev(os), phys, sizeof(*res));
}
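/*
 * Illustrative note on the rewritten helper above: the response is now
 * addressed by the physical address carried in the packet rather than by a
 * per-channel mapping, and mb() is issued before res->status = 1 so that
 * ret/stid are globally visible before the requester observes the completion
 * flag. Callers in this file invoke it as, e.g.:
 *
 *	__return_syscall(os, packet, ret, 0);
 */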

static int remap_user_space(uintptr_t rva, size_t len, int prot)
@@ -1668,13 +1553,14 @@ fail:
#define SCHED_CHECK_SAME_OWNER 0x01
#define SCHED_CHECK_ROOT 0x02

int __do_in_kernel_syscall(ihk_os_t os, struct mcctrl_channel *c, struct syscall_request *sc)
int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet)
{
struct syscall_request *sc = &packet->req;
int error;
long ret = -1;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);

dprintk("__do_in_kernel_syscall(%p,%p,%ld %lx)\n", os, c, sc->number, sc->args[0]);
dprintk("%s: system call: %d\n", __FUNCTION__, sc->args[0]);
switch (sc->number) {
case __NR_mmap:
ret = pager_call(os, sc);
@@ -1683,25 +1569,19 @@ int __do_in_kernel_syscall(ihk_os_t os, struct mcctrl_channel *c, struct syscall
case __NR_munmap:
/* Set new remote page table if not zero */
if (sc->args[2]) {
unsigned long flags;
struct mcctrl_per_proc_data *ppd = NULL;

ppd = kmalloc(sizeof(*ppd), GFP_ATOMIC);
if (!ppd) {
printk("ERROR: allocating per process data\n");
error = -ENOMEM;
goto out;
ppd = mcctrl_get_per_proc_data(usrdata, sc->args[3]);
if (unlikely(!ppd)) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
return -1;
}

ppd->pid = task_tgid_vnr(current);
ppd->rpgtable = sc->args[2];

flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
list_add_tail(&ppd->list, &usrdata->per_proc_list);
ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);

dprintk("pid: %d, rpgtable: 0x%lx added\n",
ppd->pid, ppd->rpgtable);
dprintk("%s: pid: %d, rpgtable: 0x%lx updated\n",
__FUNCTION__, ppd->pid, ppd->rpgtable);
}

ret = clear_pte_range(sc->args[0], sc->args[1]);
@@ -1712,33 +1592,6 @@ int __do_in_kernel_syscall(ihk_os_t os, struct mcctrl_channel *c, struct syscall
break;

case __NR_exit_group: {
unsigned long flags;
struct mcctrl_per_proc_data *ppd = NULL, *ppd_iter;

ppd = NULL;
flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);

list_for_each_entry(ppd_iter, &usrdata->per_proc_list, list) {
if (ppd_iter->pid == task_tgid_vnr(current)) {
ppd = ppd_iter;
break;
}
}

if (ppd) {
list_del(&ppd->list);

dprintk("pid: %d, tid: %d: rpgtable for %d (0x%lx) removed\n",
task_tgid_vnr(current), current->pid, ppd->pid, ppd->rpgtable);

kfree(ppd);
}
else {
printk("WARNING: no per process data for pid %d ?\n",
task_tgid_vnr(current));
}

ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);

/* Make sure the user space handler will be called as well */
error = -ENOSYS;
@@ -1821,10 +1674,11 @@ sched_setparam_out:
break;
}

__return_syscall(c, ret);
__return_syscall(os, packet, ret, 0);

error = 0;
out:
dprintk("__do_in_kernel_syscall(%p,%p,%ld %lx): %d %ld\n", os, c, sc->number, sc->args[0], error, ret);
dprintk("%s: system call: %d, error: %d, ret: %ld\n",
__FUNCTION__, sc->number, sc->args[0], error, ret);
return error;
}
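/*
 * Illustrative note on the dispatcher above: a return value of 0 means the
 * request was completed on the host and the answer has already been posted
 * through __return_syscall(); a negative value (for example the -ENOSYS set
 * in the exit_group path) tells the caller to hand the request over to the
 * user-space mcexec handler instead.
 */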

@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include "mcctrl.h"
#include "sysfs_msg.h"