Implemented a replacement for sdma_txadd_page()

Conflicts:
	kernel/user_sdma.c
Aram Santogidis, 2017-08-14 17:14:53 +09:00
committed by Balazs Gerofi
parent 99c216d91e
commit ae368d97d4
3 changed files with 58 additions and 18 deletions


@@ -736,6 +736,7 @@ static inline int _sdma_txadd_daddr(
 	return rval;
 }
+#ifdef __HFI1_ORIG__
 /**
  * sdma_txadd_page() - add a page to the sdma_txreq
  * @dd: the device to use for mapping
@@ -769,7 +770,6 @@ static inline int sdma_txadd_page(
 		return rval;
 	}
-#ifdef __HFI1_ORIG__
 	addr = dma_map_page(
 		&dd->pcidev->dev,
 		page,
@@ -781,14 +781,12 @@ static inline int sdma_txadd_page(
 		__sdma_txclean(dd, tx);
 		return -ENOSPC;
 	}
-#else
-	//TODO: dma_map_page
-#endif /* __HFI1_ORIG__ */
 	hfi1_cdbg(AIOWRITE, "-");
 	return _sdma_txadd_daddr(
 			dd, SDMA_MAP_PAGE, tx, addr, len);
 }
+#endif /* __HFI1_ORIG__ */
 /**
  * sdma_txadd_daddr() - add a dma address to the sdma_txreq
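After this hunk, all of sdma_txadd_page() sits under __HFI1_ORIG__ and the earlier stub (#else //TODO: dma_map_page) is gone: McKernel has no dma_map_page(), so the port now resolves user buffers to physical addresses itself before calling _sdma_txadd_daddr() (see the user_sdma.c hunks below). A minimal sketch of that replacement path, assembled from the ihk_mc_pt_virt_to_phys() and _sdma_txadd_daddr() calls visible in this diff; the helper name mckernel_txadd_virt() is hypothetical:

	/*
	 * Sketch only: without an IOMMU-backed dma_map_page(), a user
	 * buffer is added to a txreq by walking the process page table
	 * and handing the physical address straight to the descriptor
	 * layer (physical address == DMA address on McKernel).
	 */
	static int mckernel_txadd_virt(struct hfi1_devdata *dd,
				       struct sdma_txreq *tx,
				       const void *virt, unsigned len)
	{
		unsigned long base;

		/* Translate via the current process's page table. */
		if (ihk_mc_pt_virt_to_phys(cpu_local_var(current)->vm->
					   address_space->page_table,
					   virt, &base) < 0)
			return -EFAULT;

		/* Append the region as a single SDMA descriptor. */
		return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, base, len);
	}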


@@ -921,8 +921,10 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 	return sde;
 out:
-#endif /* __HFI1_ORIG__ */
 	return sdma_select_engine_vl(dd, selector, vl);
+#else
+	return &dd->per_sdma[0];
+#endif /* __HFI1_ORIG__ */
 }
 #ifdef __HFI1_ORIG__
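With this change, engine selection on McKernel no longer consults the upstream selection logic at all. Reconstructed from the hunk above, the function now reads roughly as follows (the __HFI1_ORIG__ body is elided):

	struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
						    u32 selector, u8 vl)
	{
	#ifdef __HFI1_ORIG__
		/* ... upstream per-user engine selection ... */
	out:
		return sdma_select_engine_vl(dd, selector, vl);
	#else
		/* Single-engine fallback: every request uses engine 0. */
		return &dd->per_sdma[0];
	#endif /* __HFI1_ORIG__ */
	}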


@@ -564,6 +564,7 @@ int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
 	u16 dlid;
 	u32 selector;
 #ifndef __HFI1_ORIG__
 	if (!hfi1_kregbase) {
 		enum ihk_mc_pt_attribute attr = PTATTR_UNCACHABLE | PTATTR_WRITABLE;
@@ -731,6 +732,7 @@ cq_map_unlock:
 		return -EINVAL;
 	}
+#if 0
 	/* Try to claim the request. */
 	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
 		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
@@ -738,6 +740,7 @@ cq_map_unlock:
 			 info.comp_idx);
 		return -EBADSLT;
 	}
+#endif
 	/*
 	 * All safety checks have been done and this request has been claimed.
 	 */
@@ -839,6 +842,7 @@ cq_map_unlock:
 	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
 	idx++;
+	kprintf("%s: before - save all io_vecs\n", __FUNCTION__);
 	/* Save all the IO vector structures */
 	for (i = 0; i < req->data_iovs; i++) {
 		INIT_LIST_HEAD(&req->iovs[i].list);
@@ -900,12 +904,6 @@ cq_map_unlock:
 	dlid = be16_to_cpu(req->hdr.lrh[1]);
 	selector = dlid_to_selector(dlid);
 	selector += uctxt->ctxt + fd->subctxt;
-	/* TODO: check the rcu stuff */
-	/*
-	 * XXX: didn't we conclude that we don't need to worry about RCU here?
-	 * the mapping is created at driver initialization, the rest of the
-	 * accesses are read-only
-	 */
 	req->sde = sdma_select_user_engine(dd, selector, vl);
 	if (!req->sde || !sdma_running(req->sde)) {
@@ -923,8 +921,8 @@ cq_map_unlock:
 		}
 	}
-	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
-	atomic_inc(&pq->n_reqs);
+	// set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+	// atomic_inc(&pq->n_reqs);
 	req_queued = 1;
 	/* Send the first N packets in the request to buy us some time */
 	ret = user_sdma_send_pkts(req, pcount);
@@ -932,7 +930,7 @@ cq_map_unlock:
 		req->status = ret;
 		goto free_req;
 	}
+	return 0;
 	/*
 	 * It is possible that the SDMA engine would have processed all the
 	 * submitted packets by the time we get here. Therefore, only set
@@ -1085,6 +1083,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 	u32 datalen = 0, queued = 0, data_sent = 0;
 	u64 iov_offset = 0;
+	//TODO: enable test_bit
+#ifdef __HFI1_ORIG__
 	/*
 	 * Check whether any of the completions have come back
 	 * with errors. If so, we are not going to process any
@@ -1094,7 +1094,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
 		return -EFAULT;
 	}
-#ifdef __HFI1_ORIG__
 	tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
 #else
 	tx = kmalloc(sizeof(struct user_sdma_txreq), GFP_KERNEL);
@@ -1221,9 +1220,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		 */
 		while (queued < datalen &&
 		       (req->sent + data_sent) < req->data_len) {
-			unsigned pageidx, len;
-			//TODO: sdma_txadd_page
 #ifdef __HFI1_ORIG__
+			unsigned pageidx, len;
 			unsigned long base, offset;
 			base = (unsigned long)iovec->iov.iov_base;
@@ -1237,15 +1235,45 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 			ret = sdma_txadd_page(pq->dd, &tx->txreq,
 					      iovec->pages[pageidx],
 					      offset, len);
+#else
+			struct sdma_txreq *txreq = &tx->txreq;
+			if ((unlikely(txreq->num_desc == txreq->desc_limit))) {
+				kprintf("%s: ERROR: ext_coal_sdma_tx_descs() should have been called here\n",
+					__FUNCTION__);
+			}
+			unsigned long base;
+			const void *virt = (unsigned long)iovec->iov.iov_base + iov_offset;
+			unsigned len = (unsigned)iovec->iov.iov_len - iov_offset;
+			if (len > PAGE_SIZE)
+				len = PAGE_SIZE;
+			len = min((datalen - queued), len);
+			if (len) {
+				if (ihk_mc_pt_virt_to_phys(cpu_local_var(current)->vm->address_space->page_table, virt, &base) < 0) {
+					/* TODO: shall we make this function fail? *
+					 * Handle this error. */
+					kprintf("%s: ERROR: virt_to_phys failed - 0x%lx\n",
+						__FUNCTION__, virt);
+					//TODO: handle this error
+					return 0;
+				}
+				ret = _sdma_txadd_daddr(pq->dd, SDMA_MAP_PAGE, txreq, base, len);
+				if (ret) {
+					kprintf("%s: ERROR _sdma_txadd_daddr()", __FUNCTION__);
+					return 0;
+				} else {
+					kprintf("%s: txadd: base = 0x%lx, len = %d\n", __FUNCTION__, base, len);
+				}
+			}
+#endif /* sdma_txadd_page */
 			if (ret) {
 				SDMA_DBG(req, "SDMA txreq add page failed %d\n",
 					 ret);
 				goto free_txreq;
 			}
+			//TODO: len may not be initialized
 			iov_offset += len;
 			queued += len;
 			data_sent += len;
+#ifdef __HFI1_ORIG__
 			if (unlikely(queued < datalen &&
 				     pageidx == iovec->npages &&
 				     req->iov_idx < req->data_iovs - 1)) {
@@ -1253,7 +1281,18 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 				iovec = &req->iovs[++req->iov_idx];
 				iov_offset = 0;
 			}
+#else
+			if (unlikely(queued < datalen &&
+				     len == 0 &&
+				     req->iov_idx < req->data_iovs - 1)) {
+				iovec->offset += iov_offset;
+				iovec = &req->iovs[++req->iov_idx];
+				iov_offset = 0;
+			}
+#endif
 		}
+		/* REACHES THIS POINT */
+		return 0;
 		/*
 		 * The txreq was submitted successfully so we can update
 		 * the counters.
@@ -1274,6 +1313,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		npkts++;
 	}
+dosend:
 	ret = sdma_send_txlist(req->sde,
 			       iowait_get_ib_work(&pq->busy),
 			       &req->txps, &count);
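Taken together, the user_sdma.c hunks replace the pinned-page path (sdma_txadd_page() on struct page) with direct translation: each iovec is consumed in chunks of at most PAGE_SIZE bytes, clamped to the payload still owed to the packet (datalen - queued), and each chunk's virtual address is resolved through the process page table before being appended as a descriptor. Two work-in-progress artifacts remain visible in the diff: completion-queue accounting is commented out (set_comp_state()/atomic_inc()), and user_sdma_send_pkts() returns 0 right after the descriptor loop (the "/* REACHES THIS POINT */" marker), before the txreq accounting and the dosend: submission path. Note also that chunks are clamped to PAGE_SIZE without being aligned down to page boundaries, which presumably relies on McKernel mapping user memory with physically contiguous (large) pages.

A standalone sketch of the chunking arithmetic (user-space C; translate() is a stub standing in for ihk_mc_pt_virt_to_phys(), and its identity mapping is for demonstration only):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Stub: the real port walks the process page table here. */
	static int translate(const void *virt, unsigned long *phys)
	{
		*phys = (unsigned long)virt;	/* identity map for the demo */
		return 0;
	}

	int main(void)
	{
		char buf[3 * 4096 + 100];		/* stand-in iovec payload */
		unsigned long iov_len = sizeof(buf);
		unsigned long iov_offset = 0, queued = 0;
		unsigned long datalen = 2 * 4096 + 50;	/* payload owed to this packet */

		while (queued < datalen && iov_offset < iov_len) {
			const void *virt = buf + iov_offset;
			unsigned long base;
			unsigned long len = iov_len - iov_offset;

			if (len > PAGE_SIZE)		/* at most a page per chunk... */
				len = PAGE_SIZE;
			if (len > datalen - queued)	/* ...clamped to what is owed */
				len = datalen - queued;
			if (!len)
				break;
			if (translate(virt, &base) < 0)
				return 1;
			/* the driver calls _sdma_txadd_daddr() here */
			printf("desc: base=0x%lx len=%lu\n", base, len);
			iov_offset += len;
			queued += len;
		}
		return 0;
	}

With the demo sizes this emits three descriptors (4096, 4096, and 50 bytes), mirroring how the committed loop splits one iovec across multiple SDMA descriptors.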