HFI1: properly iterate iovecs according to underlying page sizes
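The core of the change is captured by a small piece of arithmetic: instead of requiring an iovec to be physically contiguous, each iovec now records the page backing its current position (base_virt, base_phys) together with that page's size (base_pgsize), so a physical address can be derived for any offset within that page. Below is a minimal user-space sketch of that bookkeeping; lookup_page() is a hypothetical stand-in for the ihk_mc_pt_lookup_pte()/pte_get_phys() calls used in the diff, and the page-size policy it implements is invented purely for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for ihk_mc_pt_lookup_pte()/pte_get_phys():
 * reports the size of the page backing 'virt' and its physical base. */
static int lookup_page(uintptr_t virt, size_t *pgsize, uintptr_t *pgphys)
{
	/* Pretend everything below 2 MiB is backed by 4 KiB pages and
	 * everything above by 2 MiB large pages, identity-mapped. */
	*pgsize = (virt < (2UL << 20)) ? 4096 : (2UL << 20);
	*pgphys = virt & ~(uintptr_t)(*pgsize - 1);
	return 0;
}

int main(void)
{
	uintptr_t virt = 0x2345678;	/* some address inside the iovec */
	size_t base_pgsize;
	uintptr_t base_phys;

	if (lookup_page(virt, &base_pgsize, &base_phys))
		return 1;

	/* base_virt is the start of the page containing virt ... */
	uintptr_t base_virt = virt & ~(uintptr_t)(base_pgsize - 1);
	/* ... so the physical address of virt is just base plus offset. */
	uintptr_t phys = base_phys + (virt - base_virt);

	printf("virt 0x%lx -> page 0x%lx (%zu bytes), phys 0x%lx\n",
	       (unsigned long)virt, (unsigned long)base_virt,
	       base_pgsize, (unsigned long)phys);
	return 0;
}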
@@ -154,25 +154,33 @@ struct sdma_mmu_node;
 struct user_sdma_iovec {
 	struct list_head list;
 	struct iovec iov;
+#ifdef __HFI1_ORIG__
 	/* number of pages in this vector */
 	unsigned npages;
 	/* array of pinned pages for this vector */
-#ifdef __HFI1_ORIG__
 	struct page **pages;
 #else
 	/*
-	 * Physical address corresponding to iov.iov_base if the
-	 * vector if physically contiguous, which in McKernel most
-	 * likely is.
+	 * Physical address corresponding to the page that contains
+	 * iov.iov_base and the corresponding page size.
	 */
-	unsigned long phys;
+	unsigned base_pgsize;
+	unsigned long base_phys;
 #endif
 	/*
 	 * offset into the virtual address space of the vector at
 	 * which we last left off.
 	 */
 	u64 offset;
+#ifdef __HFI1_ORIG__
 	struct sdma_mmu_node *node;
+#else
+	/*
+	 * Virtual address corresponding to base_phys
+	 * (i.e., the beginning of the underlying page).
+	 */
+	void *base_virt;
+#endif
 };
 #ifdef __HFI1_ORIG__
 
@@ -968,40 +976,39 @@ int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
 	}
 #else
 	{
-		unsigned long phys;
-		req->iovs[i].phys = 0;
+		pte_t *ptep;
+		size_t base_pgsize;
+		struct user_sdma_iovec *usi = &req->iovs[i];
+		void *virt = usi->iov.iov_base;
 		/*
-		 * Look up the start and end addresses of this iovec.
-		 * If it's contiguous in physical memory, store the physical
-		 * address in iovs[i].phys
+		 * Look up the PTE for the start of this iovec.
+		 * Store the physical address of the first page and
+		 * the page size in iovec.
		 */
-		if (unlikely(ihk_mc_pt_virt_to_phys(
-			cpu_local_var(current)->vm->address_space->page_table,
-			req->iovs[i].iov.iov_base, &phys) < 0)) {
-			kprintf("%s: ERROR: no valid mapping for 0x%lx\n",
-				__FUNCTION__, req->iovs[i].iov.iov_base);
+		ptep = ihk_mc_pt_lookup_pte(
+			cpu_local_var(current)->vm->address_space->page_table,
+			virt,
+			0,
+			0,
+			&base_pgsize,
+			0);
+		if (unlikely(!ptep || !pte_is_present(ptep))) {
+			kprintf("%s: ERROR: no valid PTE for 0x%lx\n",
+				__FUNCTION__, virt);
 			return -EFAULT;
 		}
 
-		req->iovs[i].phys = phys;
-
-		if (unlikely(ihk_mc_pt_virt_to_phys(
-			cpu_local_var(current)->vm->address_space->page_table,
-			req->iovs[i].iov.iov_base + req->iovs[i].iov.iov_len - 1,
-			&phys) < 0)) {
-			kprintf("%s: ERROR: no valid mapping for 0x%lx\n",
-				__FUNCTION__,
-				req->iovs[i].iov.iov_base +
-				req->iovs[i].iov.iov_len - 1);
-			return -EFAULT;
-		}
-
-		if ((phys - req->iovs[i].phys) !=
-			(req->iovs[i].iov.iov_len - 1)) {
-			kprintf("%s: iovec %d is not physically contiguous\n",
-				__FUNCTION__, i);
-			req->iovs[i].phys = 0;
-		}
+		usi->base_pgsize = (unsigned)base_pgsize;
+		usi->base_phys = pte_get_phys(ptep);
+		usi->base_virt = (void *)((unsigned long)virt &
+			~((unsigned long)usi->base_pgsize - 1));
+		SDMA_DBG("%s: iovec: %d, base_virt: 0x%lx, base_phys: 0x%lx, "
+			"base_pgsize: %lu\n",
+			__FUNCTION__,
+			i,
+			usi->base_virt,
+			usi->base_phys,
+			usi->base_pgsize);
 	}
 #endif /* __HFI1_ORIG__ */
 	req->data_len += req->iovs[i].iov.iov_len;
@@ -1232,11 +1239,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 	struct user_sdma_txreq *tx = NULL;
 	struct hfi1_user_sdma_pkt_q *pq = NULL;
 	struct user_sdma_iovec *iovec = NULL;
-#ifndef __HFI1_ORIG__
-	unsigned long base_phys = 0;
-	unsigned long base_pgsize = 0;
-	void *base_virt = NULL;
-#endif
 
 	TP("+");
 	hfi1_cdbg(AIOWRITE, "+");
@@ -1439,14 +1441,15 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 		unsigned long base, offset;
 		void *virt;
 		pte_t *ptep;
+		size_t base_pgsize;
 
 		base = (unsigned long)iovec->iov.iov_base;
 		virt = base + iovec->offset + iov_offset;
 
 		/*
-		 * Resolve base_phys if iovec is not physically contiguous.
+		 * Resolve iovec->base_phys if virt is out of last page.
		 */
-		if (unlikely(!iovec->phys)) {
+		if (unlikely(virt >= (iovec->base_virt + iovec->base_pgsize))) {
 			ptep = ihk_mc_pt_lookup_pte(
 				cpu_local_var(current)->vm->address_space->page_table,
 				virt, 0, 0, &base_pgsize, 0);
@@ -1456,16 +1459,16 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 				return -EFAULT;
 			}
 
-			base_phys = pte_get_phys(ptep);
-			base_virt = (void *)((unsigned long)virt & ~(base_pgsize - 1));
+			iovec->base_pgsize = (unsigned)base_pgsize;
+			iovec->base_phys = pte_get_phys(ptep);
+			iovec->base_virt = (void *)((unsigned long)virt &
+				~((unsigned long)iovec->base_pgsize - 1));
 			SDMA_DBG("%s: base_virt: 0x%lx, base_phys: 0x%lx, "
 				"base_pgsize: %lu\n",
 				__FUNCTION__,
-				base_virt, base_phys, base_pgsize);
-		}
-		else {
-			base_phys = iovec->phys;
-			base_virt = base;
+				iovec->base_virt,
+				iovec->base_phys,
+				iovec->base_pgsize);
 		}
 
 #ifdef __HFI1_ORIG__
@@ -1476,10 +1479,9 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 		len = offset + req->info.fragsize > PAGE_SIZE ?
 			PAGE_SIZE - offset : req->info.fragsize;
 #else
-		len = iovec->phys ? req->info.fragsize :
-			((base_virt + base_pgsize - virt) >
-			req->info.fragsize) ? req->info.fragsize :
-			(base_virt + base_pgsize - virt);
+		len = (iovec->base_virt + iovec->base_pgsize - virt) >
+			req->info.fragsize ? req->info.fragsize :
+			(iovec->base_virt + iovec->base_pgsize - virt);
 #endif
 		len = min((datalen - queued), len);
 		SDMA_DBG("%s: dl: %d, qd: %d, len: %d\n",
@@ -1489,7 +1491,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 #ifdef __HFI1_ORIG__
 			iovec->pages[pageidx], offset,
 #else
-			base_phys + (virt - base_virt),
+			iovec->base_phys + (virt - iovec->base_virt),
 #endif
 			len);
 		if (ret) {
@@ -2052,6 +2054,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
 #endif /* __HFI1_ORIG__ */
 		}
 	}
+#ifdef __HFI1_ORIG__
 	if (req->data_iovs) {
 		struct sdma_mmu_node *node;
 		int i;
@@ -2061,16 +2064,14 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
 			if (!node)
 				continue;
 
-			//TODO: hfi1_mmu_rb_remove
-#ifdef __HFI1_ORIG__
 			if (unpin)
 				hfi1_mmu_rb_remove(req->pq->handler,
 					&node->rb);
 			else
 				atomic_dec(&node->refcount);
-#endif /* __HFI1_ORIG__ */
 		}
 	}
+#endif /* __HFI1_ORIG__ */
 	kmalloc_cache_free(req->tids);
 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
 	hfi1_cdbg(AIOWRITE, "-");
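For reference, the effect of the new length computation in user_sdma_send_pkts() can be illustrated outside the kernel: each fragment is clamped to whatever remains of the page backing the current cursor, to the fragment size, and to the remaining payload, and the backing page is only re-resolved once the cursor crosses a page boundary. The following stand-alone sketch assumes FRAGSIZE and page_size_of() as illustrative stand-ins for req->info.fragsize and the PTE lookup; the 4 KiB / 2 MiB split is invented for the demonstration.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define FRAGSIZE 8192	/* stand-in for req->info.fragsize */

/* Hypothetical page-size lookup; the diff uses ihk_mc_pt_lookup_pte(). */
static size_t page_size_of(uintptr_t virt)
{
	return (virt < (2UL << 20)) ? 4096 : (2UL << 20);
}

int main(void)
{
	uintptr_t virt = 0x1ff000;	/* iovec cursor, starts near a 4K/2M border */
	size_t datalen = 64 * 1024, queued = 0;
	uintptr_t base_virt = 0;
	size_t base_pgsize = 0;

	while (queued < datalen) {
		size_t len;

		/* Re-resolve the backing page only when the cursor has left it,
		 * mirroring the unlikely(virt >= base_virt + base_pgsize) test. */
		if (virt >= base_virt + base_pgsize) {
			base_pgsize = page_size_of(virt);
			base_virt = virt & ~(uintptr_t)(base_pgsize - 1);
		}

		/* A fragment may not cross the page boundary nor exceed FRAGSIZE... */
		len = base_virt + base_pgsize - virt;
		if (len > FRAGSIZE)
			len = FRAGSIZE;
		/* ...nor run past the remaining payload. */
		if (len > datalen - queued)
			len = datalen - queued;

		printf("fragment: virt 0x%lx len %zu (page 0x%lx, %zu bytes)\n",
		       (unsigned long)virt, len,
		       (unsigned long)base_virt, base_pgsize);

		virt += len;
		queued += len;
	}
	return 0;
}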