Fix other warnings
Most were harmless, but the change from a volatile cast to ACCESS_ONCE
is probably useful. Expanding the macro, we basically went from:

    m = (volatile struct sdma_vl_map *)dd->sdma_map;

to

    m = *(volatile struct sdma_vl_map **)&(dd->sdma_map);

i.e. the volatile access now sits at a different level: the pointer is
read through a volatile lvalue instead of merely being cast to a
volatile-qualified type after the read.
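For reference, ACCESS_ONCE() in Linux kernels of this era is defined
essentially as in the minimal sketch below; the struct and function names
(devdata_sketch, load_sdma_map) are made up for illustration and are not
taken from the driver:

    /*
     * Minimal sketch, assuming the usual Linux definition of ACCESS_ONCE();
     * the struct and function names below are illustrative only.
     */
    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    struct sdma_vl_map;

    struct devdata_sketch {             /* stand-in for struct hfi1_devdata */
            struct sdma_vl_map *sdma_map;
    };

    static struct sdma_vl_map *load_sdma_map(struct devdata_sketch *dd)
    {
            /*
             * Expands to *(volatile struct sdma_vl_map **)&(dd->sdma_map):
             * the load of the pointer itself goes through a volatile
             * lvalue, so the compiler may not cache, tear or re-read it.
             * The old form only cast the already-loaded value to a
             * volatile-qualified pointer type, which constrains nothing.
             */
            return ACCESS_ONCE(dd->sdma_map);
    }

The sdma_select_engine_vl() hunk below is exactly this pattern applied to
dd->sdma_map.
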
committed by Balazs Gerofi
parent 2dc85ee417
commit 7366da4390
@@ -133,7 +133,7 @@ static inline void ihk_atomic64_inc(ihk_atomic64_t *v)
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  * but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-#define __xg(x) ((volatile long *)(x))
+#define __xg(x) ((volatile typeof(x))(x))
 
 #define xchg4(ptr, x) \
 ({ \
@@ -421,13 +421,10 @@ long hfi1_file_ioctl(void *private_data, unsigned int cmd,
 {
 	struct hfi1_filedata *fd = private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct hfi1_user_info uinfo;
+#if 0
 	struct hfi1_tid_info tinfo;
+#endif
 	int ret = 0;
-	unsigned long addr;
-	int uval = 0;
-	unsigned long ul_uval = 0;
-	u16 uval16 = 0;
 
 	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
 	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
@@ -435,9 +432,6 @@ long hfi1_file_ioctl(void *private_data, unsigned int cmd,
 	    !uctxt)
 		return -EINVAL;
 
-	if (rdtsc() - t_s < 400000000)
-		return;
-
 	switch (cmd) {
 	case HFI1_IOCTL_ASSIGN_CTXT:
 #if 0
@@ -764,8 +764,8 @@ static inline int sdma_txadd_page(
 	u16 len)
 {
 	dma_addr_t addr;
-	int rval;
 #ifdef __HFI1_ORIG__
+	int rval;
 	/* TODO: check this coealesce thing */
 	hfi1_cdbg(AIOWRITE, "+");
 	if ((unlikely(tx->num_desc == tx->desc_limit))) {
@@ -808,7 +808,7 @@ struct sdma_engine *sdma_select_engine_vl(
 	rcu_read_lock();
 	m = rcu_dereference(dd->sdma_map);
 #else
-	m = (volatile struct sdma_vl_map *)dd->sdma_map;
+	m = ACCESS_ONCE(dd->sdma_map);
 #endif /* __HFI1_ORIG__ */
 	if (unlikely(!m)) {
 #ifdef __HFI1_ORIG__
@@ -1730,20 +1730,20 @@ void __sdma_txclean(
 	struct hfi1_devdata *dd,
 	struct sdma_txreq *tx)
 {
-	u16 i;
-
 	if (tx->num_desc) {
+		/* TODO: enable sdma_unmap_desc */
+#if 0
+		u16 i;
 		u8 skip = 0, mode = ahg_mode(tx);
 
-		/* TODO: enable sdma_unmap_desc */
 		/* unmap first */
 		//sdma_unmap_desc(dd, &tx->descp[0]);
 		/* determine number of AHG descriptors to skip */
 		if (mode > SDMA_AHG_APPLY_UPDATE1)
 			skip = mode >> 1;
-		/* TODO: enable sdma_unmap_desc */
 		// for (i = 1 + skip; i < tx->num_desc; i++)
 		// sdma_unmap_desc(dd, &tx->descp[i]);
+#endif
 		tx->num_desc = 0;
 	}
 	kfree(tx->coalesce_buf);
@@ -559,7 +559,9 @@ int hfi1_map_device_addresses(struct hfi1_filedata *fd)
 	pte_t *ptep;
 	enum ihk_mc_pt_attribute attr;
 	void *virt;
+#ifdef __HFI1_ORIG__
 	size_t pgsize;
+#endif
 	unsigned long phys;
 	unsigned long len;
 
@@ -1134,12 +1136,14 @@ int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
 	hfi1_cdbg(AIOWRITE, "-wait_event_interruptible_timeout");
 #else
 	TP("+ polling while(pq->state != SDMA_PKT_Q_ACTIVE)");
+#ifdef VERBOSE_DEBUG
 	{
 		unsigned long ts = rdtsc();
 		while (pq->state != SDMA_PKT_Q_ACTIVE) cpu_pause();
 		SDMA_DBG("%s: waited %lu cycles for SDMA_PKT_Q_ACTIVE\n",
 			__FUNCTION__, rdtsc() - ts);
 	}
+#endif /* VERBOSE_DEBUG */
 	TP("- polling while(pq->state != SDMA_PKT_Q_ACTIVE)");
 #endif /* __HFI1_ORIG__ */
 }
@@ -1233,7 +1237,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 	unsigned maxpkts,
 	struct kmalloc_cache_header *txreq_cache)
 {
-	int ret = 0, count;
+	int ret = 0;
+	u32 count;
 	unsigned npkts = 0;
 	struct user_sdma_txreq *tx = NULL;
 	struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -1437,11 +1442,11 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
 		unsigned pageidx;
 #endif
 		unsigned len;
-		unsigned long base, offset;
+		uintptr_t base;
 		void *virt;
 
-		base = (unsigned long)iovec->iov.iov_base;
-		virt = base + iovec->offset + iov_offset;
+		base = (uintptr_t)iovec->iov.iov_base;
+		virt = (void*)(base + iovec->offset + iov_offset);
 
 		/*
 		 * Resolve iovec->base_phys if virt is out of last page.