commit 64e2639adc (parent 14b360e867)
Author: Aram Santogidis, 2017-08-01 16:17:04 +09:00
Committed by: Balazs Gerofi
17 changed files with 637 additions and 55 deletions

* The relevant files have been modified in order to compile with McKernel.

Conflicts:
	kernel/Makefile.build.in

@@ -1225,6 +1225,13 @@ void cpu_pause(void)
asm volatile("pause" ::: "memory");
}
/* From: kernel-xppsl_1.5.2/arch/x86/include/asm/processor.h */
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
void cpu_relax(void)
{
asm volatile("rep; nop" ::: "memory");
}
/*@
@ assigns \nothing;
@ ensures \interrupt_disabled > 0;
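/* Hypothetical usage sketch (not part of this commit): cpu_relax() is meant
 * for exactly this kind of busy-wait loop, where the PAUSE/REP;NOP hint
 * reduces power and pipeline pressure while spinning. */
static inline void spin_until_nonzero(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}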


@@ -8,6 +8,7 @@ OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o profile.o freeze.o
OBJS += rbtree.o
OBJS += pager.o
OBJS += file_ops.o user_sdma.o sdma.o
# POSTK_DEBUG_ARCH_DEP_18 coredump arch separation.
DEPSRCS=$(wildcard $(SRC)/*.c)


@@ -44,6 +44,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <hfi1/file_ops.h>
#include <hfi1/hfi.h>
#include <hfi1/user_sdma.h>
#include <hfi1/ihk_hfi1_common.h>
#include <errno.h>
#ifdef __HFI1_ORIG__
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
@@ -404,15 +413,23 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
return ret;
}
#endif /* __HFI1_ORIG__ */
#ifdef __HFI1_ORIG__
static ssize_t hfi1_aio_write(struct kiocb *kiocb, const struct iovec *iovec,
unsigned long dim, loff_t offset)
{
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
#else
ssize_t hfi1_aio_write(void *private_data, const struct iovec *iovec, unsigned long dim)
{
struct hfi1_filedata *fd = private_data;
#endif /* __HFI1_ORIG__ */
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
int done = 0, reqs = 0;
hfi1_cdbg(AIOWRITE, "+");
if (!cq || !pq)
return -EIO;
@@ -429,9 +446,15 @@ static ssize_t hfi1_aio_write(struct kiocb *kiocb, const struct iovec *iovec,
int ret;
unsigned long count = 0;
#ifdef __HFI1_ORIG__
ret = hfi1_user_sdma_process_request(
kiocb->ki_filp, (struct iovec *)(iovec + done),
dim, &count);
#else
ret = hfi1_user_sdma_process_request(
private_data, (struct iovec *)(iovec + done),
dim, &count);
#endif /* __HFI1_ORIG__ */
if (ret) {
reqs = ret;
break;
@@ -440,9 +463,10 @@ static ssize_t hfi1_aio_write(struct kiocb *kiocb, const struct iovec *iovec,
done += count;
reqs++;
}
hfi1_cdbg(AIOWRITE, "-");
return reqs;
}
#ifdef __HFI1_ORIG__
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
@@ -1556,3 +1580,4 @@ void hfi1_device_remove(struct hfi1_devdata *dd)
user_remove(dd);
hfi1_diag_remove(dd);
}
#endif /* __HFI1_ORIG__ */


@@ -48,7 +48,11 @@
#ifndef _COMMON_H
#define _COMMON_H
#ifdef __HFI1_ORIG__
#include "update/hfi1_user.h"
#else
#include <hfi1/hfi1_user.h>
#endif /* __HFI1_ORIG__ */
/*
* This file contains defines, structures, etc. that are used


@@ -0,0 +1,9 @@
#ifndef _HFI1_FILE_OPS_H_
#define _HFI1_FILE_OPS_H_
#include <ihk/types.h>
#include <uio.h>
ssize_t hfi1_aio_write(void *private_data, const struct iovec *iovec, unsigned long dim);
#endif
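/* Hypothetical caller sketch (not part of this header): forwarding a
 * writev-style request on an open /dev/hfi1 fd to hfi1_aio_write();
 * "private_data" stands for the per-fd pointer recorded at open() time. */
static inline ssize_t hfi1_writev_sketch(void *private_data,
					 const struct iovec *iov,
					 unsigned long iovcnt)
{
	if (!private_data)
		return -1;	/* not an hfi1 fd */
	return hfi1_aio_write(private_data, iov, iovcnt);
}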


@@ -47,6 +47,10 @@
*
*/
#include <hfi1/user_sdma.h>
#ifdef __HFI1_ORIG__
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -92,7 +96,10 @@
#define DROP_PACKET_OFF 0
#define DROP_PACKET_ON 1
#endif /* __HFI1_ORIG__ */
extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
@@ -125,6 +132,8 @@ extern unsigned long hfi1_cap_mask;
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
#ifdef __HFI1_ORIG__
/*
* per driver stats, either not device nor port-specific, or
* summed over all of the devices and ports.
@@ -194,7 +203,7 @@ struct tid_queue {
u32 enqueue; /* count of tid enqueues */
u32 dequeue; /* count of tid dequeues */
};
struct hfi1_ctxtdata {
/* shadow the ctxt's RcvCtrl register */
u64 rcvctrl;
@@ -358,6 +367,15 @@ struct hfi1_ctxtdata {
int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
};
#endif /* __HFI1_ORIG__ */
#ifndef __HFI1_ORIG__
struct hfi1_ctxtdata {
unsigned ctxt;
};
#endif /* __HFI1_ORIG__ */
#ifdef __HFI1_ORIG__
/*
* Represents a single packet at a high level. Put commonly computed things in
* here so we do not have to keep doing them over and over. The rule of thumb is
@@ -584,6 +602,7 @@ struct vl_arb_cache {
struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};
/*
* The structure below encapsulates data relevant to a physical IB Port.
* Current chips support only one such port, but the separation
@@ -788,7 +807,15 @@ struct hfi1_pportdata {
/* Does this port need to prescan for FECNs */
bool cc_prescan;
};
#endif /* __HFI1_ORIG__ */
#ifndef __HFI1_ORIG__
struct hfi1_pportdata {
};
#endif /* __HFI1_ORIG__ */
#ifdef __HFI1_ORIG__
typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
typedef void (*opcode_handler)(struct hfi1_packet *packet);
@@ -857,6 +884,7 @@ struct sdma_vl_map;
#define SERIAL_MAX 16 /* length of the serial number */
typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
struct hfi1_ibdev verbs_dev; /* must be first */
struct list_head list;
@@ -1169,7 +1197,7 @@ struct hfi1_devdata {
/* hfi1_pportdata, points to array of (physical) port-specific
* data structs, indexed by pidx (0..n-1)
*/
struct hfi1_pportdata *pport; //used
/* receive context data */
struct hfi1_ctxtdata **rcd;
u64 __percpu *int_counter;
@@ -1221,6 +1249,20 @@ struct hfi1_devdata {
struct kobject kobj;
};
#endif /* __HFI1_ORIG__ */
#ifndef __HFI1_ORIG__
struct hfi1_devdata {
struct list_head list;
/* pointers to related structs for this device */
/* pci access data structure */
struct pci_dev *pcidev;
dma_addr_t sdma_pad_phys;
/* array of engines sized by num_sdma */
struct sdma_engine *per_sdma;
struct hfi1_pportdata *pport;
};
#endif /* __HFI1_ORIG__ */
/* 8051 firmware version helper */
#define dc8051_ver(a, b) ((a) << 8 | (b))
@@ -1230,6 +1272,8 @@ struct hfi1_devdata {
#define PT_EAGER 1
#define PT_INVALID 2
#ifdef __HFI1_ORIG__
struct tid_rb_node;
struct mmu_rb_node;
struct mmu_rb_handler;
@@ -1254,6 +1298,19 @@ struct hfi1_filedata {
spinlock_t invalid_lock;
struct mm_struct *mm;
};
#endif /* __HFI1_ORIG__ */
#ifndef __HFI1_ORIG__
/* Private data for file operations */
struct hfi1_filedata {
struct hfi1_ctxtdata *uctxt;
unsigned subctxt;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
};
#endif /* __HFI1_ORIG__ */
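/* Hypothetical sketch (not part of the commit): wiring up the reduced
 * hfi1_filedata on the McKernel side once the SDMA queues exist; the
 * helper name and its callers are illustrative only. */
static inline void hfi1_filedata_init_sketch(struct hfi1_filedata *fd,
					     struct hfi1_ctxtdata *uctxt,
					     struct hfi1_user_sdma_pkt_q *pq,
					     struct hfi1_user_sdma_comp_q *cq)
{
	fd->uctxt = uctxt;
	fd->subctxt = 0;
	fd->pq = pq;	/* consumed by hfi1_aio_write() via fd->pq */
	fd->cq = cq;	/* and fd->cq */
}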
#ifdef __HFI1_ORIG__
extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
@@ -2039,10 +2096,12 @@ void hfi1_format_hwerrors(u64 hwerrs,
const struct hfi1_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t lmsg);
#endif /* __HFI1_ORIG__ */
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
#define OPCODE_CHECK_MASK_DISABLED 0x0
#ifdef __HFI1_ORIG__
static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
{
@@ -2149,4 +2208,6 @@ __print_symbolic(opcode, \
ib_opcode_name(UD_SEND_ONLY), \
ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(CNP))
#endif /* __HFI1_ORIG__ */
#endif /* _HFI1_KERNEL_H */


@@ -56,8 +56,12 @@
#ifndef _LINUX__HFI1_USER_H
#define _LINUX__HFI1_USER_H
#ifdef __HFI1_ORIG__
#include <linux/types.h>
#endif /* __HFI1_ORIG__ */
/*
* This version number is given to the driver by the user code during
* initialization in the spu_userversion field of hfi1_user_info, so
@@ -211,6 +215,8 @@ struct hfi1_cmd;
#define HFI1_POLL_TYPE_ANYRCV 0x0
#define HFI1_POLL_TYPE_URGENT 0x1
#ifdef __HFI1_ORIG__
/*
* This structure is passed to the driver to tell it where
* user code buffers are, sizes, etc. The offsets and sizes of the
@@ -264,6 +270,7 @@ struct hfi1_tid_info {
/* length of transfer buffer programmed by this request */
__u32 length;
};
#endif /* __HFI1_ORIG__ */
enum hfi1_sdma_comp_state {
FREE = 0,
@@ -280,6 +287,8 @@ struct hfi1_sdma_comp_entry {
__u32 errcode;
};
#ifdef __HFI1_ORIG__
/*
* Device status and notifications from driver to user-space.
*/
@@ -353,6 +362,7 @@ struct hfi1_base_info {
__u64 subctxt_rcvegrbuf;
__u64 subctxt_rcvhdrbuf;
};
#endif /* __HFI1_ORIG__ */
enum sdma_req_opcode {
EXPECTED = 0,
@@ -391,7 +401,7 @@ struct sdma_req_info {
* in charge of managing its own ring.
*/
__u16 comp_idx;
} __attribute__((packed));
/*
* SW KDETH header.
@@ -402,7 +412,7 @@ struct hfi1_kdeth_header {
__le16 jkey;
__le16 hcrc;
__le32 swdata[7];
} __attribute__((packed));
/*
* Structure describing the headers that User space uses. The
@@ -413,8 +423,9 @@ struct hfi1_pkt_header {
__be16 lrh[4];
__be32 bth[3];
struct hfi1_kdeth_header kdeth;
} __attribute__((packed));
#ifdef __HFI1_ORIG__
/*
* The list of usermode accessible registers.
@@ -435,5 +446,5 @@ enum hfi1_ureg {
/* (RW) Receive TID flow table */
ur_rcvtidflowtable = 256
};
#endif /* __HFI1_ORIG__ */
#endif /* _LINIUX__HFI1_USER_H */


@@ -0,0 +1,218 @@
#ifndef _IHK_HFI1_COMMON_H_
#define _IHK_HFI1_COMMON_H_
#include <ihk/atomic.h>
#include <ihk/types.h>
#include <kmalloc.h>
#include <lwk/compiler.h>
#include <arch-lock.h>
#include <page.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/* From: mckernel/kernel/include/xpmem_private.h */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define min(x, y) ({ \
__typeof__(x) _min1 = (x); \
__typeof__(y) _min2 = (y); \
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2;})
#define BIT_ULL(nr) (1ULL << (nr))
/* Disable debug macros */
#define hfi1_cdbg(...) do {} while(0)
#define SDMA_DBG(...) do {} while(0)
#define WARN_ON(...) do {} while(0)
#define WARN_ON_ONCE WARN_ON // use the local definition
#define trace_hfi1_ahg_allocate(...) do {} while(0)
#define trace_hfi1_ahg_deallocate(...) do {} while(0)
/* Byte swapping */
#define be32_to_cpu(x) __builtin_bswap32(x)
#define be16_to_cpu(x) __builtin_bswap16(x)
#define le32_to_cpu(x) x
#define le16_to_cpu(x) x
#define cpu_to_le16(x) x
#define cpu_to_le32(x) x
#define cpu_to_le64(x) x
#define __cpu_to_le64(x) x
#define __le64_to_cpu(x) x
#define __le32_to_cpu(x) x
#define __le16_to_cpu(x) x
//TODO: double-check
#define cpu_to_be16(x) __builtin_bswap16(x)
#define cpu_to_be32(x) __builtin_bswap32(x)
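/* Note (assumption, not in the commit): the identity le16/32/64 conversion
 * macros above are only valid on a little-endian host, which holds for the
 * x86 targets McKernel runs on.  A hypothetical runtime sanity check: */
static inline int host_is_little_endian(void)
{
	const unsigned short probe = 1;

	return *(const unsigned char *)&probe == 1;
}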
/* Compiler */
#ifndef likely
# define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
/* From: kernel-xppsl_1.5.2/include/linux/compiler.h */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
/* Atomic ops */
#define atomic_inc ihk_atomic_inc
#define atomic_dec ihk_atomic_dec
#define atomic_read ihk_atomic_read
#define atomic_add ihk_atomic_add
#define atomic_t ihk_atomic_t
/* TODO***********************************/
#define spin_lock_irqsave(lock, flags) do {} while(0)
#define spin_unlock_irqsave(lock, flags) do {} while(0)
#define spin_unlock_irqrestore(lock, flags) do {} while(0)
typedef ihk_spinlock_t spinlock_t;
#define ____cacheline_aligned_in_smp
#define __iomem
#define spin_lock(...) do {} while(0)
#define spin_unlock(...) do {} while(0)
#define smp_wmb() barrier()
#define smp_rmb() barrier()
/***********************************************/
/* TODO: Figure the corresponding flag for McKernel-kmalloc()*/
#define __GFP_ZERO 0
#define GFP_KERNEL 0
/* kernel-xppsl_1.5.2/include/linux/seqlock.h */
/***********************************************/
typedef struct seqcount {
unsigned sequence;
} seqcount_t;
typedef struct {
struct seqcount seqcount;
spinlock_t lock;
} seqlock_t;
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
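/* Hypothetical reader sketch (not in the commit): a retry loop built on the
 * minimal seqcount above, standing in for Linux's read_seqcount_retry(). */
static inline unsigned long seqcount_read_sketch(const seqcount_t *s,
						 const volatile unsigned long *val)
{
	unsigned start;
	unsigned long v;

	do {
		start = raw_seqcount_begin(s);
		v = *val;
		smp_rmb();
	} while (ACCESS_ONCE(s->sequence) != start);
	return v;
}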
/***********************************************/
/* Misc */
/* From: kernel-xppsl_1.5.2/include/linux/kernel.h */
#define min_t(type, x, y) ({ \
type __min1 = (x); \
type __min2 = (y); \
__min1 < __min2 ? __min1: __min2; })
#define SIZE_MAX (~(size_t)0)
#define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */
#define PIO_BLOCK_SIZE 64 /* bytes */
/* From: chip.c/h */
//num_vls = HFI1_MAX_VLS_SUPPORTED;
//num_vls = dd->chip_sdma_engines;
#define HFI1_MAX_VLS_SUPPORTED 8
/* integer typedefs */
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
typedef __signed__ long __s64;
typedef unsigned long __u64;
typedef __u64 u64;
typedef __s64 s64;
typedef __u32 u32;
typedef __s32 s32;
typedef __u16 u16;
typedef __s16 s16;
typedef __u8 u8;
typedef __s8 s8;
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __le32;
typedef __u32 __be32;
typedef __u64 __le64;
typedef __u64 __be64;
typedef unsigned int uint;
/* TODO: There should be a header file that I can include */
typedef _Bool bool;
#define false 0
#define true !false
/* TODO: double check this typedef */
typedef u64 dma_addr_t;
/* From: kernel-xppsl_1.5.2/include/linux/types.h */
typedef unsigned gfp_t;
/* kernel-xppsl_1.5.2/include/asm-generic/io.h */
#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
#endif
#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
/* TODO: I'm not sure if this definition is correct */
#define LOCK_PREFIX "lock; "
/* From: kernel-xppsl_1.5.2/arch/x86/include/asm/bitops.h */
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#define LINUX_ADDR BITOP_ADDR(addr)
/* From: kernel-xppsl_1.5.2/arch/x86/include/asm/bitops.h */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
"sbb %0,%0" : "=r" (oldbit), LINUX_ADDR : "Ir" (nr) : "memory");
return oldbit;
}
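/* Hypothetical usage sketch (not in the commit): claiming a free slot in a
 * request-in-use bitmap with test_and_set_bit(), in the spirit of the
 * req_in_use field of struct hfi1_user_sdma_pkt_q. */
static inline int claim_slot_sketch(volatile unsigned long *in_use, int nslots)
{
	int i;

	for (i = 0; i < nslots; i++)
		if (!test_and_set_bit(i, in_use))
			return i;	/* caller now owns slot i */
	return -1;			/* all slots busy */
}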
/* From: kernel-xppsl_1.5.2/arch/x86/include/asm/atomic.h */
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/* From: kernel-xppsl_1.5.2/include/linux/slab.h */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
if (size != 0 && n > SIZE_MAX / size)
return NULL;
return __kmalloc(n * size, flags);
}
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc(n * size, flags | __GFP_ZERO);
}
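/* Hypothetical usage sketch (not in the commit): bounded, zeroed array
 * allocation with the helpers above; kmalloc_array() rejects n * size
 * overflow and kcalloc() additionally requests zeroed memory via __GFP_ZERO. */
static inline u32 *alloc_counters_sketch(size_t n)
{
	return kcalloc(n, sizeof(u32), GFP_KERNEL);
}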
#endif


@@ -47,6 +47,7 @@
*
*/
#ifdef __HFI1_ORIG__
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
@@ -58,6 +59,7 @@
* @work: pointer to work structure
*/
typedef void (*restart_t)(struct work_struct *work);
#endif /* __HFI1_ORIG__ */
#define IOWAIT_PENDING_IB 0x0
#define IOWAIT_PENDING_TID 0x1
@@ -88,7 +90,11 @@ struct sdma_engine;
*/
struct iowait;
struct iowait_work {
#ifdef __HFI1_ORIG__
struct work_struct iowork;
#else
//TODO:
#endif /* __HFI1_ORIG__ */
struct list_head tx_head;
struct iowait *iow;
};
@@ -140,9 +146,13 @@ struct iowait {
unsigned seq);
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
#ifdef __HFI1_ORIG__
seqlock_t *lock;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
#else
//TODO:
#endif /* __HFI1_ORIG__ */
atomic_t sdma_busy;
atomic_t pio_busy;
u32 count;
@@ -154,6 +164,8 @@ struct iowait {
#define SDMA_AVAIL_REASON 0
#ifdef __HFI1_ORIG__
void iowait_set_flag(struct iowait *wait, u32 flag);
bool iowait_flag_set(struct iowait *wait, u32 flag);
void iowait_clear_flag(struct iowait *wait, u32 flag);
@@ -182,6 +194,7 @@ static inline bool iowait_schedule(
struct workqueue_struct *wq,
int cpu)
{
hfi1_cdbg(AIOWRITE, ".");
return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
}
@@ -196,6 +209,7 @@ static inline bool iowait_tid_schedule(
struct workqueue_struct *wq,
int cpu)
{
hfi1_cdbg(AIOWRITE, ".");
return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
}
@@ -208,6 +222,7 @@ static inline bool iowait_tid_schedule(
*/
static inline void iowait_sdma_drain(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
}
@@ -219,6 +234,7 @@ static inline void iowait_sdma_drain(struct iowait *wait)
*/
static inline int iowait_sdma_pending(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
return atomic_read(&wait->sdma_busy);
}
@@ -228,17 +244,21 @@ static inline int iowait_sdma_pending(struct iowait *wait)
*/
static inline void iowait_sdma_inc(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
atomic_inc(&wait->sdma_busy);
}
#endif
/**
* iowait_sdma_add - add count to pending
* @wait: iowait_work structure
*/
static inline void iowait_sdma_add(struct iowait *wait, int count)
{
hfi1_cdbg(AIOWRITE, ".");
atomic_add(count, &wait->sdma_busy);
}
#ifdef __HFI1_ORIG__
/**
* iowait_pio_drain() - wait for pios to drain
@@ -250,6 +270,7 @@ static inline void iowait_sdma_add(struct iowait *wait, int count)
*/
static inline void iowait_pio_drain(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
wait_event_timeout(wait->wait_pio,
!atomic_read(&wait->pio_busy),
HZ);
@@ -263,6 +284,7 @@ static inline void iowait_pio_drain(struct iowait *wait)
*/
static inline int iowait_pio_pending(struct iowait *w)
{
hfi1_cdbg(AIOWRITE, ".");
return atomic_read(&w->pio_busy);
}
@@ -274,6 +296,7 @@ static inline int iowait_pio_pending(struct iowait *w)
*/
static inline void iowait_drain_wakeup(struct iowait *w)
{
hfi1_cdbg(AIOWRITE, ".");
wake_up(&w->wait_dma);
wake_up(&w->wait_pio);
if (w->sdma_drained)
@@ -286,6 +309,7 @@ static inline void iowait_drain_wakeup(struct iowait *w)
*/
static inline void iowait_pio_inc(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
atomic_inc(&wait->pio_busy);
}
@@ -295,6 +319,7 @@ static inline void iowait_pio_inc(struct iowait *wait)
*/
static inline int iowait_pio_dec(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
if (!wait)
return 0;
return atomic_dec_and_test(&wait->pio_busy);
@@ -306,6 +331,7 @@ static inline int iowait_pio_dec(struct iowait *wait)
*/
static inline int iowait_sdma_dec(struct iowait *wait)
{
hfi1_cdbg(AIOWRITE, ".");
if (!wait)
return 0;
return atomic_dec_and_test(&wait->sdma_busy);
@@ -319,6 +345,7 @@ static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
struct sdma_txreq *tx = NULL;
hfi1_cdbg(AIOWRITE, ".");
if (!list_empty(&wait->tx_head)) {
tx = list_first_entry(
&wait->tx_head,
@@ -333,6 +360,7 @@ static inline u16 iowait_get_desc(struct iowait_work *w)
{
u16 num_desc = 0;
struct sdma_txreq *tx = NULL;
hfi1_cdbg(AIOWRITE, ".");
if (!list_empty(&w->tx_head)) {
tx = list_first_entry(
@@ -348,6 +376,7 @@ static inline u32 iowait_get_all_desc(struct iowait *w)
{
u32 num_desc = 0;
hfi1_cdbg(AIOWRITE, ".");
num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]);
num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]);
return num_desc;
@@ -359,9 +388,11 @@ static inline u32 iowait_get_all_desc(struct iowait *w)
*/
static inline bool iowait_packet_queued(struct iowait_work *w)
{
hfi1_cdbg(AIOWRITE, ".");
return !list_empty(&w->tx_head);
}
#endif /* __HFI1_ORIG__ */
/**
* inc_wait_count - increment wait counts
* @w: the log work struct
@@ -369,11 +400,13 @@ static inline bool iowait_packet_queued(struct iowait_work *w)
*/
static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
{
hfi1_cdbg(AIOWRITE, ".");
if (!w)
return;
w->iow->tx_count++;
w->iow->count += n;
}
#ifdef __HFI1_ORIG__
/**
* iowait_get_tid_work - return iowait_work for tid SE
@@ -381,15 +414,18 @@ static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
*/
static inline struct iowait_work *iowait_get_tid_work(struct iowait *w)
{
hfi1_cdbg(AIOWRITE, ".");
return &w->wait[IOWAIT_TID_SE];
}
#endif /* __HFI1_ORIG__ */
/**
* iowait_get_ib_work - return iowait_work for ib SE
* @w: the iowait struct
*/
static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
{
hfi1_cdbg(AIOWRITE, ".");
return &w->wait[IOWAIT_IB_SE];
}
@@ -399,12 +435,15 @@ static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
*/
static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
{
hfi1_cdbg(AIOWRITE, ".");
if (likely(w))
return w->iow;
return NULL;
}
#ifdef __HFI1_ORIG__
void iowait_cancel_work(struct iowait *w);
int iowait_set_work_flag(struct iowait_work *w);
#endif /* __HFI1_ORIG__ */
#endif


@@ -47,6 +47,11 @@
*
*/
#include <hfi1/ihk_hfi1_common.h>
#include <hfi1/sdma_txreq.h>
#ifdef __HFI1_ORIG__
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
@@ -57,6 +62,11 @@
#include "verbs.h" #include "verbs.h"
#include "sdma_txreq.h" #include "sdma_txreq.h"
#define hfi1_cdbg(which, fmt, ...) \
__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
extern void __hfi1_trace_AIOWRITE(const char *func, char *fmt, ...);
#endif /* __HFI1_ORIG__ */
/* Hardware limit */ /* Hardware limit */
#define MAX_DESC 64 #define MAX_DESC 64
/* Hardware limit for SDMA packet size */ /* Hardware limit for SDMA packet size */
@@ -192,6 +202,7 @@ struct sdma_set_state_action {
unsigned go_s99_running_totrue:1;
};
#ifdef __HFI1_ORIG__
struct sdma_state {
struct kref kref;
struct completion comp;
@@ -203,6 +214,11 @@ struct sdma_state {
unsigned previous_op;
enum sdma_events last_event;
};
#else
struct sdma_state {
enum sdma_states current_state;
};
#endif /* __HFI1_ORIG__ */
/**
* DOC: sdma exported routines
@@ -394,6 +410,7 @@ struct sdma_engine {
/* private: */
struct list_head dmawait;
#ifdef __HFI1_ORIG__
/* CONFIG SDMA for now, just blindly duplicate */
/* private: */
struct tasklet_struct sdma_hw_clean_up_task
@@ -409,14 +426,20 @@ struct sdma_engine {
u32 progress_check_head;
/* private: */
struct work_struct flush_worker;
#endif /* __HFI1_ORIG__ */
/* protect flush list */
spinlock_t flushlist_lock;
/* private: */
struct list_head flushlist;
#ifdef __HFI1_ORIG__
struct cpumask cpu_mask;
struct kobject kobj;
#endif /* __HFI1_ORIG__ */
};
#ifdef __HFI1_ORIG__
int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
@@ -441,6 +464,7 @@ static inline int sdma_empty(struct sdma_engine *sde)
return sde->descq_tail == sde->descq_head;
}
#endif /* __HFI1_ORIG__ */
static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
return sde->descq_cnt -
@@ -478,9 +502,11 @@ static inline int sdma_running(struct sdma_engine *engine)
unsigned long flags;
int ret;
hfi1_cdbg(AIOWRITE, "+");
spin_lock_irqsave(&engine->tail_lock, flags);
ret = __sdma_running(engine);
spin_unlock_irqrestore(&engine->tail_lock, flags);
hfi1_cdbg(AIOWRITE, "-");
return ret;
}
@@ -619,6 +645,7 @@ static inline int sdma_txinit(
{
return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
#ifdef __HFI1_ORIG__
/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
@@ -639,6 +666,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
>> SDMA_DESC0_PHY_ADDR_SHIFT;
}
#endif /* __HFI1_ORIG__ */
static inline void make_tx_sdma_desc(
struct sdma_txreq *tx,
int type,
@@ -666,7 +694,6 @@ static inline void make_tx_sdma_desc(
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
int type, void *kvaddr, struct page *page,
unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
@@ -674,6 +701,8 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if (tx->num_desc)
__sdma_txclean(dd, tx);
}
#ifdef __HFI1_ORIG__
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
@@ -689,6 +718,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
SDMA_DESC1_INT_REQ_FLAG);
}
#endif /* __HFI1_ORIG__ */
static inline int _sdma_txadd_daddr(
struct hfi1_devdata *dd,
int type,
@@ -707,11 +737,13 @@ static inline int _sdma_txadd_daddr(
/* special cases for last */
if (!tx->tlen) {
if (tx->packet_len & (sizeof(u32) - 1)) {
//TODO: _pad_sdma_tx_descs
//rval = _pad_sdma_tx_descs(dd, tx);
if (rval)
return rval;
} else {
//TODO: _sdma_close_tx
//_sdma_close_tx(dd, tx);
}
}
tx->num_desc++;
@@ -743,7 +775,7 @@ static inline int sdma_txadd_page(
{
dma_addr_t addr;
int rval;
hfi1_cdbg(AIOWRITE, "+");
if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
NULL, page, offset, len);
@@ -751,6 +783,7 @@ static inline int sdma_txadd_page(
return rval;
}
#ifdef __HFI1_ORIG__
addr = dma_map_page(
&dd->pcidev->dev,
page,
@@ -762,7 +795,11 @@ static inline int sdma_txadd_page(
__sdma_txclean(dd, tx);
return -ENOSPC;
}
#else
//TODO: dma_map_page
#endif /* __HFI1_ORIG__ */
hfi1_cdbg(AIOWRITE, "-");
return _sdma_txadd_daddr(
dd, SDMA_MAP_PAGE, tx, addr, len);
}
@@ -833,6 +870,7 @@ static inline int sdma_txadd_kvaddr(
return rval;
}
#ifdef __HFI1_ORIG__
addr = dma_map_single(
&dd->pcidev->dev,
kvaddr,
@@ -843,6 +881,9 @@ static inline int sdma_txadd_kvaddr(
__sdma_txclean(dd, tx);
return -ENOSPC;
}
#else
//TODO: dma_map_single
#endif /* __HFI1_ORIG__ */
return _sdma_txadd_daddr(
dd, SDMA_MAP_SINGLE, tx, addr, len);
@@ -885,6 +926,7 @@ static inline u32 sdma_build_ahg_descriptor(
((data & SDMA_AHG_VALUE_MASK) <<
SDMA_AHG_VALUE_SHIFT));
}
#ifdef __HFI1_ORIG__
/**
* sdma_progress - use seq number of detect head progress
@@ -1061,6 +1103,7 @@ struct sdma_engine *sdma_select_engine_sc(
u32 selector,
u8 sc5);
#endif /* __HFI1_ORIG__ */
struct sdma_engine *sdma_select_engine_vl(
struct hfi1_devdata *dd,
u32 selector,
@@ -1068,6 +1111,8 @@ struct sdma_engine *sdma_select_engine_vl(
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
u32 selector, u8 vl);
#ifdef __HFI1_ORIG__
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
size_t count);
@@ -1095,4 +1140,5 @@ extern uint mod_num_sdma;
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
#endif /* __HFI1_ORIG__ */
#endif


@@ -48,6 +48,8 @@
#ifndef HFI1_SDMA_TXREQ_H
#define HFI1_SDMA_TXREQ_H
#include <hfi1/iowait.h>
/* increased for AHG */
#define NUM_DESC 6
@@ -105,7 +107,7 @@ struct sdma_txreq {
/* private: */
struct iowait *wait;
/* private: */
callback_t complete;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
u64 sn;
#endif


@@ -46,8 +46,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifdef __HFI1_ORIG__
#include "hfi.h"
#endif /* __HFI1_ORIG__ */
#define EXP_TID_TIDLEN_MASK 0x7FFULL
#define EXP_TID_TIDLEN_SHIFT 0
@@ -70,6 +71,7 @@
(tid) |= EXP_TID_SET(field, (value)); \
} while (0)
#ifdef __HFI1_ORIG__
struct tid_group {
struct list_head list;
unsigned base;
@@ -154,4 +156,6 @@ int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
int hfi1_user_exp_rcv_clear(struct file *, struct hfi1_tid_info *);
int hfi1_user_exp_rcv_invalid(struct file *, struct hfi1_tid_info *);
#endif /* __HFI1_ORIG__ */
#endif /* _HFI1_USER_EXP_RCV_H */


@@ -1,3 +1,7 @@
#ifndef _HFI1_USER_SDMA_H
#define _HFI1_USER_SDMA_H
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
@@ -44,6 +48,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <hfi1/ihk_hfi1_common.h>
#include <hfi1/iowait.h>
#include <hfi1/sdma.h>
#include <string.h>
#include <hfi1/hfi.h>
#include <hfi1/hfi1_user.h>
#include <uio.h>
#ifdef __HFI1_ORIG__
#include <linux/device.h>
#include <linux/wait.h>
@@ -53,6 +68,7 @@
extern uint extended_psn;
#endif /* __HFI1_ORIG__ */
/*
* Define fields in the KDETH header so we can update the header
* template.
@@ -105,16 +121,24 @@ struct hfi1_user_sdma_pkt_q {
atomic_t n_reqs;
u16 reqidx;
struct hfi1_devdata *dd;
#ifdef __HFI1_ORIG__
struct kmem_cache *txreq_cache;
#else
void *txreq_cache; //unused
#endif /* __HFI1_ORIG__ */
struct user_sdma_request *reqs;
unsigned long *req_in_use;
struct iowait busy;
unsigned state;
#ifdef __HFI1_ORIG__
wait_queue_head_t wait;
unsigned long unpinned;
struct mmu_rb_handler *handler;
atomic_t n_locked;
struct mm_struct *mm;
#else
//TODO:
#endif /* __HFI1_ORIG__ */
};
struct hfi1_user_sdma_comp_q {
@@ -122,7 +146,14 @@ struct hfi1_user_sdma_comp_q {
struct hfi1_sdma_comp_entry *comps;
};
int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
unsigned long dim, unsigned long *count);
#ifdef __HFI1_ORIG__
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *, struct file *);
int hfi1_user_sdma_free_queues(struct hfi1_filedata *);
int hfi1_user_sdma_process_request(struct file *, struct iovec *, unsigned long,
unsigned long *);
#endif /* __HFI1_ORIG__ */
#endif /* _HFI1_SDMA_H */


@@ -45,6 +45,15 @@
*
*/
#include <hfi1/ihk_hfi1_common.h>
#include <hfi1/user_sdma.h>
#include <hfi1/sdma.h>
#include <hfi1/common.h>
unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
#ifdef __HFI1_ORIG__
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
@@ -62,11 +71,15 @@
#include "iowait.h" #include "iowait.h"
#include "trace.h" #include "trace.h"
#endif /* __HFI1_ORIG__ */
/* must be a power of 2 >= 64 <= 32768 */ /* must be a power of 2 >= 64 <= 32768 */
#define SDMA_DESCQ_CNT 2048 #define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64 #define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff #define INVALID_TAIL 0xffff
#ifdef __HFI1_ORIG__
static uint sdma_descq_cnt = SDMA_DESCQ_CNT; static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO); module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
@@ -226,7 +239,9 @@ static const struct sdma_set_state_action sdma_action_table[] = {
},
};
#endif /* __HFI1_ORIG__ */
#define SDMA_TAIL_UPDATE_THRESH 0x1F
#ifdef __HFI1_ORIG__
/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
@@ -368,7 +383,7 @@ static inline void complete_tx(struct sdma_engine *sde,
/* protect against complete modifying */
struct iowait *wait = tx->wait;
callback_t complete = tx->complete;
hfi1_cdbg(AIOWRITE, "+");
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
trace_hfi1_sdma_out_sn(sde, tx->sn);
if (WARN_ON_ONCE(sde->head_sn != tx->sn))
@@ -381,6 +396,7 @@ static inline void complete_tx(struct sdma_engine *sde,
(*complete)(tx, res);
if (iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
hfi1_cdbg(AIOWRITE, "-");
}
/*
@@ -773,11 +789,16 @@ struct sdma_engine *sdma_select_engine_vl(
struct sdma_map_elem *e;
struct sdma_engine *rval;
hfi1_cdbg(AIOWRITE, "+");
/* NOTE This should only happen if SC->VL changed after the initial
* checks on the QP/AH
* Default will return engine 0 below
*/
#ifdef __HFI1_ORIG__
if (vl >= num_vls) {
#else
if (vl >= HFI1_MAX_VLS_SUPPORTED) {
#endif /* __HFI1_ORIG__ */
rval = NULL;
goto done;
}
@@ -795,6 +816,7 @@ struct sdma_engine *sdma_select_engine_vl(
done:
rval = !rval ? &dd->per_sdma[0] : rval;
trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
hfi1_cdbg(AIOWRITE, "-");
return rval;
}
@@ -864,6 +886,7 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
const struct cpumask *current_mask = tsk_cpus_allowed(current);
unsigned long cpu_id;
hfi1_cdbg(AIOWRITE, "+");
/*
* To ensure that always the same sdma engine(s) will be
* selected make sure the process is pinned to this CPU only.
@@ -1658,6 +1681,7 @@ static inline void sdma_unmap_desc(
break;
}
}
#endif /* __HFI1_ORIG__ */
/*
* return the mode as indicated by the first
@@ -1689,13 +1713,15 @@ void __sdma_txclean(
if (tx->num_desc) {
u8 skip = 0, mode = ahg_mode(tx);
/* TODO: enable sdma_unmap_desc */
/* unmap first */
//sdma_unmap_desc(dd, &tx->descp[0]);
/* determine number of AHG descriptors to skip */
if (mode > SDMA_AHG_APPLY_UPDATE1)
skip = mode >> 1;
/* TODO: enable sdma_unmap_desc */
// for (i = 1 + skip; i < tx->num_desc; i++)
// sdma_unmap_desc(dd, &tx->descp[i]);
tx->num_desc = 0;
}
kfree(tx->coalesce_buf);
@@ -1706,6 +1732,7 @@ void __sdma_txclean(
kfree(tx->descp);
}
}
#ifdef __HFI1_ORIG__
static inline u16 sdma_gethead(struct sdma_engine *sde)
{
@@ -1824,6 +1851,7 @@ static void sdma_make_progress(struct sdma_engine *sde, u64 status)
u16 hwhead, swhead;
int idle_check_done = 0;
hfi1_cdbg(AIOWRITE, "+");
hwhead = sdma_gethead(sde);
/* The reason for some of the complexity of this code is that
@@ -1875,6 +1903,7 @@ retry:
sde->last_status = status;
if (progress)
sdma_desc_avail(sde, sdma_descq_freecnt(sde));
hfi1_cdbg(AIOWRITE, "-");
}
/*
@@ -1888,6 +1917,7 @@ retry:
*/
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
hfi1_cdbg(AIOWRITE, "+");
trace_hfi1_sdma_engine_interrupt(sde, status);
write_seqlock(&sde->head_lock);
sdma_set_desc_cnt(sde, sdma_desct_intr);
@@ -1899,6 +1929,7 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
sde->sdma_int_cnt++;
sdma_make_progress(sde, status);
write_sequnlock(&sde->head_lock);
hfi1_cdbg(AIOWRITE, "-");
}
/**
@@ -2000,12 +2031,15 @@ static void sdma_setlengen(struct sdma_engine *sde)
(4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}
#endif /* __HFI1_ORIG__ */
static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
{
hfi1_cdbg(AIOWRITE, ".");
/* Commit writes to memory and advance the tail on the chip */
smp_wmb(); /* see get_txhead() */
writeq(tail, sde->tail_csr);
}
#ifdef __HFI1_ORIG__
/*
* This is called when changing to state s10_hw_start_up_halt_wait as
@@ -2270,6 +2304,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
}
}
#endif /* __HFI1_ORIG__ */
/*
* add the generation number into
* the qw1 and return
@@ -2306,12 +2341,12 @@ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
u16 tail;
struct sdma_desc *descp = tx->descp;
u8 skip = 0, mode = ahg_mode(tx);
hfi1_cdbg(AIOWRITE, "+");
tail = sde->descq_tail & sde->sdma_mask;
sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
// trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
// tail, &sde->descq[tail]);
tail = ++sde->descq_tail & sde->sdma_mask;
descp++;
if (mode > SDMA_AHG_APPLY_UPDATE1)
@@ -2329,18 +2364,19 @@ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
qw1 = add_gen(sde, descp->qw[1]);
}
sde->descq[tail].qw[1] = cpu_to_le64(qw1);
// trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
// tail, &sde->descq[tail]);
tail = ++sde->descq_tail & sde->sdma_mask;
}
tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
// trace_hfi1_sdma_in_sn(sde, tx->sn);
WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
sde->desc_avail -= tx->num_desc;
hfi1_cdbg(AIOWRITE, "-");
return tail;
}
@@ -2354,6 +2390,7 @@ static int sdma_check_progress(
{
int ret;
hfi1_cdbg(AIOWRITE, "+");
sde->desc_avail = sdma_descq_freecnt(sde);
if (tx->num_desc <= sde->desc_avail)
return -EAGAIN;
@@ -2369,8 +2406,10 @@ static int sdma_check_progress(
} else {
ret = -EBUSY;
}
hfi1_cdbg(AIOWRITE, "-");
return ret;
}
#ifdef __HFI1_ORIG__
/**
* sdma_send_txreq() - submit a tx req to ring
@@ -2394,6 +2433,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
u16 tail;
unsigned long flags;
hfi1_cdbg(AIOWRITE, "+");
/* user should have supplied entire packet */
if (unlikely(tx->tlen))
return -EINVAL;
@@ -2410,6 +2450,7 @@ retry:
sdma_update_tail(sde, tail);
unlock:
spin_unlock_irqrestore(&sde->tail_lock, flags);
hfi1_cdbg(AIOWRITE, "-");
return ret;
unlock_noconn:
if (wait)
@@ -2436,6 +2477,7 @@ nodesc:
goto unlock;
}
#endif /* __HFI1_ORIG__ */
/**
* sdma_send_txlist() - submit a list of tx req to ring
* @sde: sdma engine to use
@@ -2473,6 +2515,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
u16 tail = INVALID_TAIL;
u32 submit_count = 0, flush_count = 0, total_count;
hfi1_cdbg(AIOWRITE, "+");
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
@@ -2502,6 +2545,7 @@ update_tail:
sdma_update_tail(sde, tail);
spin_unlock_irqrestore(&sde->tail_lock, flags);
*count_out = total_count;
hfi1_cdbg(AIOWRITE, "-");
return ret;
unlock_noconn:
spin_lock(&sde->flushlist_lock);
@@ -2511,14 +2555,15 @@ unlock_noconn:
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
// trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
list_add_tail(&tx->list, &sde->flushlist);
flush_count++;
iowait_inc_wait_count(wait, tx->num_desc);
}
spin_unlock(&sde->flushlist_lock);
// TODO: schedule_work
//schedule_work(&sde->flush_worker);
ret = -ECOMM;
goto update_tail;
nodesc:
@@ -2530,6 +2575,7 @@ nodesc:
sde->descq_full_count++;
goto update_tail;
}
#ifdef __HFI1_ORIG__
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
@@ -3083,6 +3129,7 @@ enomem:
return -ENOMEM;
}
#endif /* __HFI1_ORIG__ */
/*
* ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
*
@@ -3103,6 +3150,8 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
int type, void *kvaddr, struct page *page,
unsigned long offset, u16 len)
{
//TODO: ext_coal_sdma_tx_descs
#ifdef __HFI1_ORIG__
int pad_len, rval;
dma_addr_t addr;
@@ -3162,9 +3211,10 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
addr, tx->tlen);
}
#endif /* __HFI1_ORIG__ */
return 1;
}
#ifdef __HFI1_ORIG__
/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
@@ -3209,6 +3259,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
return rval;
}
#endif /* __HFI1_ORIG__ */
/*
* Add ahg to the sdma_txreq
*
@@ -3316,6 +3367,7 @@ void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
return;
clear_bit(ahg_index, &sde->ahg_bits);
}
#ifdef __HFI1_ORIG__
/*
* SPC freeze handling for SDMA engines. Called when the driver knows
@@ -3410,3 +3462,5 @@ void _sdma_engine_progress_schedule(
CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
sde->progress_mask);
}
#endif /* __HFI1_ORIG__ */


@@ -67,7 +67,7 @@
#include <lwk/stddef.h>
#include <futex.h>
//#include <hfi1/hfi.h>
#include <hfi1/file_ops.h>
#define SYSCALL_BY_IKC
@@ -481,7 +481,7 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
#endif // PROFILE_ENABLE #endif // PROFILE_ENABLE
if (req->number == __NR_open && rc > 0) { if (req->number == __NR_open && rc > 0) {
if (res.private_data && !strncmp(req->args[0], "/dev/hfi", 8)) { if (res.private_data && !strncmp((const char *)req->args[0], "/dev/hfi", 8)) {
thread->proc->fd_priv_table[rc] = res.private_data; thread->proc->fd_priv_table[rc] = res.private_data;
kprintf("%s: PID: %d, open fd: %d, filename: %s, private_data: 0x%lx\n", kprintf("%s: PID: %d, open fd: %d, filename: %s, private_data: 0x%lx\n",
__FUNCTION__, thread->proc->pid, rc, req->args[0], res.private_data); __FUNCTION__, thread->proc->pid, rc, req->args[0], res.private_data);
@@ -3093,13 +3093,10 @@ SYSCALL_DECLARE(writev)
{ {
struct process *proc = cpu_local_var(current)->proc; struct process *proc = cpu_local_var(current)->proc;
int fd = ihk_mc_syscall_arg0(ctx); int fd = ihk_mc_syscall_arg0(ctx);
struct iovec *iovec = (struct iovec *)ihk_mc_syscall_arg1(ctx);
int iovcnt = ihk_mc_syscall_arg2(ctx); int iovcnt = ihk_mc_syscall_arg2(ctx);
if (fd < 256) { void *private_data = proc->fd_priv_table[fd];
//struct hfi1_filedata *hf = (struct hfi1_filedata *)proc->fd_priv_table[fd]; if (private_data) hfi1_aio_write(private_data, iovec, iovcnt);
kprintf("%s: fd[%d], 0x%lx, iovcnt[%d]\n", __FUNCTION__, fd, proc->fd_priv_table[fd], iovcnt);
} else {
kprintf("%s: fd[%d] > 256\n", __FUNCTION__, fd);
}
return syscall_generic_forwarding(__NR_writev, ctx); return syscall_generic_forwarding(__NR_writev, ctx);
} }
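
Taken together, the two syscall.c hunks establish a per-process fd-to-private_data mapping: the open() forwarding path records the Linux-side hfi1 private_data for /dev/hfi* descriptors, and writev() looks it up to decide whether the request can be fed to hfi1_aio_write() directly on the LWK. A condensed sketch of that flow, assuming fd stays within the bounds of fd_priv_table (error handling omitted, not a verbatim copy of the hunks):

/* 1. When a forwarded open() of /dev/hfi* succeeds, remember the Linux-side
 *    private_data so later syscalls can find it by fd. */
if (req->number == __NR_open && rc > 0 &&
    res.private_data &&
    !strncmp((const char *)req->args[0], "/dev/hfi", 8))
	thread->proc->fd_priv_table[rc] = res.private_data;

/* 2. In writev(), an fd with recorded private_data is an hfi1 descriptor,
 *    so its iovecs are handed to hfi1_aio_write(); all other fds keep
 *    being forwarded to Linux via syscall_generic_forwarding(). */
void *private_data = proc->fd_priv_table[fd];
if (private_data)
	hfi1_aio_write(private_data, iovec, iovcnt);
return syscall_generic_forwarding(__NR_writev, ctx);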

View File

@@ -44,6 +44,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
#include <hfi1/user_sdma.h>
#include <hfi1/user_exp_rcv.h>
#include <hfi1/common.h>
#ifdef __HFI1_ORIG__
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/device.h> #include <linux/device.h>
@@ -69,9 +76,11 @@
#include "trace.h" #include "trace.h"
#include "mmu_rb.h" #include "mmu_rb.h"
static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO); module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128"); MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
#endif /* __HFI1_ORIG__ */
static uint hfi1_sdma_comp_ring_size = 128;
/* The maximum number of Data io vectors per message/request */ /* The maximum number of Data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8 #define MAX_VECTORS_PER_REQ 8
@@ -133,8 +142,11 @@ static unsigned initial_pkt_count = 8;
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
#ifdef __HFI1_ORIG__
struct sdma_mmu_node; struct sdma_mmu_node;
#endif /* __HFI1_ORIG__ */
struct user_sdma_iovec { struct user_sdma_iovec {
struct list_head list; struct list_head list;
struct iovec iov; struct iovec iov;
@@ -149,6 +161,7 @@ struct user_sdma_iovec {
u64 offset; u64 offset;
struct sdma_mmu_node *node; struct sdma_mmu_node *node;
}; };
#ifdef __HFI1_ORIG__
struct sdma_mmu_node { struct sdma_mmu_node {
struct mmu_rb_node rb; struct mmu_rb_node rb;
@@ -164,6 +177,7 @@ struct evict_data {
u32 target; /* target count to evict */ u32 target; /* target count to evict */
}; };
#endif /* __HFI1_ORIG__ */
struct user_sdma_request { struct user_sdma_request {
struct sdma_req_info info; struct sdma_req_info info;
struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_pkt_q *pq;
@@ -237,6 +251,7 @@ struct user_sdma_txreq {
unsigned busycount; unsigned busycount;
u64 seqnum; u64 seqnum;
}; };
#ifdef __HFI1_ORIG__
#define SDMA_DBG(req, fmt, ...) \ #define SDMA_DBG(req, fmt, ...) \
hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \ hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
@@ -246,24 +261,28 @@ struct user_sdma_txreq {
hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \ hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
(pq)->subctxt, ##__VA_ARGS__) (pq)->subctxt, ##__VA_ARGS__)
#endif /* __HFI1_ORIG__ */
static int user_sdma_send_pkts(struct user_sdma_request *, unsigned); static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *); static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
unsigned);
static int check_header_template(struct user_sdma_request *, static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32); struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *, static int set_txreq_header(struct user_sdma_request *,
struct user_sdma_txreq *, u32); struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *, static int set_txreq_header_ahg(struct user_sdma_request *,
struct user_sdma_txreq *, u32); struct user_sdma_txreq *, u32);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *, static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
struct hfi1_user_sdma_comp_q *, struct hfi1_user_sdma_comp_q *,
u16, enum hfi1_sdma_comp_state, int); u16, enum hfi1_sdma_comp_state, int);
static void user_sdma_txreq_cb(struct sdma_txreq *, int);
#ifdef __HFI1_ORIG__
static int num_user_pages(const struct iovec *);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
unsigned);
static inline u32 set_pkt_bth_psn(__be32, u8, u32); static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len); static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
@@ -476,6 +495,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
return 0; return 0;
} }
#endif /* __HFI1_ORIG__ */
static u8 dlid_to_selector(u16 dlid) static u8 dlid_to_selector(u16 dlid)
{ {
static u8 mapping[256]; static u8 mapping[256];
@@ -497,11 +518,19 @@ static u8 dlid_to_selector(u16 dlid)
return mapping[hash]; return mapping[hash];
} }
#ifdef __HFI1_ORIG__
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
unsigned long dim, unsigned long *count) unsigned long dim, unsigned long *count)
{ {
int ret = 0, i; int ret = 0, i;
struct hfi1_filedata *fd = fp->private_data; struct hfi1_filedata *fd = fp->private_data;
#else
int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
unsigned long dim, unsigned long *count)
{
int ret = 0, i;
struct hfi1_filedata *fd = private_data;
#endif /* __HFI1_ORIG__ */
struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq; struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_user_sdma_comp_q *cq = fd->cq;
@@ -515,6 +544,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
u16 dlid; u16 dlid;
u32 selector; u32 selector;
hfi1_cdbg(AIOWRITE, "+");
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg( hfi1_cdbg(
SDMA, SDMA,
@@ -530,8 +560,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
return -EFAULT; return -EFAULT;
} }
trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt, // trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
(u16 *)&info); // (u16 *)&info);
if (info.comp_idx >= hfi1_sdma_comp_ring_size) { if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
hfi1_cdbg(SDMA, hfi1_cdbg(SDMA,
@@ -619,6 +649,9 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
ret = -EINVAL; ret = -EINVAL;
goto free_req; goto free_req;
} }
// TODO: Enable this validation and checking
#ifdef __HFI1_ORIG__
/* /*
* Validate the vl. Do not trust packets from user space blindly. * Validate the vl. Do not trust packets from user space blindly.
* VL comes from PBC, SC comes from LRH, and the VL needs to * VL comes from PBC, SC comes from LRH, and the VL needs to
@@ -640,6 +673,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
ret = -EINVAL; ret = -EINVAL;
goto free_req; goto free_req;
} }
#endif /* __HFI1_ORIG__ */
/* /*
* Also should check the BTH.lnh. If it says the next header is GRH then * Also should check the BTH.lnh. If it says the next header is GRH then
@@ -667,11 +701,16 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
for (i = 0; i < req->data_iovs; i++) { for (i = 0; i < req->data_iovs; i++) {
INIT_LIST_HEAD(&req->iovs[i].list); INIT_LIST_HEAD(&req->iovs[i].list);
memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec)); memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
#ifdef __HFI1_ORIG__
hfi1_cdbg(AIOWRITE, "+pin_vector_pages");
// TODO: pin_vector_pages
ret = pin_vector_pages(req, &req->iovs[i]); ret = pin_vector_pages(req, &req->iovs[i]);
hfi1_cdbg(AIOWRITE, "-pin_vector_pages");
if (ret) { if (ret) {
req->status = ret; req->status = ret;
goto free_req; goto free_req;
} }
#endif /* __HFI1_ORIG__ */
req->data_len += req->iovs[i].iov.iov_len; req->data_len += req->iovs[i].iov.iov_len;
} }
SDMA_DBG(req, "total data length %u", req->data_len); SDMA_DBG(req, "total data length %u", req->data_len);
@@ -719,7 +758,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
dlid = be16_to_cpu(req->hdr.lrh[1]); dlid = be16_to_cpu(req->hdr.lrh[1]);
selector = dlid_to_selector(dlid); selector = dlid_to_selector(dlid);
selector += uctxt->ctxt + fd->subctxt; selector += uctxt->ctxt + fd->subctxt;
req->sde = sdma_select_user_engine(dd, selector, vl); /* TODO: check the rcu stuff */
//req->sde = sdma_select_user_engine(dd, selector, vl);
if (!req->sde || !sdma_running(req->sde)) { if (!req->sde || !sdma_running(req->sde)) {
ret = -ECOMM; ret = -ECOMM;
@@ -772,20 +812,28 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
goto free_req; goto free_req;
return ret; return ret;
} }
#ifdef __HFI1_ORIG__
hfi1_cdbg(AIOWRITE, "+wait_event_interruptible_timeout");
wait_event_interruptible_timeout( wait_event_interruptible_timeout(
pq->busy.wait_dma, pq->busy.wait_dma,
(pq->state == SDMA_PKT_Q_ACTIVE), (pq->state == SDMA_PKT_Q_ACTIVE),
msecs_to_jiffies( msecs_to_jiffies(
SDMA_IOWAIT_TIMEOUT)); SDMA_IOWAIT_TIMEOUT));
hfi1_cdbg(AIOWRITE, "-wait_event_interruptible_timeout");
#else
while (pq->state != SDMA_PKT_Q_ACTIVE) cpu_pause();
#endif /* __HFI1_ORIG__ */
} }
} }
*count += idx; *count += idx;
hfi1_cdbg(AIOWRITE, "-");
return 0; return 0;
free_req: free_req:
user_sdma_free_request(req, true); user_sdma_free_request(req, true);
if (req_queued) if (req_queued)
pq_update(pq); pq_update(pq);
set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
hfi1_cdbg(AIOWRITE, "-");
return ret; return ret;
} }
@@ -862,6 +910,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
struct hfi1_user_sdma_pkt_q *pq = NULL; struct hfi1_user_sdma_pkt_q *pq = NULL;
struct user_sdma_iovec *iovec = NULL; struct user_sdma_iovec *iovec = NULL;
hfi1_cdbg(AIOWRITE, "+");
if (!req->pq) if (!req->pq)
return -EINVAL; return -EINVAL;
@@ -899,7 +948,11 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
return -EFAULT; return -EFAULT;
} }
#ifdef __HFI1_ORIG__
tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
#else
tx = kmalloc(sizeof(struct user_sdma_txreq), GFP_KERNEL | __GFP_ZERO);
#endif /* __HFI1_ORIG__ */
if (!tx) if (!tx)
return -ENOMEM; return -ENOMEM;
@@ -1087,14 +1140,21 @@ dosend:
if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
sdma_ahg_free(req->sde, req->ahg_idx); sdma_ahg_free(req->sde, req->ahg_idx);
} }
hfi1_cdbg(AIOWRITE, "-");
return ret; return ret;
free_txreq: free_txreq:
sdma_txclean(pq->dd, &tx->txreq); sdma_txclean(pq->dd, &tx->txreq);
free_tx: free_tx:
#ifdef __HFI1_ORIG__
kmem_cache_free(pq->txreq_cache, tx); kmem_cache_free(pq->txreq_cache, tx);
hfi1_cdbg(AIOWRITE, "-");
#else
kfree(tx);
#endif /* __HFI1_ORIG__ */
return ret; return ret;
} }
#ifdef __HFI1_ORIG__
/* /*
* How many pages in this iovec element? * How many pages in this iovec element?
@@ -1212,6 +1272,7 @@ static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
kfree(pages); kfree(pages);
} }
#endif /* __HFI1_ORIG__ */
static int check_header_template(struct user_sdma_request *req, static int check_header_template(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr, u32 lrhlen, struct hfi1_pkt_header *hdr, u32 lrhlen,
u32 datalen) u32 datalen)
@@ -1388,8 +1449,8 @@ static int set_txreq_header(struct user_sdma_request *req,
req->omfactor != KDETH_OM_SMALL); req->omfactor != KDETH_OM_SMALL);
} }
done: done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, // trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
req->info.comp_idx, hdr, tidval); // req->info.comp_idx, hdr, tidval);
return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr)); return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
} }
@@ -1475,9 +1536,9 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
} }
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, // trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
req->info.comp_idx, req->sde->this_idx, // req->info.comp_idx, req->sde->this_idx,
req->ahg_idx, req->ahg, diff, tidval); // req->ahg_idx, req->ahg, diff, tidval);
return diff; return diff;
} }
@@ -1510,7 +1571,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
} }
req->seqcomp = tx->seqnum; req->seqcomp = tx->seqnum;
kmem_cache_free(pq->txreq_cache, tx); //TODO: kmem_cache_free
//kmem_cache_free(pq->txreq_cache, tx);
tx = NULL; tx = NULL;
idx = req->info.comp_idx; idx = req->info.comp_idx;
@@ -1538,12 +1600,14 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{ {
if (atomic_dec_and_test(&pq->n_reqs)) { if (atomic_dec_and_test(&pq->n_reqs)) {
xchg(&pq->state, SDMA_PKT_Q_INACTIVE); xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
wake_up(&pq->wait); //TODO: wake_up
//wake_up(&pq->wait);
} }
} }
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{ {
hfi1_cdbg(AIOWRITE, "+");
if (!list_empty(&req->txps)) { if (!list_empty(&req->txps)) {
struct sdma_txreq *t, *p; struct sdma_txreq *t, *p;
@@ -1552,7 +1616,9 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
container_of(t, struct user_sdma_txreq, txreq); container_of(t, struct user_sdma_txreq, txreq);
list_del_init(&t->list); list_del_init(&t->list);
sdma_txclean(req->pq->dd, t); sdma_txclean(req->pq->dd, t);
#ifdef __HFI1_ORIG__
kmem_cache_free(req->pq->txreq_cache, tx); kmem_cache_free(req->pq->txreq_cache, tx);
#endif /* __HFI1_ORIG__ */
} }
} }
if (req->data_iovs) { if (req->data_iovs) {
@@ -1564,17 +1630,20 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
if (!node) if (!node)
continue; continue;
//TODO: port the sdma_mmu_node unpin/refcount handling below
#ifdef __HFI1_ORIG__
if (unpin) if (unpin)
hfi1_mmu_rb_remove(req->pq->handler, hfi1_mmu_rb_remove(req->pq->handler,
&node->rb); &node->rb);
else else
atomic_dec(&node->refcount); atomic_dec(&node->refcount);
#endif /* __HFI1_ORIG__ */
} }
} }
kfree(req->tids); kfree(req->tids);
clear_bit(req->info.comp_idx, req->pq->req_in_use); clear_bit(req->info.comp_idx, req->pq->req_in_use);
hfi1_cdbg(AIOWRITE, "-");
} }
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
struct hfi1_user_sdma_comp_q *cq, struct hfi1_user_sdma_comp_q *cq,
u16 idx, enum hfi1_sdma_comp_state state, u16 idx, enum hfi1_sdma_comp_state state,
@@ -1585,9 +1654,10 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
cq->comps[idx].status = state; cq->comps[idx].status = state;
if (state == ERROR) if (state == ERROR)
cq->comps[idx].errcode = -ret; cq->comps[idx].errcode = -ret;
trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, // trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
idx, state, ret); // idx, state, ret);
} }
#ifdef __HFI1_ORIG__
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
unsigned long len) unsigned long len)
@@ -1651,3 +1721,5 @@ static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
return 1; return 1;
return 0; return 0;
} }
#endif /* __HFI1_ORIG__ */
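
Two substitutions in user_sdma.c deserve a note: txreq allocation moves from the Linux slab cache to plain kmalloc()/kfree(), and the interruptible wait on pq->busy becomes a polling loop on pq->state with cpu_pause(). The loop as committed spins indefinitely; a bounded variant, shown here only as a sketch (SDMA_IOWAIT_SPIN_LIMIT and the -EBUSY policy are assumptions, not part of this commit), would recover some of the timeout semantics of wait_event_interruptible_timeout():

/* Sketch: bounded busy-wait for the packet queue to return to ACTIVE.
 * pq->state is updated elsewhere by the SDMA completion path;
 * cpu_pause()/cpu_relax() hint the CPU that this is a spin-wait. */
unsigned long spins = 0;

while (pq->state != SDMA_PKT_Q_ACTIVE) {
	cpu_pause();
	if (++spins > SDMA_IOWAIT_SPIN_LIMIT) {	/* assumed limit */
		ret = -EBUSY;			/* assumed error policy */
		break;
	}
}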

View File

@@ -24,6 +24,7 @@ void cpu_halt(void);
void cpu_safe_halt(void); void cpu_safe_halt(void);
void cpu_restore_interrupt(unsigned long); void cpu_restore_interrupt(unsigned long);
void cpu_pause(void); void cpu_pause(void);
void cpu_relax(void);
#define barrier() arch_barrier() #define barrier() arch_barrier()
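
cpu_relax() is declared here alongside cpu_pause() so ported hfi1 code can keep the Linux spin-wait idiom; both end up as a PAUSE hint on x86. A trivial usage sketch (done_flag is a placeholder, not a real driver field):

/* Sketch: poll a completion flag that another CPU or the device's
 * completion path will eventually set. */
volatile int done_flag = 0;	/* placeholder variable */

while (!done_flag)
	cpu_relax();		/* emits rep; nop, a spin-wait hint */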