HFI1: use DWARF-generated headers for user_sdma_request and user_sdma_txreq
@@ -34,43 +34,47 @@ struct sdma_engine {
 	u8 sdma_shift;
 };
 struct {
-	char padding8[256];
+	char padding8[181];
 	u8 this_idx;
 };
+struct {
+	char padding9[256];
+	spinlock_t tail_lock;
+};
 struct {
-	char padding9[260];
+	char padding10[260];
 	u32 descq_tail;
 };
 struct {
-	char padding10[264];
+	char padding11[264];
 	long unsigned int ahg_bits;
 };
 struct {
-	char padding11[272];
+	char padding12[272];
 	u16 desc_avail;
 };
 struct {
-	char padding12[274];
+	char padding13[274];
 	u16 tx_tail;
 };
 struct {
-	char padding13[276];
+	char padding14[276];
 	u16 descq_cnt;
 };
 struct {
-	char padding14[320];
+	char padding15[320];
 	seqlock_t head_lock;
 };
 struct {
-	char padding15[328];
+	char padding16[328];
 	u32 descq_head;
 };
 struct {
-	char padding16[704];
+	char padding17[704];
 	spinlock_t flushlist_lock;
 };
 struct {
-	char padding17[712];
+	char padding18[712];
 	struct list_head flushlist;
 };
 };
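
The padded anonymous structs are the DWARF-generated overlay idiom: rather than replicating every kernel field, each member that is actually needed is pinned to its byte offset inside struct sdma_engine by a preceding char array, with the anonymous structs sharing storage in an enclosing union. A minimal sketch of the idiom with invented offsets (fake_engine, tail, and head are illustrative names, not the real layout):

#include <stddef.h>

/* Illustrative overlay, not the real sdma_engine layout: each anonymous
 * struct pins one member at its DWARF-reported byte offset by preceding
 * it with a padding array, and the union makes them share storage. */
struct fake_engine {
	union {
		char whole[64];            /* total size from DWARF */
		struct {
			char padding0[16];
			unsigned int tail; /* lives at offset 16 */
		};
		struct {
			char padding1[24];
			unsigned int head; /* lives at offset 24 */
		};
	};
};

_Static_assert(offsetof(struct fake_engine, tail) == 16, "tail offset");
_Static_assert(offsetof(struct fake_engine, head) == 24, "head offset");
_Static_assert(sizeof(struct fake_engine) == 64, "overall size");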
@@ -191,8 +191,8 @@ typedef unsigned short __u16;
 typedef __signed__ int __s32;
 typedef unsigned int __u32;
 
-typedef __signed__ long __s64;
-typedef unsigned long __u64;
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
 
 typedef __u64 u64;
 typedef __s64 s64;
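
This hunk swaps plain long for long long in the 64-bit typedefs. Both are 8 bytes on LP64 targets, but long long is 64-bit on every ABI and matches the Linux uapi definition, which keeps printf length modifiers and headers shared with the host kernel consistent. A small self-contained check (illustrative, not part of the commit):

#include <stdio.h>

/* 'unsigned long long' is 64-bit on every ABI and pairs with %llu,
 * whereas 'unsigned long' is only 64-bit on LP64 and pairs with %lu. */
typedef unsigned long long __u64;

_Static_assert(sizeof(__u64) == 8, "__u64 must be 8 bytes");

int main(void)
{
	__u64 v = 1ULL << 40;
	printf("%llu\n", v); /* %llu matches unsigned long long */
	return 0;
}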
@@ -94,6 +94,7 @@ extern uint extended_psn;
 
 #define AHG_KDETH_INTR_SHIFT 12
 #define AHG_KDETH_SH_SHIFT 13
+#define AHG_KDETH_ARRAY_SIZE 9
 
 #define KDETH_GET(val, field) \
 	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
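
KDETH_GET pastes its field argument into matching _SHIFT and _MASK constant names and extracts that field from a little-endian KDETH word. A standalone sketch; the le32_to_cpu stub and the TIDCTRL shift/mask values here are stand-ins for illustration, not taken from this commit:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver environment: le32_to_cpu() is a no-op on a
 * little-endian host, and the TIDCTRL values are illustrative only. */
#define le32_to_cpu(x) (x)
#define KDETH_TIDCTRL_SHIFT 26
#define KDETH_TIDCTRL_MASK  0x3u

#define KDETH_GET(val, field) \
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)

int main(void)
{
	uint32_t kdeth = 2u << 26;                 /* TIDCTRL field = 2 */
	printf("%u\n", KDETH_GET(kdeth, TIDCTRL)); /* prints 2 */
	return 0;
}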
@@ -108,7 +109,9 @@ extern uint extended_psn;
 
 /* KDETH OM multipliers and switch over point */
 #define KDETH_OM_SMALL 4
+#define KDETH_OM_SMALL_SHIFT 2
 #define KDETH_OM_LARGE 64
+#define KDETH_OM_LARGE_SHIFT 6
 #define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
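
The OM (offset mode) constants express KDETH offsets in 4-byte units (small) or 64-byte units (large), with the new shift constants standing in for the divisions; KDETH_OM_MAX_SIZE works out to 1 << 17, i.e. 128 KiB. A toy converter (kdeth_offset_units is a hypothetical helper and the mode-selection rule is an assumption, not driver code):

#include <stdint.h>
#include <stdio.h>

#define KDETH_OM_SMALL        4
#define KDETH_OM_SMALL_SHIFT  2
#define KDETH_OM_LARGE        64
#define KDETH_OM_LARGE_SHIFT  6
#define KDETH_OM_MAX_SIZE     (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))

/* Convert a byte offset to KDETH OFFSET units: stay in small mode while
 * the offset fits, otherwise switch to the large multiplier. */
static uint32_t kdeth_offset_units(uint32_t byte_off, int *use_large_om)
{
	*use_large_om = byte_off >= KDETH_OM_MAX_SIZE;
	return *use_large_om ? byte_off >> KDETH_OM_LARGE_SHIFT
			     : byte_off >> KDETH_OM_SMALL_SHIFT;
}

int main(void)
{
	int large;
	printf("%u (large=%d)\n", kdeth_offset_units(4096, &large), large);
	printf("%u (large=%d)\n", kdeth_offset_units(1u << 20, &large), large);
	return 0;
}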
 
 /* The original size on Linux is 376 B */

@@ -1,6 +1,8 @@
 #ifndef __LWK_COMPILER_H
 #define __LWK_COMPILER_H
+
+#include <ihk/cpu.h>
 
 #ifndef __ASSEMBLY__
 
 #ifdef __CHECKER__

@@ -175,11 +177,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
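
barrier() constrains only the compiler, not the CPU: it forbids caching memory values in registers across the barrier. The __memory_barrier() form above is a compiler-specific fallback; a sketch assuming the common GCC-style definition shows the effect:

/* Assumption: the usual GCC-style optimization barrier. */
#define barrier() __asm__ __volatile__("" : : : "memory")

static int flag;

void spin_until_set(void)
{
	/* Without the barrier the compiler may hoist 'flag' into a
	 * register and spin forever; the clobber forces a reload. */
	while (!flag)
		barrier();
}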
@@ -490,4 +487,66 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
	(_________p1); \
})

extern void *memcpy(void *dest, const void *src, size_t n);

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
	default:
		barrier();
		memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
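
The comment block above names the intended use cases; the following is a minimal sketch (hypothetical code, not from this commit) of READ_ONCE()/WRITE_ONCE() mediating a flag between process context and an interrupt handler on the same CPU:

static int data;
static int ready;

/* Process-context producer. */
void publish(int v)
{
	data = v;
	/* WRITE_ONCE() keeps the compiler from tearing or eliding the
	 * store; cross-CPU use would additionally need memory barriers. */
	WRITE_ONCE(ready, 1);
}

/* Consumer, e.g. an irq handler on the same CPU. */
int consume(void)
{
	/* READ_ONCE() forces a fresh load on every iteration instead of
	 * letting the compiler hoist 'ready' out of the loop. */
	while (!READ_ONCE(ready))
		;
	return data;
}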
#endif /* __LWK_COMPILER_H */