futex and rlimit
kernel/include/asm.h (new file, 38 lines)
@@ -0,0 +1,38 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H

#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_EX_SEC .section __ex_table
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif

# define __ASM_SEL(a,b) __ASM_FORM(b)

#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)

#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_MOV_UL __ASM_SIZE(mov)

#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)

/* Exception table entry */
# define _ASM_EXTABLE(from,to) \
        __ASM_EX_SEC \
        _ASM_ALIGN "\n" \
        _ASM_PTR #from "," #to "\n" \
        " .previous\n"

#endif /* _ASM_X86_ASM_H */
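Note: a hedged, standalone illustration of the macros above (not part of the commit). Because __ASM_SEL() here always picks its second, 64-bit argument, the C-side _ASM_EXTABLE() is pure string-literal concatenation and can be inspected directly:

/* Sketch: what _ASM_EXTABLE(1b, 3b) pastes into an inline asm statement. */
static const char extable_demo[] = _ASM_EXTABLE(1b, 3b);
/* extable_demo is the concatenation of:
 *   " .section __ex_table,\"a\"\n"
 *   " .balign 8 " "\n"
 *   " .quad " "1b" "," "3b" "\n"
 *   " .previous\n"
 */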
kernel/include/futex.h (new file, 242 lines)
@@ -0,0 +1,242 @@
/* Kitten LWK futex adaptation */


#ifndef _LWK_FUTEX_H
#define _LWK_FUTEX_H

/** \name Futex Commands
 * @{
 */
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
// @}

#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

/** \name Futex Operations, used for FUTEX_WAKE_OP
 * @{
 */
#define FUTEX_OP_SET 0   /* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD 1   /* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR 2    /* *(int *)UADDR2 |= OPARG; */
#define FUTEX_OP_ANDN 3  /* *(int *)UADDR2 &= ~OPARG; */
#define FUTEX_OP_XOR 4   /* *(int *)UADDR2 ^= OPARG; */

#define FUTEX_OP_OPARG_SHIFT 8  /* Use (1 << OPARG) instead of OPARG. */

#define FUTEX_OP_CMP_EQ 0  /* if (oldval == CMPARG) wake */
#define FUTEX_OP_CMP_NE 1  /* if (oldval != CMPARG) wake */
#define FUTEX_OP_CMP_LT 2  /* if (oldval < CMPARG) wake */
#define FUTEX_OP_CMP_LE 3  /* if (oldval <= CMPARG) wake */
#define FUTEX_OP_CMP_GT 4  /* if (oldval > CMPARG) wake */
#define FUTEX_OP_CMP_GE 5  /* if (oldval >= CMPARG) wake */
// @}

/* FUTEX_WAKE_OP will perform atomically
        int oldval = *(int *)UADDR2;
        *(int *)UADDR2 = oldval OP OPARG;
        if (oldval CMP CMPARG)
                wake UADDR2;  */
#define FUTEX_OP(op, oparg, cmp, cmparg) \
        (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
         | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
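To make the field layout concrete, here is a hedged, standalone sanity check (assuming the definitions above are in scope; not part of the commit). It packs an "add 1, wake if the old value was > 0" request and decodes it with the same shifts used further down in this header:

#include <assert.h>

int main(void)
{
        int encoded = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
        int op      = (encoded >> 28) & 7;    /* operation applied to *uaddr2 */
        int cmp     = (encoded >> 24) & 15;   /* comparison applied to oldval */
        int oparg   = (encoded << 8) >> 20;   /* operand for the operation    */
        int cmparg  = (encoded << 20) >> 20;  /* operand for the comparison   */

        assert(encoded == 0x14001000);
        assert(op == FUTEX_OP_ADD && cmp == FUTEX_OP_CMP_GT);
        assert(oparg == 1 && cmparg == 0);
        return 0;
}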
/*
 * bitset with all bits set for the FUTEX_xxx_BITSET OPs to request a
 * match of any bit.
 */
#define FUTEX_BITSET_MATCH_ANY 0xffffffff

#ifdef __KERNEL__

#include <aal/lock.h>
#include <list.h>
#include <process.h>
#include <waitq.h>

#ifndef _ASM_X86_FUTEX_H
#define _ASM_X86_FUTEX_H

#ifdef __KERNEL__

/* We don't deal with uaccess at the moment, because x86 can access
 * userspace directly; we rely on glibc and the app developers.
 */
#ifdef __UACCESS__
#include <arch/uaccess.h>
#endif

#include <asm.h>
#include <errno.h>

#define __user

#if 0
#include <arch/processor.h>
#include <arch/system.h>
#endif

#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
        asm volatile("1:\t" insn "\n" \
                     "2:\t.section .fixup,\"ax\"\n" \
                     "3:\tmov\t%3, %1\n" \
                     "\tjmp\t2b\n" \
                     "\t.previous\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
                     : "i" (-EFAULT), "0" (oparg), "1" (0))

#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
        asm volatile("1:\tmovl %2, %0\n" \
                     "\tmovl\t%0, %3\n" \
                     "\t" insn "\n" \
                     "2:\tlock; cmpxchgl %3, %2\n" \
                     "\tjnz\t1b\n" \
                     "3:\t.section .fixup,\"ax\"\n" \
                     "4:\tmov\t%5, %1\n" \
                     "\tjmp\t3b\n" \
                     "\t.previous\n" \
                     _ASM_EXTABLE(1b, 4b) \
                     _ASM_EXTABLE(2b, 4b) \
                     : "=&a" (oldval), "=&r" (ret), \
                       "+m" (*uaddr), "=&r" (tem) \
                     : "r" (oparg), "i" (-EFAULT), "1" (0))

static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
        int oldval = 0, ret, tem;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;

#ifdef __UACCESS__
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
#endif

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval,
                                   uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }

        if (!ret) {
                switch (cmp) {
                case FUTEX_OP_CMP_EQ:
                        ret = (oldval == cmparg);
                        break;
                case FUTEX_OP_CMP_NE:
                        ret = (oldval != cmparg);
                        break;
                case FUTEX_OP_CMP_LT:
                        ret = (oldval < cmparg);
                        break;
                case FUTEX_OP_CMP_GE:
                        ret = (oldval >= cmparg);
                        break;
                case FUTEX_OP_CMP_LE:
                        ret = (oldval <= cmparg);
                        break;
                case FUTEX_OP_CMP_GT:
                        ret = (oldval > cmparg);
                        break;
                default:
                        ret = -ENOSYS;
                }
        }
        return ret;
}
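A hedged sketch of how a generic FUTEX_WAKE_OP path might drive the helper above (wake_waiters() is a hypothetical stand-in for the wake routine, not part of this commit):

extern void wake_waiters(int __user *uaddr);  /* hypothetical */

static int wake_op_sketch(int __user *uaddr2, int encoded_op)
{
        int op_ret = futex_atomic_op_inuser(encoded_op, uaddr2);

        if (op_ret < 0)
                return op_ret;          /* -EFAULT or -ENOSYS propagated */
        if (op_ret > 0)
                wake_waiters(uaddr2);   /* comparison matched: wake uaddr2 */
        return 0;
}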
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
                                                int newval)
{
#ifdef __UACCESS__
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
#endif

        asm volatile("1:\tlock; cmpxchgl %3, %1\n"
                     "2:\t.section .fixup, \"ax\"\n"
                     "3:\tmov %2, %0\n"
                     "\tjmp 2b\n"
                     "\t.previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : "=a" (oldval), "+m" (*uaddr)
                     : "i" (-EFAULT), "r" (newval), "0" (oldval)
                     : "memory"
        );

        return oldval;
}
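Since the helper returns the value it actually found at *uaddr (or -EFAULT on a fault), callers decide success by comparing against the expected value. A hedged, illustrative wrapper:

static inline int cas_took_effect(int __user *uaddr, int expected, int desired)
{
        int curval = futex_atomic_cmpxchg_inatomic(uaddr, expected, desired);

        return curval == expected;      /* 1 only if 'desired' was installed */
}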
#endif // __KERNEL__
#endif // _ASM_X86_FUTEX_H



#define FUTEX_HASHBITS 8  /* 256 entries in each futex hash tbl */

/** Futex tracking structure.
 *
 * A futex has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when list_empty(&futex->link) || futex->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up futex->waitq, then make the second condition true.
 */
struct futex {
        struct list_head link;
        struct waitq waitq;
        aal_spinlock_t * lock_ptr;
        uint32_t __user * uaddr;
        uint32_t bitset;
};

struct futex_queue {
        aal_spinlock_t lock;
        struct list_head futex_list;
};

extern void
futex_queue_init(
        struct futex_queue * queue
);

extern int
futex(
        uint32_t __user * uaddr,
        int op,
        uint32_t val,
        uint64_t timeout,
        uint32_t __user * uaddr2,
        uint32_t val2,
        uint32_t val3
);


#endif
#endif
kernel/include/hash.h (new file, 70 lines)
@@ -0,0 +1,70 @@
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
   (C) 2002 William Lee Irwin III, IBM */

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */

#define BITS_PER_LONG 64

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL

#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif

static inline uint64_t hash_64(uint64_t val, unsigned int bits)
{
        uint64_t hash = val;

        /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
        uint64_t n = hash;
        n <<= 18;
        hash -= n;
        n <<= 33;
        hash -= n;
        n <<= 3;
        hash += n;
        n <<= 3;
        hash -= n;
        n <<= 4;
        hash += n;
        n <<= 2;
        hash += n;

        /* High bits are more random, so use them. */
        return hash >> (64 - bits);
}
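A hedged, standalone check (not part of the commit) that the shift/add sequence above really is multiplication by GOLDEN_RATIO_PRIME_64 modulo 2^64; each shift contributes one term of the prime given in the comment:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t val = 0x0123456789abcdefULL;
        uint64_t hash = val, n = val;   /* the initial copy is the +1 term */

        n <<= 18; hash -= n;            /* - 2^18 */
        n <<= 33; hash -= n;            /* - 2^51 */
        n <<= 3;  hash += n;            /* + 2^54 */
        n <<= 3;  hash -= n;            /* - 2^57 */
        n <<= 4;  hash += n;            /* + 2^61 */
        n <<= 2;  hash += n;            /* + 2^63 */

        assert(hash == val * 0x9e37fffffffc0001ULL);
        return 0;
}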
static inline uint32_t hash_32(uint32_t val, unsigned int bits)
{
        /* On some cpus multiply is faster, on others gcc will do shifts */
        uint32_t hash = val * GOLDEN_RATIO_PRIME_32;

        /* High bits are more random, so use them. */
        return hash >> (32 - bits);
}

static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
        return hash_long((unsigned long)ptr, bits);
}
#endif /* _LINUX_HASH_H */
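These helpers are what the futex code in this commit would use to select a hash bucket. A hedged sketch (futex_bucket() itself is illustrative; only the types and FUTEX_HASHBITS come from this commit):

static inline struct futex_queue *futex_bucket(struct futex_queue *queues,
                                               uint32_t __user *uaddr)
{
        unsigned long idx = hash_ptr((void *)uaddr, FUTEX_HASHBITS);

        return &queues[idx];    /* idx is always < (1 << FUTEX_HASHBITS) */
}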
kernel/include/lwk/compiler-gcc.h (new file, 36 lines)
@@ -0,0 +1,36 @@
/* Never include this file directly. Include <lwk/compiler.h> instead. */

/*
 * Common definitions for all gcc versions go here.
 */


/* Optimization barrier */
/* The "volatile" is due to gcc bugs
 * NOTE: already defined in aal/manycore/generic/include/aal/cpu.h
 * #define barrier() __asm__ __volatile__("": : :"memory")
 */

/* This macro obfuscates arithmetic on a variable address so that gcc
   shouldn't recognize the original var, and make assumptions about it */
/*
 * Versions of the ppc64 compiler before 4.1 had a bug where use of
 * RELOC_HIDE could trash r30. The bug can be worked around by changing
 * the inline assembly constraint from =g to =r, in this particular
 * case either is valid.
 */
#define RELOC_HIDE(ptr, off) \
  ({ unsigned long __ptr; \
     __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
     (typeof(ptr)) (__ptr + (off)); })
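A hedged, illustrative use of RELOC_HIDE() (the per-CPU offset table below is an assumption, not part of this commit): gcc only sees an opaque register copy of the base pointer, so it cannot fold or reorder the offset arithmetic.

extern unsigned long __percpu_offset[]; /* hypothetical offset table */

#define percpu_ptr_sketch(var, cpu) \
        RELOC_HIDE(&(var), __percpu_offset[cpu])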
#define inline inline __attribute__((always_inline))
#define __inline__ __inline__ __attribute__((always_inline))
#define __inline __inline __attribute__((always_inline))
#define __deprecated __attribute__((deprecated))
#define noinline __attribute__((noinline))
#define __attribute_pure__ __attribute__((pure))
#define __attribute_const__ __attribute__((__const__))
#define __weak __attribute__((weak))
#define __noreturn __attribute__((noreturn))
kernel/include/lwk/compiler-gcc4.h (new file, 24 lines)
@@ -0,0 +1,24 @@
/* Never include this file directly. Include <lwk/compiler.h> instead. */

/* These definitions are for GCC v4.x. */
#include <lwk/compiler-gcc.h>

#ifdef CONFIG_FORCED_INLINING
# undef inline
# undef __inline__
# undef __inline
# define inline inline __attribute__((always_inline))
# define __inline__ __inline__ __attribute__((always_inline))
# define __inline __inline __attribute__((always_inline))
#endif

#define __used __attribute__((__used__))
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
#define __always_inline inline __attribute__((always_inline))

/*
 * A trick to suppress uninitialized variable warning without generating any
 * code
 */
#define uninitialized_var(x) x = x
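A hedged illustration of the trick (the function and its names are made up): the self-assignment convinces gcc the variable is initialized without emitting any instructions.

static int read_cached_sketch(int have_cached, int cached)
{
        int uninitialized_var(val);     /* expands to: int val = val; */

        if (have_cached)
                val = cached;

        return have_cached ? val : 0;   /* val is only read when it was set */
}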
kernel/include/lwk/compiler.h (new file, 146 lines)
@@ -0,0 +1,146 @@
#ifndef _LWK_COMPILER_H
#define _LWK_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __kernel /* default address space */
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __acquires(x) __attribute__((context(0,1)))
# define __releases(x) __attribute__((context(1,0)))
# define __acquire(x) __context__(1)
# define __release(x) __context__(-1)
# define __cond_lock(x) ((x) ? ({ __context__(1); 1; }) : 0)
# define __unused(x) x __attribute__((unused))
extern void __chk_user_ptr(void __user *);
extern void __chk_io_ptr(void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x) (x)
# define __unused(x) x
#endif

#ifdef __KERNEL__

#if __GNUC__ > 4
#error no compiler-gcc.h file for this gcc version
#elif __GNUC__ == 4
# include <lwk/compiler-gcc4.h>
#else
# error Sorry, your compiler is too old/not recognized.
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files
 */

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
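A hedged usage sketch of the branch hints above (the function is illustrative): marking the error path unlikely lets gcc keep the common path straight-line.

static inline int bounds_check_sketch(unsigned long len, unsigned long max)
{
        if (unlikely(len > max))
                return -1;      /* rare error path, kept off the hot path */
        return 0;
}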
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
  ({ unsigned long __ptr; \
     __ptr = (unsigned long) (ptr); \
    (typeof(ptr)) (__ptr + (off)); })
#endif

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *              int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated /* unimplemented */
#endif

#ifndef __must_check
#define __must_check
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file. As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 */
#ifndef __used
# define __used /* unimplemented */
#endif

/*
 * From the GCC manual:
 *
 * Many functions have no effects except the return value and their
 * return value depends only on the parameters and/or global
 * variables. Such a function can be subject to common subexpression
 * elimination and loop optimization just as an arithmetic operator
 * would be.
 * [...]
 */
#ifndef __attribute_pure__
# define __attribute_pure__ /* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value. Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'. Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'. It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__ /* unimplemented */
#endif

#endif /* _LWK_COMPILER_H */
kernel/include/lwk/futex.h (new file, 109 lines)
@@ -0,0 +1,109 @@
#ifndef _LWK_FUTEX_H
#define _LWK_FUTEX_H

/** \name Futex Commands
 * @{
 */
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
// @}

#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

/** \name Futex Operations, used for FUTEX_WAKE_OP
 * @{
 */
#define FUTEX_OP_SET 0   /* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD 1   /* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR 2    /* *(int *)UADDR2 |= OPARG; */
#define FUTEX_OP_ANDN 3  /* *(int *)UADDR2 &= ~OPARG; */
#define FUTEX_OP_XOR 4   /* *(int *)UADDR2 ^= OPARG; */

#define FUTEX_OP_OPARG_SHIFT 8  /* Use (1 << OPARG) instead of OPARG. */

#define FUTEX_OP_CMP_EQ 0  /* if (oldval == CMPARG) wake */
#define FUTEX_OP_CMP_NE 1  /* if (oldval != CMPARG) wake */
#define FUTEX_OP_CMP_LT 2  /* if (oldval < CMPARG) wake */
#define FUTEX_OP_CMP_LE 3  /* if (oldval <= CMPARG) wake */
#define FUTEX_OP_CMP_GT 4  /* if (oldval > CMPARG) wake */
#define FUTEX_OP_CMP_GE 5  /* if (oldval >= CMPARG) wake */
// @}

/* FUTEX_WAKE_OP will perform atomically
        int oldval = *(int *)UADDR2;
        *(int *)UADDR2 = oldval OP OPARG;
        if (oldval CMP CMPARG)
                wake UADDR2;  */
#define FUTEX_OP(op, oparg, cmp, cmparg) \
        (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
         | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

/*
 * bitset with all bits set for the FUTEX_xxx_BITSET OPs to request a
 * match of any bit.
 */
#define FUTEX_BITSET_MATCH_ANY 0xffffffff

#ifdef __KERNEL__

#include <lwk/spinlock.h>
#include <lwk/list.h>
#include <lwk/waitq.h>
#include <arch/futex.h>

#define FUTEX_HASHBITS 8  /* 256 entries in each futex hash tbl */

/** Futex tracking structure.
 *
 * A futex has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when list_empty(&futex->link) || futex->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up futex->waitq, then make the second condition true.
 */
struct futex {
        struct list_head link;
        struct waitq waitq;
        spinlock_t * lock_ptr;
        uint32_t __user * uaddr;
        uint32_t bitset;
};

struct futex_queue {
        spinlock_t lock;
        struct list_head futex_list;
};

extern void
futex_queue_init(
        struct futex_queue * queue
);

extern int
futex(
        uint32_t __user * uaddr,
        int op,
        uint32_t val,
        uint64_t timeout,
        uint32_t __user * uaddr2,
        uint32_t val2,
        uint32_t val3
);

extern long
sys_futex(
        uint32_t __user * uaddr,
        int op,
        uint32_t val,
        struct timespec __user * utime,
        uint32_t __user * uaddr2,
        uint32_t val3
);

#endif
#endif
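The wake-up order documented on struct futex above can be made concrete with a hedged sketch (waitq_wakeup() stands in for whatever wake primitive the LWK wait-queue API actually provides, and real code would likely need a write barrier before the final store):

static inline void wake_futex_sketch(struct futex *f)
{
        list_del_init(&f->link);        /* 1: make list_empty(&f->link) true */
        waitq_wakeup(&f->waitq);        /* 2: wake the waiter (hypothetical call) */
        f->lock_ptr = NULL;             /* 3: make the second condition true */
}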
kernel/include/lwk/stddef.h (new file, 25 lines)
@@ -0,0 +1,25 @@
#ifndef _LWK_STDDEF_H
#define _LWK_STDDEF_H

#include <lwk/compiler.h>

#undef NULL
#if defined(__cplusplus)
#define NULL 0
#else
#define NULL ((void *)0)
#endif

#ifdef __KERNEL__
#define false 0
#define true 1
#endif

#undef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#endif
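A hedged illustration of the offsetof() fallback above (struct point is made up for the example):

struct point {
        int x;
        int y;
};

/* With the non-builtin definition this expands to
 * (size_t) &((struct point *)0)->y, typically 4. */
static const size_t y_offset = offsetof(struct point, y);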
@@ -20,6 +20,7 @@

#define PS_NORMAL (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE)


struct vm_range {
        struct list_head list;
        unsigned long start, end;
@@ -33,16 +34,9 @@ struct vm_regions {
        unsigned long brk_start, brk_end;
        unsigned long map_start, map_end;
        unsigned long stack_start, stack_end;
        unsigned long tlsblock_base, tlsblock_limit;
};

struct process_vm {
        aal_atomic_t refcount;

        struct page_table *page_table;
        struct list_head vm_range_list;
        struct vm_regions region;
};
struct process_vm;

struct process {
        int pid;
@@ -54,13 +48,30 @@ struct process {
        aal_mc_kernel_context_t ctx;
        aal_mc_user_context_t *uctx;

        struct list_head sched_list; // Runqueue
        // Runqueue list entry
        struct list_head sched_list;

        struct thread {
                int *clear_child_tid;
                unsigned long tlsblock_base, tlsblock_limit;
        } thread;
};

#include <waitq.h>
#include <futex.h>

struct process_vm {
        aal_atomic_t refcount;

        struct page_table *page_table;
        struct list_head vm_range_list;
        struct vm_regions region;

        // Address space private futexes
        struct futex_queue futex_queues[1 << FUTEX_HASHBITS];
};


struct process *create_process(unsigned long user_pc);
struct process *clone_process(struct process *org,
                              unsigned long pc, unsigned long sp);
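With futex_queues[] now embedded in struct process_vm, the VM setup path has to initialize every bucket. A hedged sketch (the function name is illustrative; futex_queue_init() and FUTEX_HASHBITS come from this commit):

static void init_vm_futex_queues(struct process_vm *vm)
{
        int i;

        for (i = 0; i < (1 << FUTEX_HASHBITS); ++i)
                futex_queue_init(&vm->futex_queues[i]);
}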
kernel/include/rlimit.h (new file, 88 lines)
@@ -0,0 +1,88 @@

#ifndef __RLIMIT_H
#define __RLIMIT_H

/* Kinds of resource limit. */
enum __rlimit_resource
{
        /* Per-process CPU limit, in seconds. */
        RLIMIT_CPU = 0,
#define RLIMIT_CPU RLIMIT_CPU

        /* Largest file that can be created, in bytes. */
        RLIMIT_FSIZE = 1,
#define RLIMIT_FSIZE RLIMIT_FSIZE

        /* Maximum size of data segment, in bytes. */
        RLIMIT_DATA = 2,
#define RLIMIT_DATA RLIMIT_DATA

        /* Maximum size of stack segment, in bytes. */
        RLIMIT_STACK = 3,
#define RLIMIT_STACK RLIMIT_STACK

        /* Largest core file that can be created, in bytes. */
        RLIMIT_CORE = 4,
#define RLIMIT_CORE RLIMIT_CORE

        /* Largest resident set size, in bytes.
           This affects swapping; processes that are exceeding their
           resident set size will be more likely to have physical memory
           taken from them. */
        __RLIMIT_RSS = 5,
#define RLIMIT_RSS __RLIMIT_RSS

        /* Number of open files. */
        RLIMIT_NOFILE = 7,
        __RLIMIT_OFILE = RLIMIT_NOFILE, /* BSD name for same. */
#define RLIMIT_NOFILE RLIMIT_NOFILE
#define RLIMIT_OFILE __RLIMIT_OFILE

        /* Address space limit. */
        RLIMIT_AS = 9,
#define RLIMIT_AS RLIMIT_AS

        /* Number of processes. */
        __RLIMIT_NPROC = 6,
#define RLIMIT_NPROC __RLIMIT_NPROC

        /* Locked-in-memory address space. */
        __RLIMIT_MEMLOCK = 8,
#define RLIMIT_MEMLOCK __RLIMIT_MEMLOCK

        /* Maximum number of file locks. */
        __RLIMIT_LOCKS = 10,
#define RLIMIT_LOCKS __RLIMIT_LOCKS

        /* Maximum number of pending signals. */
        __RLIMIT_SIGPENDING = 11,
#define RLIMIT_SIGPENDING __RLIMIT_SIGPENDING

        /* Maximum bytes in POSIX message queues. */
        __RLIMIT_MSGQUEUE = 12,
#define RLIMIT_MSGQUEUE __RLIMIT_MSGQUEUE

        /* Maximum nice priority allowed to raise to.
           Nice levels 19 .. -20 correspond to 0 .. 39
           values of this resource limit. */
        __RLIMIT_NICE = 13,
#define RLIMIT_NICE __RLIMIT_NICE

        /* Maximum realtime priority allowed for non-privileged
           processes. */
        __RLIMIT_RTPRIO = 14,
#define RLIMIT_RTPRIO __RLIMIT_RTPRIO

        __RLIMIT_NLIMITS = 15,
        __RLIM_NLIMITS = __RLIMIT_NLIMITS
#define RLIMIT_NLIMITS __RLIMIT_NLIMITS
#define RLIM_NLIMITS __RLIM_NLIMITS
};


struct rlimit {
        uint64_t rlim_cur; /* Soft limit */
        uint64_t rlim_max; /* Hard limit (ceiling for rlim_cur) */
};

#endif
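A hedged sketch of how these definitions might be consumed (the RLIM_INFINITY value and the default table are assumptions, not part of this commit):

#define RLIM_INFINITY (~0UL)    /* assumed convention */

static struct rlimit default_rlimits[RLIM_NLIMITS] = {
        [RLIMIT_STACK]  = { .rlim_cur = 8UL << 20, .rlim_max = RLIM_INFINITY },
        [RLIMIT_NOFILE] = { .rlim_cur = 1024,      .rlim_max = 4096 },
};

/* Example check against the soft stack limit. */
static int stack_size_ok(unsigned long size)
{
        return size <= default_rlimits[RLIMIT_STACK].rlim_cur;
}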
@@ -5,8 +5,8 @@

#include <aal/lock.h>
#include <list.h>
#include <process.h>

struct process;
struct waitq_entry;

typedef int (*waitq_func_t)(struct waitq_entry *wait, unsigned mode,