uti: futex call function in mcctrl
Previously, the futex code of McKernel was called by mcctrl, but there were some problems with this method (mainly, locating the McKernel image in memory). Call the futex code in mcctrl instead of the one in the McKernel image, which gives the following benefits: 1. No longer relies on a kernel virtual address space shared with Linux. 2. Storing and retrieving the CPU id is no longer needed, simplifying the code. Change-Id: Ic40929b64a655b270c435859fa287fedb713ee5c refe: #1428
committed by Masamichi Takagi
parent 35296c8210
commit a9973e913d
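As an illustrative sketch of the change (every name below is an assumption for illustration, not taken from this commit): the futex request is now served by a function compiled into the mcctrl module itself, so no address inside the mapped McKernel image ever has to be resolved from Linux.

/* Hypothetical before/after sketch of the call path: */

/* before: jump to futex code located inside the McKernel image, which
 * required a kernel virtual address space shared with Linux, e.g.:
 *   ret = ((futex_fn_t)(mckernel_image_va + futex_offset))(uaddr, op, val);
 */

/* after (sketch): call mcctrl's own copy of the futex code directly */
long mcctrl_uti_futex_sketch(uint32_t *uaddr, int op, uint32_t val)
{
    /* hypothetical local entry point; the real symbols are in the diff */
    return mcctrl_futex(uaddr, op, val);
}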
executer/kernel/mcctrl/arch/arm64/cpu.c (new file, 96 lines)
@@ -0,0 +1,96 @@
/* cpu.c COPYRIGHT FUJITSU LIMITED 2015-2019 */

#include <cpu.h>

/* arm64 has no "pause" instruction; use the "yield" instruction instead */
void cpu_pause(void)
{
    asm volatile("yield" ::: "memory");
}

#if defined(CONFIG_HAS_NMI)
#include <arm-gic-v3.h>

/* restore interrupt (ICC_PMR_EL1 <= flags) */
void cpu_restore_interrupt(unsigned long flags)
{
    asm volatile(
        "msr_s " __stringify(ICC_PMR_EL1) ",%0"
        :
        : "r" (flags)
        : "memory");
}

/* save ICC_PMR_EL1 & disable interrupt (ICC_PMR_EL1 <= ICC_PMR_EL1_MASKED) */
unsigned long cpu_disable_interrupt_save(void)
{
    unsigned long flags;
    unsigned long masked = ICC_PMR_EL1_MASKED;

    asm volatile(
        "mrs_s %0, " __stringify(ICC_PMR_EL1) "\n"
        "msr_s " __stringify(ICC_PMR_EL1) ",%1"
        : "=&r" (flags)
        : "r" (masked)
        : "memory");
    return flags;
}

/* save ICC_PMR_EL1 & enable interrupt (ICC_PMR_EL1 <= ICC_PMR_EL1_UNMASKED) */
unsigned long cpu_enable_interrupt_save(void)
{
    unsigned long flags;
    unsigned long masked = ICC_PMR_EL1_UNMASKED;

    asm volatile(
        "mrs_s %0, " __stringify(ICC_PMR_EL1) "\n"
        "msr_s " __stringify(ICC_PMR_EL1) ",%1"
        : "=&r" (flags)
        : "r" (masked)
        : "memory");
    return flags;
}

#else /* defined(CONFIG_HAS_NMI) */

/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_local_irq_restore */
/* restore interrupt (PSTATE.DAIF = flags restore) */
void cpu_restore_interrupt(unsigned long flags)
{
    asm volatile(
        "msr daif, %0 // arch_local_irq_restore"
        :
        : "r" (flags)
        : "memory");
}

/* @ref.impl arch/arm64/include/asm/irqflags.h::arch_local_irq_save */
/* save PSTATE.DAIF & disable interrupt (PSTATE.DAIF I bit set) */
unsigned long cpu_disable_interrupt_save(void)
{
    unsigned long flags;

    asm volatile(
        "mrs %0, daif // arch_local_irq_save\n"
        "msr daifset, #2"
        : "=r" (flags)
        :
        : "memory");
    return flags;
}

/* save PSTATE.DAIF & enable interrupt (PSTATE.DAIF I bit clear) */
unsigned long cpu_enable_interrupt_save(void)
{
    unsigned long flags;

    asm volatile(
        "mrs %0, daif // arch_local_irq_save\n"
        "msr daifclr, #2"
        : "=r" (flags)
        :
        : "memory");
    return flags;
}
#endif /* defined(CONFIG_HAS_NMI) */
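A minimal usage sketch of the helpers above (illustrative only, not part of the commit):

/* Mask interrupts around a critical section, then restore the old state. */
unsigned long flags;

flags = cpu_disable_interrupt_save();
/* ... work that must not be interrupted ... */
cpu_restore_interrupt(flags);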
executer/kernel/mcctrl/arch/arm64/include/arch-lock.h (new file, 142 lines)
@@ -0,0 +1,142 @@
/* This is a copy of the necessary parts from McKernel, for uti-futex */

/* arch-lock.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
#ifndef __HEADER_ARM64_COMMON_ARCH_LOCK_H
#define __HEADER_ARM64_COMMON_ARCH_LOCK_H

#include <linux/preempt.h>
#include <cpu.h>

#define ihk_mc_spinlock_lock         __ihk_mc_spinlock_lock
#define ihk_mc_spinlock_unlock       __ihk_mc_spinlock_unlock

#define ihk_mc_spinlock_lock_noirq   __ihk_mc_spinlock_lock_noirq
#define ihk_mc_spinlock_unlock_noirq __ihk_mc_spinlock_unlock_noirq

/* @ref.impl arch/arm64/include/asm/spinlock_types.h::TICKET_SHIFT */
#define TICKET_SHIFT 16

/* @ref.impl ./arch/arm64/include/asm/lse.h::ARM64_LSE_ATOMIC_INSN */
/* else defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) */
#define _ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc

/* @ref.impl arch/arm64/include/asm/spinlock_types.h::arch_spinlock_t */
typedef struct {
#ifdef __AARCH64EB__
    uint16_t next;
    uint16_t owner;
#else /* __AARCH64EB__ */
    uint16_t owner;
    uint16_t next;
#endif /* __AARCH64EB__ */
} __attribute__((aligned(4))) _ihk_spinlock_t;

/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_lock */
/* spinlock lock */
static inline void
__ihk_mc_spinlock_lock_noirq(_ihk_spinlock_t *lock)
{
    unsigned int tmp;
    _ihk_spinlock_t lockval, newval;

    preempt_disable();

    asm volatile(
    /* Atomically increment the next ticket. */
    _ARM64_LSE_ATOMIC_INSN(
    /* LL/SC */
    "   prfm    pstl1strm, %3\n"
    "1: ldaxr   %w0, %3\n"
    "   add     %w1, %w0, %w5\n"
    "   stxr    %w2, %w1, %3\n"
    "   cbnz    %w2, 1b\n",
    /* LSE atomics */
    "   mov     %w2, %w5\n"
    "   ldadda  %w2, %w0, %3\n"
    __nops(3)
    )

    /* Did we get the lock? */
    "   eor     %w1, %w0, %w0, ror #16\n"
    "   cbz     %w1, 3f\n"
    /*
     * No: spin on the owner. Send a local event to avoid missing an
     * unlock before the exclusive load.
     */
    "   sevl\n"
    "2: wfe\n"
    "   ldaxrh  %w2, %4\n"
    "   eor     %w1, %w2, %w0, lsr #16\n"
    "   cbnz    %w1, 2b\n"
    /* We got the lock. Critical section starts here. */
    "3:"
    : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
    : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
    : "memory");
}

/* spinlock lock & interrupt disable & PSTATE.DAIF save */
static inline unsigned long
__ihk_mc_spinlock_lock(_ihk_spinlock_t *lock)
{
    unsigned long flags;

    flags = cpu_disable_interrupt_save();

    __ihk_mc_spinlock_lock_noirq(lock);

    return flags;
}

/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_unlock */
/* spinlock unlock */
static inline void
__ihk_mc_spinlock_unlock_noirq(_ihk_spinlock_t *lock)
{
    unsigned long tmp;

    asm volatile(_ARM64_LSE_ATOMIC_INSN(
    /* LL/SC */
    "   ldrh    %w1, %0\n"
    "   add     %w1, %w1, #1\n"
    "   stlrh   %w1, %0",
    /* LSE atomics */
    "   mov     %w1, #1\n"
    "   staddlh %w1, %0\n"
    __nops(1))
    : "=Q" (lock->owner), "=&r" (tmp)
    :
    : "memory");

    preempt_enable();
}

static inline void
__ihk_mc_spinlock_unlock(_ihk_spinlock_t *lock, unsigned long flags)
{
    __ihk_mc_spinlock_unlock_noirq(lock);

    cpu_restore_interrupt(flags);
}

typedef struct mcs_rwlock_lock {
    _ihk_spinlock_t slock;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_lock_t;
#else
} mcs_rwlock_lock_t;
#endif

static inline void
mcs_rwlock_writer_lock_noirq(struct mcs_rwlock_lock *lock)
{
    ihk_mc_spinlock_lock_noirq(&lock->slock);
}

static inline void
mcs_rwlock_writer_unlock_noirq(struct mcs_rwlock_lock *lock)
{
    ihk_mc_spinlock_unlock_noirq(&lock->slock);
}

#endif /* !__HEADER_ARM64_COMMON_ARCH_LOCK_H */
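A short usage sketch of the copied ticket-lock API (the lock variable is a hypothetical example, not part of the commit; a zero-initialized lock is free since owner == next):

static _ihk_spinlock_t example_lock; /* hypothetical; zero-init = unlocked */

static void example_critical_section(void)
{
    unsigned long flags;

    flags = ihk_mc_spinlock_lock(&example_lock);
    /* ... shared futex state, interrupts masked, preemption off ... */
    ihk_mc_spinlock_unlock(&example_lock, flags);
}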
executer/kernel/mcctrl/arch/arm64/include/archdeps.h
@@ -38,4 +38,26 @@ static const unsigned long arch_rus_vm_flags = VM_RESERVED | VM_MIXEDMAP | VM_EXEC;
#else
static const unsigned long arch_rus_vm_flags = VM_DONTDUMP | VM_MIXEDMAP | VM_EXEC;
#endif

#define _xchg(ptr, x)                                           \
({                                                              \
    __typeof__(*(ptr)) __ret;                                   \
    __ret = (__typeof__(*(ptr)))                                \
        __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));      \
    __ret;                                                      \
})

#define xchg4(ptr, x) _xchg(ptr, x)
#define xchg8(ptr, x) _xchg(ptr, x)

enum arm64_pf_error_code {
    PF_PROT     = 1 << 0,
    PF_WRITE    = 1 << 1,
    PF_USER     = 1 << 2,
    PF_RSVD     = 1 << 3,
    PF_INSTR    = 1 << 4,

    PF_PATCH    = 1 << 29,
    PF_POPULATE = 1 << 30,
};
#endif /* __HEADER_MCCTRL_ARM64_ARCHDEPS_H */
executer/kernel/mcctrl/arch/x86_64/cpu.c (new file, 51 lines)
@@ -0,0 +1,51 @@
/* This is a copy of the necessary parts from McKernel, for uti-futex */

#include <cpu.h>

/*@
  @ assigns \nothing;
  @ behavior to_enabled:
  @ assumes flags & RFLAGS_IF;
  @ ensures \interrupt_disabled == 0;
  @ behavior to_disabled:
  @ assumes !(flags & RFLAGS_IF);
  @ ensures \interrupt_disabled > 0;
  @*/
void cpu_restore_interrupt(unsigned long flags)
{
    asm volatile("push %0; popf" : : "g"(flags) : "memory", "cc");
}

void cpu_pause(void)
{
    asm volatile("pause" ::: "memory");
}

/*@
  @ assigns \nothing;
  @ ensures \interrupt_disabled > 0;
  @ behavior from_enabled:
  @ assumes \interrupt_disabled == 0;
  @ ensures \result & RFLAGS_IF;
  @ behavior from_disabled:
  @ assumes \interrupt_disabled > 0;
  @ ensures !(\result & RFLAGS_IF);
  @*/
unsigned long cpu_disable_interrupt_save(void)
{
    unsigned long flags;

    asm volatile("pushf; pop %0; cli" : "=r"(flags) : : "memory", "cc");

    return flags;
}

unsigned long cpu_enable_interrupt_save(void)
{
    unsigned long flags;

    asm volatile("pushf; pop %0; sti" : "=r"(flags) : : "memory", "cc");

    return flags;
}
executer/kernel/mcctrl/arch/x86_64/include/arch-lock.h (new file, 106 lines)
@@ -0,0 +1,106 @@
/* This is a copy of the necessary parts from McKernel, for uti-futex */

#ifndef __HEADER_X86_COMMON_ARCH_LOCK
#define __HEADER_X86_COMMON_ARCH_LOCK

#include <linux/preempt.h>
#include <cpu.h>

#define ihk_mc_spinlock_lock         __ihk_mc_spinlock_lock
#define ihk_mc_spinlock_unlock       __ihk_mc_spinlock_unlock

#define ihk_mc_spinlock_lock_noirq   __ihk_mc_spinlock_lock_noirq
#define ihk_mc_spinlock_unlock_noirq __ihk_mc_spinlock_unlock_noirq

typedef unsigned short __ticket_t;
typedef unsigned int __ticketpair_t;

/* arch/x86/include/asm/spinlock_types.h defines struct __raw_tickets */
typedef struct ihk_spinlock {
    union {
        __ticketpair_t head_tail;
        struct ihk__raw_tickets {
            __ticket_t head, tail;
        } tickets;
    };
} _ihk_spinlock_t;

static inline void ihk_mc_spinlock_init(_ihk_spinlock_t *lock)
{
    lock->head_tail = 0;
}

static inline void __ihk_mc_spinlock_lock_noirq(_ihk_spinlock_t *lock)
{
    register struct ihk__raw_tickets inc = { .tail = 0x0002 };

    preempt_disable();

    asm volatile ("lock xaddl %0, %1\n"
                  : "+r" (inc), "+m" (*(lock)) : : "memory", "cc");

    if (inc.head == inc.tail)
        goto out;

    for (;;) {
        if (*((volatile __ticket_t *)&lock->tickets.head) == inc.tail)
            goto out;
        cpu_pause();
    }

out:
    barrier(); /* make sure nothing creeps before the lock is taken */
}

static inline void __ihk_mc_spinlock_unlock_noirq(_ihk_spinlock_t *lock)
{
    __ticket_t inc = 0x0002;

    asm volatile ("lock addw %1, %0\n"
                  : "+m" (lock->tickets.head)
                  : "ri" (inc) : "memory", "cc");

    preempt_enable();
}

static inline unsigned long __ihk_mc_spinlock_lock(_ihk_spinlock_t *lock)
{
    unsigned long flags;

    flags = cpu_disable_interrupt_save();

    __ihk_mc_spinlock_lock_noirq(lock);

    return flags;
}

static inline void __ihk_mc_spinlock_unlock(_ihk_spinlock_t *lock,
                                            unsigned long flags)
{
    __ihk_mc_spinlock_unlock_noirq(lock);

    cpu_restore_interrupt(flags);
}

typedef struct mcs_rwlock_lock {
    _ihk_spinlock_t slock;

#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_lock_t;
#else
} mcs_rwlock_lock_t;
#endif

static inline void
mcs_rwlock_writer_lock_noirq(struct mcs_rwlock_lock *lock)
{
    ihk_mc_spinlock_lock_noirq(&lock->slock);
}

static inline void
mcs_rwlock_writer_unlock_noirq(struct mcs_rwlock_lock *lock)
{
    ihk_mc_spinlock_unlock_noirq(&lock->slock);
}

#endif
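Unlike the arm64 variant, the x86_64 lock has an explicit initializer; a usage sketch (illustrative only, not part of the commit):

_ihk_spinlock_t lk;
unsigned long flags;

ihk_mc_spinlock_init(&lk);          /* head == tail: lock is free */

flags = ihk_mc_spinlock_lock(&lk);  /* take ticket, spin until head matches */
/* ... critical section, interrupts disabled ... */
ihk_mc_spinlock_unlock(&lk, flags); /* advance head, restore RFLAGS.IF */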
executer/kernel/mcctrl/arch/x86_64/include/archdeps.h
@@ -23,4 +23,26 @@ static const unsigned long arch_rus_vm_flags = VM_RESERVED | VM_MIXEDMAP;
#else
static const unsigned long arch_rus_vm_flags = VM_DONTDUMP | VM_MIXEDMAP;
#endif

#define xchg4(ptr, x)                       \
({                                          \
    int __x = (x);                          \
    asm volatile("xchgl %k0,%1"             \
                 : "=r" (__x)               \
                 : "m" (*ptr), "0" (__x)    \
                 : "memory");               \
    __x;                                    \
})

enum x86_pf_error_code {
    PF_PROT     = 1 << 0,
    PF_WRITE    = 1 << 1,
    PF_USER     = 1 << 2,
    PF_RSVD     = 1 << 3,
    PF_INSTR    = 1 << 4,

    PF_PATCH    = 1 << 29,
    PF_POPULATE = 1 << 30,
};

#endif /* __HEADER_MCCTRL_X86_64_ARCHDEPS_H */
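xchg4 atomically swaps a 32-bit value and returns the previous contents; a usage sketch (hypothetical variable names, not part of the commit):

static int futex_state;              /* hypothetical futex word */

int prev = xchg4(&futex_state, 1);   /* store 1, get the old value atomically */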