arm64: Scalable Vector Extension (SVE) support.

Change-Id: I3568687913f583edfaa297d5cf5ac91d319d97e9
Committed-by: Masamichi Takagi
Parent:  dac99f708c
Commit:  07aa96ef95
@@ -1,4 +1,4 @@
-/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
+/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
 
 #include <process.h>
 #include <list.h>
@@ -53,4 +53,4 @@ STATIC_ASSERT(SVE_PT_FPSIMD_OFFSET == sizeof(struct user_sve_header));
 STATIC_ASSERT(SVE_PT_SVE_OFFSET == sizeof(struct user_sve_header));
 
 /* assert for struct arm64_cpu_local_thread member offset define */
-STATIC_ASSERT(offsetof(struct arm64_cpu_local_thread, panic_regs) == 160);
+STATIC_ASSERT(offsetof(struct arm64_cpu_local_thread, panic_regs) == 168);

@@ -1,7 +1,11 @@
-/* coredump.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
+/* coredump.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
 #include <process.h>
 #include <elfcore.h>
 #include <string.h>
+#include <ptrace.h>
+#include <cls.h>
 
+#define align32(x) ((((x) + 3) / 4) * 4)
+
 void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread, void *regs0)
 {
@@ -30,3 +34,43 @@ void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread,
 	/* copy unaligned prstatus addr */
 	memcpy(prstatus, &tmp_prstatus, sizeof(*prstatus));
 }
+
+int arch_get_thread_core_info_size(void)
+{
+	const struct user_regset_view *view = current_user_regset_view();
+	const struct user_regset *regset = find_regset(view, NT_ARM_SVE);
+
+	return sizeof(struct note) + align32(sizeof("LINUX"))
+		+ regset_size(cpu_local_var(current), regset);
+}
+
+void arch_fill_thread_core_info(struct note *head,
+				struct thread *thread, void *regs)
+{
+	const struct user_regset_view *view = current_user_regset_view();
+	const struct user_regset *regset = find_regset(view, NT_ARM_SVE);
+
+	/* pre saved registers */
+	save_fp_regs(thread);
+
+	if (regset->core_note_type && regset->get &&
+	    (!regset->active || regset->active(thread, regset))) {
+		int ret;
+		size_t size = regset_size(thread, regset);
+		void *namep;
+		void *descp;
+
+		namep = (void *) (head + 1);
+		descp = namep + align32(sizeof("LINUX"));
+
+		ret = regset->get(thread, regset, 0, size, descp, NULL);
+		if (ret) {
+			return;
+		}
+
+		head->namesz = sizeof("LINUX");
+		head->descsz = size;
+		head->type = NT_ARM_SVE;
+		memcpy(namep, "LINUX", sizeof("LINUX"));
+	}
+}

@@ -951,7 +951,7 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc)
 		setup_cpu_features();
 	}
 
-	init_sve_vl();
+	sve_setup();
 }
 
 /* for ihk_mc_init_context() */
@@ -1001,9 +1001,10 @@ void ihk_mc_init_context(ihk_mc_kernel_context_t *new_ctx,
 	const int lcpuid = ihk_mc_get_processor_id();
 	const unsigned long syscallno = current_pt_regs()->syscallno;
 #ifdef CONFIG_ARM64_SVE
-	const uint16_t orig_sve_vl = current_thread_info()->sve_vl;
-	const uint16_t orig_sve_vl_onexec = current_thread_info()->sve_vl_onexec;
-	const uint16_t orig_sve_flags = current_thread_info()->sve_flags;
+	struct thread_info *ti = current_thread_info();
+	const unsigned int orig_sve_vl = ti->sve_vl;
+	const unsigned int orig_sve_vl_onexec = ti->sve_vl_onexec;
+	const unsigned long orig_sve_flags = ti->sve_flags;
 #endif /* CONFIG_ARM64_SVE */
 
 	/* get kernel stack address */

@@ -1,4 +1,4 @@
-/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
+/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2019 */
 #include <thread_info.h>
 #include <fpsimd.h>
 #include <cpuinfo.h>
@@ -11,6 +11,7 @@
 #include <kmalloc.h>
 #include <debug.h>
 #include <process.h>
+#include <bitmap.h>
 
 //#define DEBUG_PRINT_FPSIMD
 
@@ -21,11 +22,87 @@
 
 #ifdef CONFIG_ARM64_SVE
+
+/* Set of available vector lengths, as vq_to_bit(vq): */
+static DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int sve_max_vl = -1;
 
 /* Default VL for tasks that don't set it explicitly: */
 int sve_default_vl = -1;
 
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa).  This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+
+static unsigned int vq_to_bit(unsigned int vq)
+{
+	return SVE_VQ_MAX - vq;
+}
+
+static unsigned int bit_to_vq(unsigned int bit)
+{
+	if (bit >= SVE_VQ_MAX) {
+		bit = SVE_VQ_MAX - 1;
+	}
+	return SVE_VQ_MAX - bit;
+}
+
+/*
+ * All vector length selection from userspace comes through here.
+ * We're on a slow path, so some sanity-checks are included.
+ * If things go wrong there's a bug somewhere, but try to fall back to a
+ * safe choice.
+ */
+static unsigned int find_supported_vector_length(unsigned int vl)
+{
+	int bit;
+	int max_vl = sve_max_vl;
+
+	if (!sve_vl_valid(vl)) {
+		vl = SVE_VL_MIN;
+	}
+
+	if (!sve_vl_valid(max_vl)) {
+		max_vl = SVE_VL_MIN;
+	}
+
+	if (vl > max_vl) {
+		vl = max_vl;
+	}
+
+	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
+			    vq_to_bit(sve_vq_from_vl(vl)));
+	return sve_vl_from_vq(bit_to_vq(bit));
+}
+
+static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
+{
+	unsigned int vq, vl;
+	unsigned long zcr;
+
+	bitmap_zero(map, SVE_VQ_MAX);
+
+	zcr = ZCR_EL1_LEN_MASK;
+	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
+
+	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
+		/* self-syncing */
+		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1);
+		vl = sve_get_vl();
+		/* skip intervening lengths */
+		vq = sve_vq_from_vl(vl);
+		set_bit(vq_to_bit(vq), map);
+	}
+}
+
+void sve_init_vq_map(void)
+{
+	sve_probe_vqs(sve_vq_map);
+}
+
 size_t sve_state_size(struct thread const *thread)
 {
 	unsigned int vl = thread->ctx.thread->sve_vl;
@@ -75,19 +152,7 @@ int sve_set_vector_length(struct thread *thread,
 {
 	struct thread_info *ti = thread->ctx.thread;
 
-	BUG_ON(thread == cpu_local_var(current) && cpu_local_var(no_preempt) == 0);
-
-	/*
-	 * To avoid accidents, forbid setting for individual threads of a
-	 * multithreaded process. User code that knows what it's doing can
-	 * pass PR_SVE_SET_VL_THREAD to override this restriction:
-	 */
-	if (!(flags & PR_SVE_SET_VL_THREAD) && get_nr_threads(thread->proc) != 1) {
-		return -EINVAL;
-	}
-	flags &= ~(unsigned long)PR_SVE_SET_VL_THREAD;
-
-	if (flags & ~(unsigned long)(PR_SVE_SET_VL_INHERIT |
+	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC)) {
 		return -EINVAL;
 	}
@@ -96,13 +161,19 @@ int sve_set_vector_length(struct thread *thread,
 		return -EINVAL;
 	}
 
-	if (vl > sve_max_vl) {
-		BUG_ON(!sve_vl_valid(sve_max_vl));
-		vl = sve_max_vl;
+	/*
+	 * Clamp to the maximum vector length that VL-agnostic SVE code can
+	 * work with.  A flag may be assigned in the future to allow setting
+	 * of larger vector lengths without confusing older software.
+	 */
+	if (vl > SVE_VL_ARCH_MAX) {
+		vl = SVE_VL_ARCH_MAX;
 	}
 
-	if (flags & (PR_SVE_SET_VL_ONEXEC |
-		     PR_SVE_SET_VL_INHERIT)) {
+	vl = find_supported_vector_length(vl);
+
+	if (flags & (PR_SVE_VL_INHERIT |
+		     PR_SVE_SET_VL_ONEXEC)) {
 		ti->sve_vl_onexec = vl;
 	} else {
 		/* Reset VL to system default on next exec: */
@@ -114,39 +185,42 @@ int sve_set_vector_length(struct thread *thread,
 		goto out;
 	}
 
-	if (vl != ti->sve_vl) {
-		if ((elf_hwcap & HWCAP_SVE)) {
-			fp_regs_struct fp_regs;
-			memset(&fp_regs, 0, sizeof(fp_regs));
-
-			/* for self at prctl syscall */
-			if (thread == cpu_local_var(current)) {
-				save_fp_regs(thread);
-				clear_fp_regs();
-				thread_sve_to_fpsimd(thread, &fp_regs);
-				sve_free(thread);
-
-				ti->sve_vl = vl;
-
-				sve_alloc(thread);
-				thread_fpsimd_to_sve(thread, &fp_regs);
-				restore_fp_regs(thread);
-			/* for target thread at ptrace */
-			} else {
-				thread_sve_to_fpsimd(thread, &fp_regs);
-				sve_free(thread);
-
-				ti->sve_vl = vl;
-
-				sve_alloc(thread);
-				thread_fpsimd_to_sve(thread, &fp_regs);
-			}
+	if (vl == ti->sve_vl) {
+		goto out;
+	}
+
+	if ((elf_hwcap & HWCAP_SVE)) {
+		fp_regs_struct fp_regs;
+
+		memset(&fp_regs, 0, sizeof(fp_regs));
+
+		/* for self at prctl syscall */
+		if (thread == cpu_local_var(current)) {
+			save_fp_regs(thread);
+			clear_fp_regs();
+			thread_sve_to_fpsimd(thread, &fp_regs);
+			sve_free(thread);
+
+			ti->sve_vl = vl;
+
+			sve_alloc(thread);
+			thread_fpsimd_to_sve(thread, &fp_regs);
+			restore_fp_regs(thread);
+		/* for target thread at ptrace */
+		} else {
+			thread_sve_to_fpsimd(thread, &fp_regs);
+			sve_free(thread);
+
+			ti->sve_vl = vl;
+
+			sve_alloc(thread);
+			thread_fpsimd_to_sve(thread, &fp_regs);
 		}
 	}
 	ti->sve_vl = vl;
 
 out:
-	ti->sve_flags = flags & PR_SVE_SET_VL_INHERIT;
+	ti->sve_flags = flags & PR_SVE_VL_INHERIT;
 
 	return 0;
 }
@@ -156,44 +230,53 @@ out:
  * Encode the current vector length and flags for return.
  * This is only required for prctl(): ptrace has separate fields
  */
-static int sve_prctl_status(const struct thread_info *ti)
+static int sve_prctl_status(unsigned long flags)
 {
-	int ret = ti->sve_vl;
+	int ret;
+	struct thread_info *ti = cpu_local_var(current)->ctx.thread;
 
-	ret |= ti->sve_flags << 16;
+	if (flags & PR_SVE_SET_VL_ONEXEC) {
+		ret = ti->sve_vl_onexec;
+	}
+	else {
+		ret = ti->sve_vl;
+	}
+
+	if (ti->sve_flags & PR_SVE_VL_INHERIT) {
+		ret |= PR_SVE_VL_INHERIT;
+	}
 	return ret;
 }
 
 /* @ref.impl arch/arm64/kernel/fpsimd.c::sve_set_task_vl */
-int sve_set_thread_vl(struct thread *thread, const unsigned long vector_length,
-		const unsigned long flags)
+int sve_set_thread_vl(unsigned long arg)
 {
+	unsigned long vl, flags;
 	int ret;
 
-	if (!(elf_hwcap & HWCAP_SVE)) {
+	vl = arg & PR_SVE_VL_LEN_MASK;
+	flags = arg & ~vl;
+
+	/* Instead of system_supports_sve() */
+	if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
 		return -EINVAL;
 	}
 
-	BUG_ON(thread != cpu_local_var(current));
-
-	preempt_disable();
-	ret = sve_set_vector_length(thread, vector_length, flags);
-	preempt_enable();
+	ret = sve_set_vector_length(cpu_local_var(current), vl, flags);
 
 	if (ret) {
 		return ret;
 	}
-	return sve_prctl_status(thread->ctx.thread);
+	return sve_prctl_status(flags);
 }
 
 /* @ref.impl arch/arm64/kernel/fpsimd.c::sve_get_ti_vl */
-int sve_get_thread_vl(const struct thread *thread)
+int sve_get_thread_vl(void)
 {
-	if (!(elf_hwcap & HWCAP_SVE)) {
+	/* Instead of system_supports_sve() */
+	if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
 		return -EINVAL;
 	}
-	return sve_prctl_status(thread->ctx.thread);
+	return sve_prctl_status(0);
 }
 
 void do_sve_acc(unsigned int esr, struct pt_regs *regs)
@@ -203,25 +286,48 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 	panic("");
 }
 
-void init_sve_vl(void)
+void sve_setup(void)
 {
 	extern unsigned long ihk_param_default_vl;
 	uint64_t zcr;
 
+	/* Instead of system_supports_sve() */
 	if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
 		return;
 	}
 
-	zcr = read_system_reg(SYS_ZCR_EL1);
-	BUG_ON(((zcr & ZCR_EL1_LEN_MASK) + 1) * 16 > sve_max_vl);
+	/* init sve_vq_map bitmap */
+	sve_init_vq_map();
 
+	/*
+	 * The SVE architecture mandates support for 128-bit vectors,
+	 * so sve_vq_map must have at least SVE_VQ_MIN set.
+	 * If something went wrong, at least try to patch it up:
+	 */
+	if (!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)) {
+		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+	}
+
+	zcr = read_system_reg(SYS_ZCR_EL1);
+	sve_max_vl = sve_vl_from_vq((zcr & ZCR_EL1_LEN_MASK) + 1);
+
+	/*
+	 * Sanity-check that the max VL we determined through CPU features
+	 * corresponds properly to sve_vq_map.  If not, do our best:
+	 */
+	if (sve_max_vl != find_supported_vector_length(sve_max_vl)) {
+		sve_max_vl = find_supported_vector_length(sve_max_vl);
+	}
+
-	sve_max_vl = ((zcr & ZCR_EL1_LEN_MASK) + 1) * 16;
 	sve_default_vl = ihk_param_default_vl;
 
-	if (sve_default_vl == 0) {
-		kprintf("SVE: Getting default VL = 0 from HOST-Linux.\n");
-		sve_default_vl = sve_max_vl > 64 ? 64 : sve_max_vl;
-		kprintf("SVE: Using default vl(%d byte).\n", sve_default_vl);
+	if (ihk_param_default_vl !=
+	    find_supported_vector_length(ihk_param_default_vl)) {
+		kprintf("SVE: Getting unsupported default VL = %d "
			"from HOST-Linux.\n", sve_default_vl);
+		sve_default_vl = find_supported_vector_length(64);
+		kprintf("SVE: Using default vl(%d byte).\n",
			sve_default_vl);
 	}
 
 	kprintf("SVE: maximum available vector length %u bytes per vector\n",
@@ -232,7 +338,7 @@ void init_sve_vl(void)
 
 #else /* CONFIG_ARM64_SVE */
 
-void init_sve_vl(void)
+void sve_setup(void)
 {
 	/* nothing to do. */
 }

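For readers of the bitmap helpers in fpsimd.c above: vq_to_bit() stores vector-quadword counts mirror-reversed so that find_next_bit(), which scans towards higher bit indices, ends up returning the largest supported VQ not exceeding the request. A standalone, illustrative sketch of that selection logic follows (hypothetical host-side C, not part of the patch; the supported-VQ list is made up):

    #include <stdio.h>

    #define SVE_VQ_MAX   512
    #define SVE_VQ_BYTES 16

    /* Same mapping as vq_to_bit()/bit_to_vq() in the patch: vq=1 maps to the
     * highest bit index and vq=SVE_VQ_MAX to bit 0, so scanning bit indices
     * upwards from vq_to_bit(requested_vq) visits VQs in decreasing order. */
    static unsigned int vq_to_bit(unsigned int vq) { return SVE_VQ_MAX - vq; }
    static unsigned int bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

    int main(void)
    {
        /* Pretend the CPU supports VQ 1, 2 and 4 (VL 16, 32, 64 bytes). */
        unsigned int supported_vq[] = { 1, 2, 4 };
        unsigned int requested_vq = 3;      /* VL 48: not implementable */
        unsigned int best_vq = 0;
        unsigned int bit, i;

        /* Emulate find_next_bit(): first set bit at or after vq_to_bit(3). */
        for (bit = vq_to_bit(requested_vq); bit < SVE_VQ_MAX; bit++) {
            for (i = 0; i < 3; i++) {
                if (bit == vq_to_bit(supported_vq[i])) {
                    best_vq = bit_to_vq(bit);
                    break;
                }
            }
            if (best_vq)
                break;
        }
        /* Prints vq=2 (VL 32): the largest supported VL not above VL 48. */
        printf("chosen vq=%u (vl=%u bytes)\n",
               best_vq, best_vq * SVE_VQ_BYTES);
        return 0;
    }
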
@@ -1,4 +1,4 @@
-/* fpsimd.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
+/* fpsimd.h COPYRIGHT FUJITSU LIMITED 2016-2019 */
 #ifndef __HEADER_ARM64_COMMON_FPSIMD_H
 #define __HEADER_ARM64_COMMON_FPSIMD_H
 
@@ -46,12 +46,15 @@ extern void sve_alloc(struct thread *thread);
 extern void sve_save_state(void *state, unsigned int *pfpsr);
 extern void sve_load_state(void const *state, unsigned int const *pfpsr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
-extern int sve_set_thread_vl(struct thread *thread, const unsigned long vector_length, const unsigned long flags);
-extern int sve_get_thread_vl(const struct thread *thread);
+extern int sve_set_thread_vl(unsigned long arg);
+extern int sve_get_thread_vl(void);
 extern int sve_set_vector_length(struct thread *thread, unsigned long vl, unsigned long flags);
 
-#define SVE_SET_VL(thread, vector_length, flags) sve_set_thread_vl(thread, vector_length, flags)
-#define SVE_GET_VL(thread) sve_get_thread_vl(thread)
+#define SVE_SET_VL(arg) sve_set_thread_vl(arg)
+#define SVE_GET_VL() sve_get_thread_vl()
 
+/* Maximum VL that SVE VL-agnostic software can transparently support */
+#define SVE_VL_ARCH_MAX 0x100
+
 #else /* CONFIG_ARM64_SVE */
 
@@ -80,12 +83,12 @@ static int sve_set_vector_length(struct thread *thread, unsigned long vl, unsign
 }
 
 /* for prctl syscall */
-#define SVE_SET_VL(a,b,c) (-EINVAL)
-#define SVE_GET_VL(a) (-EINVAL)
+#define SVE_SET_VL(a) (-EINVAL)
+#define SVE_GET_VL() (-EINVAL)
 
 #endif /* CONFIG_ARM64_SVE */
 
-extern void init_sve_vl(void);
+extern void sve_setup(void);
 extern void fpsimd_save_state(struct fpsimd_state *state);
 extern void fpsimd_load_state(struct fpsimd_state *state);
 extern void thread_fpsimd_save(struct thread *thread);

@@ -1,4 +1,4 @@
-/* prctl.h COPYRIGHT FUJITSU LIMITED 2017 */
+/* prctl.h COPYRIGHT FUJITSU LIMITED 2017-2019 */
 #ifndef __HEADER_ARM64_COMMON_PRCTL_H
 #define __HEADER_ARM64_COMMON_PRCTL_H
 
@@ -6,15 +6,12 @@
 #define PR_GET_THP_DISABLE 42
 
 /* arm64 Scalable Vector Extension controls */
-#define PR_SVE_SET_VL 48 /* set task vector length */
-#define PR_SVE_SET_VL_THREAD (1 << 1) /* set just this thread */
-#define PR_SVE_SET_VL_INHERIT (1 << 2) /* inherit across exec */
-#define PR_SVE_SET_VL_ONEXEC (1 << 3) /* defer effect until exec */
-#define PR_SVE_GET_VL 49 /* get task vector length */
-/* Decode helpers for the return value from PR_SVE_GET_VL: */
-#define PR_SVE_GET_VL_LEN(ret) ((ret) & 0x3fff) /* vector length */
-#define PR_SVE_GET_VL_INHERIT (PR_SVE_SET_VL_INHERIT << 16)
-/* For conveinence, PR_SVE_SET_VL returns the result in the same encoding */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL 50 /* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL 51 /* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
 
 #endif /* !__HEADER_ARM64_COMMON_PRCTL_H */

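The new PR_SVE_SET_VL/PR_SVE_GET_VL encoding packs the vector length (in bytes) into the low 16 bits of the prctl argument and return value, with the control flags above bit 16. A hedged userspace sketch of driving it (assumes glibc's prctl(2) wrapper; the fallback constants simply repeat the values defined above and are only needed when the installed <sys/prctl.h> predates SVE):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SVE_SET_VL
    #define PR_SVE_SET_VL      50
    #define PR_SVE_GET_VL      51
    #define PR_SVE_VL_LEN_MASK 0xffff
    #define PR_SVE_VL_INHERIT  (1 << 17)
    #endif

    int main(void)
    {
        /* Ask for a 32-byte (256-bit) VL and keep it across execve(). */
        int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT, 0, 0, 0);

        if (ret < 0) {
            perror("PR_SVE_SET_VL");
            return 1;
        }

        ret = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);
        if (ret < 0) {
            perror("PR_SVE_GET_VL");
            return 1;
        }

        /* The kernel may have clamped the request to a supported VL. */
        printf("vl=%d bytes, inherit=%d\n",
               ret & PR_SVE_VL_LEN_MASK, !!(ret & PR_SVE_VL_INHERIT));
        return 0;
    }
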
@@ -1,4 +1,4 @@
-/* ptrace.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
+/* ptrace.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
 #ifndef __HEADER_ARM64_COMMON_PTRACE_H
 #define __HEADER_ARM64_COMMON_PTRACE_H
 
@@ -46,6 +46,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <lwk/compiler.h>
 #include <ihk/types.h>
 
 struct user_hwdebug_state {
@@ -78,6 +79,70 @@ struct user_sve_header {
 	uint16_t __reserved;
 };
 
+enum aarch64_regset {
+	REGSET_GPR,
+	REGSET_FPR,
+	REGSET_TLS,
+	REGSET_HW_BREAK,
+	REGSET_HW_WATCH,
+	REGSET_SYSTEM_CALL,
+#ifdef CONFIG_ARM64_SVE
+	REGSET_SVE,
+#endif /* CONFIG_ARM64_SVE */
+};
+
+struct thread;
+struct user_regset;
+
+typedef int user_regset_active_fn(struct thread *target,
+				  const struct user_regset *regset);
+
+typedef long user_regset_get_fn(struct thread *target,
+				const struct user_regset *regset,
+				unsigned int pos, unsigned int count,
+				void *kbuf, void __user *ubuf);
+
+typedef long user_regset_set_fn(struct thread *target,
+				const struct user_regset *regset,
+				unsigned int pos, unsigned int count,
+				const void *kbuf, const void __user *ubuf);
+
+typedef int user_regset_writeback_fn(struct thread *target,
+				     const struct user_regset *regset,
+				     int immediate);
+
+typedef unsigned int user_regset_get_size_fn(struct thread *target,
+					     const struct user_regset *regset);
+
+struct user_regset {
+	user_regset_get_fn *get;
+	user_regset_set_fn *set;
+	user_regset_active_fn *active;
+	user_regset_writeback_fn *writeback;
+	user_regset_get_size_fn *get_size;
+	unsigned int n;
+	unsigned int size;
+	unsigned int align;
+	unsigned int bias;
+	unsigned int core_note_type;
+};
+
+struct user_regset_view {
+	const char *name;
+	const struct user_regset *regsets;
+	unsigned int n;
+	uint32_t e_flags;
+	uint16_t e_machine;
+	uint8_t ei_osabi;
+};
+
+extern const struct user_regset_view *current_user_regset_view(void);
+extern const struct user_regset *find_regset(
+	const struct user_regset_view *view,
+	unsigned int type);
+extern unsigned int regset_size(struct thread *target,
+				const struct user_regset *regset);
+
 /* Definitions for user_sve_header.flags: */
 #define SVE_PT_REGS_MASK (1 << 0)
 
@@ -85,7 +150,7 @@ struct user_sve_header {
 #define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
 
 #define SVE_PT_VL_THREAD PR_SVE_SET_VL_THREAD
-#define SVE_PT_VL_INHERIT PR_SVE_SET_VL_INHERIT
+#define SVE_PT_VL_INHERIT PR_SVE_VL_INHERIT
 #define SVE_PT_VL_ONEXEC PR_SVE_SET_VL_ONEXEC
 
 /*
@@ -99,7 +164,9 @@ struct user_sve_header {
  */
 
 /* Offset from the start of struct user_sve_header to the register data */
-#define SVE_PT_REGS_OFFSET ((sizeof(struct sve_context) + 15) / 16 * 16)
+#define SVE_PT_REGS_OFFSET \
+	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
 
 /*
  * The register data content and layout depends on the value of the
@@ -174,8 +241,10 @@ struct user_sve_header {
 #define SVE_PT_SVE_FFR_OFFSET(vq) \
 	__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
 
 #define SVE_PT_SVE_FPSR_OFFSET(vq) \
-	((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + 15) / 16 * 16)
+	((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
+			(SVE_VQ_BYTES - 1)) \
+		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
 #define SVE_PT_SVE_FPCR_OFFSET(vq) \
 	(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
 
@@ -184,9 +253,10 @@ struct user_sve_header {
 * 128-bit boundary.
 */
 
 #define SVE_PT_SVE_SIZE(vq, flags) \
-	((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE - \
-		SVE_PT_SVE_OFFSET + 15) / 16 * 16)
+	((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
+			- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
+		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
 
 #define SVE_PT_SIZE(vq, flags) \
 	(((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \

@@ -1,4 +1,4 @@
-/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
+/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
 #ifndef __HEADER_ARM64_COMMON_SIGNAL_H
 #define __HEADER_ARM64_COMMON_SIGNAL_H
 
@@ -298,6 +298,7 @@ struct extra_context {
 	struct _aarch64_ctx head;
 	void *data; /* 16-byte aligned pointer to the extra space */
 	uint32_t size; /* size in bytes of the extra space */
+	uint32_t __reserved[3];
 };
 
 #define SVE_MAGIC 0x53564501
@@ -318,19 +319,25 @@ struct sve_context {
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
 * (16 quadwords).
+ *
+ * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
+ * terminology.
 */
-#define SVE_VQ_MIN 1
-#define SVE_VQ_MAX 0x200
+#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
 
-#define SVE_VL_MIN (SVE_VQ_MIN * 0x10)
-#define SVE_VL_MAX (SVE_VQ_MAX * 0x10)
+#define SVE_VQ_MIN 1
+#define SVE_VQ_MAX 512
+
+#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
+#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
 
 #define SVE_NUM_ZREGS 32
 #define SVE_NUM_PREGS 16
 
 #define sve_vl_valid(vl) \
-	((vl) % 0x10 == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl) ((vl) / 0x10)
+	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
+#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
+#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)
 
 /*
 * The total size of meaningful data in the SVE context in bytes,
@@ -365,11 +372,13 @@ struct sve_context {
 * Additional data might be appended in the future.
 */
 
-#define SVE_SIG_ZREG_SIZE(vq) ((uint32_t)(vq) * 16)
-#define SVE_SIG_PREG_SIZE(vq) ((uint32_t)(vq) * 2)
+#define SVE_SIG_ZREG_SIZE(vq) ((uint32_t)(vq) * SVE_VQ_BYTES)
+#define SVE_SIG_PREG_SIZE(vq) ((uint32_t)(vq) * (SVE_VQ_BYTES / 8))
 #define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
 
-#define SVE_SIG_REGS_OFFSET ((sizeof(struct sve_context) + 15) / 16 * 16)
+#define SVE_SIG_REGS_OFFSET \
+	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
 
 #define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
 #define SVE_SIG_ZREG_OFFSET(vq, n) \

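As a worked example of the VL/VQ arithmetic these macros encode (a standalone C11 sketch with the macros expanded by hand for a 64-byte vector length; illustrative only, not part of the patch):

    /* VL = 64 bytes per Z register  =>  VQ = 64 / SVE_VQ_BYTES = 4 quadwords.
     * Z registers hold vq * 16 bytes; P and FFR registers hold vq * 2 bytes. */
    _Static_assert(64 / 16 == 4, "sve_vq_from_vl(64)");
    _Static_assert(4 * 16 == 64, "sve_vl_from_vq(4)");
    _Static_assert(4u * 16 == 64, "SVE_SIG_ZREG_SIZE(4)");
    _Static_assert(4u * (16 / 8) == 8, "SVE_SIG_PREG_SIZE(4)");
    _Static_assert(64 % 16 == 0 && 64 >= 16 && 64 <= 512 * 16, "sve_vl_valid(64)");
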
@@ -1,4 +1,4 @@
-/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
+/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
 #ifndef __HEADER_ARM64_COMMON_THREAD_INFO_H
 #define __HEADER_ARM64_COMMON_THREAD_INFO_H
 
@@ -46,9 +46,9 @@ struct thread_info {
 	int cpu; /* cpu */
 	struct cpu_context cpu_context; /* kernel_context */
 	void *sve_state; /* SVE registers, if any */
-	uint16_t sve_vl; /* SVE vector length */
-	uint16_t sve_vl_onexec; /* SVE vl after next exec */
-	uint16_t sve_flags; /* SVE related flags */
+	unsigned int sve_vl; /* SVE vector length */
+	unsigned int sve_vl_onexec; /* SVE vl after next exec */
+	unsigned long sve_flags; /* SVE related flags */
 	unsigned long fault_address; /* fault info */
 	unsigned long fault_code; /* ESR_EL1 value */
 };
@@ -56,7 +56,7 @@ struct thread_info {
 /* Flags for sve_flags (intentionally defined to match the prctl flags) */
 
 /* Inherit sve_vl and sve_flags across execve(): */
-#define THREAD_VL_INHERIT PR_SVE_SET_VL_INHERIT
+#define THREAD_VL_INHERIT PR_SVE_VL_INHERIT
 
 struct arm64_cpu_local_thread {
 	struct thread_info thread_info;

@@ -1,4 +1,4 @@
-/* ptrace.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
+/* ptrace.c COPYRIGHT FUJITSU LIMITED 2016-2019 */
 #include <errno.h>
 #include <debug-monitors.h>
 #include <hw_breakpoint.h>
@@ -12,6 +12,7 @@
 #include <string.h>
 #include <thread_info.h>
 #include <debug.h>
+#include <ptrace.h>
 
 //#define DEBUG_PRINT_SC
 
@@ -25,37 +26,6 @@
 extern void save_debugreg(unsigned long *debugreg);
 extern int interrupt_from_user(void *);
 
-enum aarch64_regset {
-	REGSET_GPR,
-	REGSET_FPR,
-	REGSET_TLS,
-	REGSET_HW_BREAK,
-	REGSET_HW_WATCH,
-	REGSET_SYSTEM_CALL,
-#ifdef CONFIG_ARM64_SVE
-	REGSET_SVE,
-#endif /* CONFIG_ARM64_SVE */
-};
-
-struct user_regset;
-typedef long user_regset_get_fn(struct thread *target,
-				const struct user_regset *regset,
-				unsigned int pos, unsigned int count,
-				void *kbuf, void __user *ubuf);
-
-typedef long user_regset_set_fn(struct thread *target,
-				const struct user_regset *regset,
-				unsigned int pos, unsigned int count,
-				const void *kbuf, const void __user *ubuf);
-
-struct user_regset {
-	user_regset_get_fn *get;
-	user_regset_set_fn *set;
-	unsigned int n;
-	unsigned int size;
-	unsigned int core_note_type;
-};
-
 long ptrace_read_user(struct thread *thread, long addr, unsigned long *value)
 {
 	return -EIO;
@@ -273,6 +243,17 @@ static inline long copy_regset_from_user(struct thread *target,
 	return regset->set(target, regset, offset, size, NULL, data);
 }
 
+unsigned int regset_size(struct thread *target,
+			 const struct user_regset *regset)
+{
+	if (!regset->get_size) {
+		return regset->n * regset->size;
+	}
+	else {
+		return regset->get_size(target, regset);
+	}
+}
+
 /*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
@@ -624,6 +605,48 @@ out:
 
 #ifdef CONFIG_ARM64_SVE
 
+static void sve_init_header_from_thread(struct user_sve_header *header,
+					struct thread *target)
+{
+	unsigned int vq;
+
+	memset(header, 0, sizeof(*header));
+
+	/* McKernel processes always enable SVE. */
+	header->flags = SVE_PT_REGS_SVE;
+
+	if (target->ctx.thread->sve_flags & SVE_PT_VL_INHERIT) {
+		header->flags |= SVE_PT_VL_INHERIT;
+	}
+
+	header->vl = target->ctx.thread->sve_vl;
+	vq = sve_vq_from_vl(header->vl);
+
+	header->max_vl = sve_max_vl;
+	header->size = SVE_PT_SIZE(vq, header->flags);
+	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
+				       SVE_PT_REGS_SVE);
+}
+
+static unsigned int sve_size_from_header(struct user_sve_header const *header)
+{
+	return ALIGN(header->size, SVE_VQ_BYTES);
+}
+
+static unsigned int sve_get_size(struct thread *target,
+				 const struct user_regset *regset)
+{
+	struct user_sve_header header;
+
+	/* Instead of system_supports_sve() */
+	if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
+		return 0;
+	}
+
+	sve_init_header_from_thread(&header, target);
+	return sve_size_from_header(&header);
+}
+
 /* read NT_ARM_SVE */
 static long sve_get(struct thread *target,
 		    const struct user_regset *regset,
@@ -646,23 +669,9 @@ static long sve_get(struct thread *target,
 	}
 
 	/* Header */
-	memset(&header, 0, sizeof(header));
-
-	header.vl = target->ctx.thread->sve_vl;
-
-	BUG_ON(!sve_vl_valid(header.vl));
+	sve_init_header_from_thread(&header, target);
 	vq = sve_vq_from_vl(header.vl);
 
-	BUG_ON(!sve_vl_valid(sve_max_vl));
-	header.max_vl = sve_max_vl;
-
-	/* McKernel processes always enable SVE. */
-	header.flags = SVE_PT_REGS_SVE;
-
-	header.size = SVE_PT_SIZE(vq, header.flags);
-	header.max_size = SVE_PT_SIZE(sve_vq_from_vl(header.max_vl),
-				      SVE_PT_REGS_SVE);
-
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
 				  0, sizeof(header));
 	if (ret) {
@@ -676,11 +685,9 @@ static long sve_get(struct thread *target,
 	 */
 
 	/* Otherwise: full SVE case */
 
 	start = SVE_PT_SVE_OFFSET;
 	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
-	BUG_ON(end < start);
-	BUG_ON(end - start > sve_state_size(target));
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  target->ctx.thread->sve_state,
 				  start, end);
@@ -690,24 +697,18 @@ static long sve_get(struct thread *target,
 
 	start = end;
 	end = SVE_PT_SVE_FPSR_OFFSET(vq);
-
-	BUG_ON(end < start);
 	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 				       start, end);
 	if (ret) {
 		goto out;
 	}
 
+	/*
+	 * Copy fpsr, and fpcr which must follow contiguously in
+	 * struct fpsimd_state:
+	 */
 	start = end;
 	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
-
-	BUG_ON((char *)(&target->fp_regs->fpcr + 1) <
-	       (char *)&target->fp_regs->fpsr);
-	BUG_ON(end < start);
-	BUG_ON((char *)(&target->fp_regs->fpcr + 1) -
-	       (char *)&target->fp_regs->fpsr !=
-	       end - start);
-
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &target->fp_regs->fpsr,
 				  start, end);
@@ -716,9 +717,7 @@ static long sve_get(struct thread *target,
 	}
 
 	start = end;
-	end = (SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE) + 15) / 16 * 16;
-
-	BUG_ON(end < start);
+	end = sve_size_from_header(&header);
 	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 				       start, end);
 out:
@@ -762,13 +761,12 @@ static long sve_set(struct thread *target,
 	 * sve_set_vector_length(), which will also validate them for us:
 	 */
 	ret = sve_set_vector_length(target, header.vl,
-				    header.flags & ~SVE_PT_REGS_MASK);
+				    ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
 	if (ret) {
 		goto out;
 	}
 
 	/* Actual VL set may be less than the user asked for: */
-	BUG_ON(!sve_vl_valid(target->ctx.thread->sve_vl));
 	vq = sve_vq_from_vl(target->ctx.thread->sve_vl);
 
 	/* Registers: FPSIMD-only case */
@@ -779,11 +777,19 @@ static long sve_set(struct thread *target,
 	}
 
 	/* Otherwise: full SVE case */
+
+	/*
+	 * If setting a different VL from the requested VL and there is
+	 * register data, the data layout will be wrong: don't even
+	 * try to set the registers in this case.
+	 */
+	if (count && vq != sve_vq_from_vl(header.vl)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	start = SVE_PT_SVE_OFFSET;
 	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
-
-	BUG_ON(end < start);
-	BUG_ON(end - start > sve_state_size(target));
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 target->ctx.thread->sve_state,
 				 start, end);
@@ -793,27 +799,21 @@ static long sve_set(struct thread *target,
 
 	start = end;
 	end = SVE_PT_SVE_FPSR_OFFSET(vq);
-
-	BUG_ON(end < start);
 	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 					start, end);
 	if (ret) {
 		goto out;
 	}
 
+	/*
+	 * Copy fpsr, and fpcr which must follow contiguously in
+	 * struct fpsimd_state:
+	 */
 	start = end;
 	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
-
-	BUG_ON((char *)(&target->fp_regs->fpcr + 1) <
-	       (char *)&target->fp_regs->fpsr);
-	BUG_ON(end < start);
-	BUG_ON((char *)(&target->fp_regs->fpcr + 1) -
-	       (char *)&target->fp_regs->fpsr !=
-	       end - start);
-
-	user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-			   &target->fp_regs->fpsr,
-			   start, end);
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &target->fp_regs->fpsr,
+				 start, end);
 out:
 	return ret;
 }
@@ -825,8 +825,9 @@ static const struct user_regset aarch64_regsets[] = {
 		.core_note_type = NT_PRSTATUS,
 		.n = sizeof(struct user_pt_regs) / sizeof(uint64_t),
 		.size = sizeof(uint64_t),
+		.align = sizeof(uint64_t),
 		.get = gpr_get,
-		.set = gpr_set
+		.set = gpr_set,
 	},
 	[REGSET_FPR] = {
 		.core_note_type = NT_PRFPREG,
@@ -836,56 +837,75 @@ static const struct user_regset aarch64_regsets[] = {
 		 * fpcr are 32-bits wide.
 		 */
 		.size = sizeof(uint32_t),
+		.align = sizeof(uint32_t),
 		.get = fpr_get,
-		.set = fpr_set
+		.set = fpr_set,
 	},
 	[REGSET_TLS] = {
 		.core_note_type = NT_ARM_TLS,
 		.n = 1,
 		.size = sizeof(void *),
+		.align = sizeof(void *),
 		.get = tls_get,
-		.set = tls_set
+		.set = tls_set,
 	},
 	[REGSET_HW_BREAK] = {
 		.core_note_type = NT_ARM_HW_BREAK,
 		.n = sizeof(struct user_hwdebug_state) / sizeof(uint32_t),
 		.size = sizeof(uint32_t),
+		.align = sizeof(uint32_t),
 		.get = hw_break_get,
-		.set = hw_break_set
+		.set = hw_break_set,
 	},
 	[REGSET_HW_WATCH] = {
 		.core_note_type = NT_ARM_HW_WATCH,
 		.n = sizeof(struct user_hwdebug_state) / sizeof(uint32_t),
 		.size = sizeof(uint32_t),
+		.align = sizeof(uint32_t),
 		.get = hw_break_get,
-		.set = hw_break_set
+		.set = hw_break_set,
 	},
 	[REGSET_SYSTEM_CALL] = {
 		.core_note_type = NT_ARM_SYSTEM_CALL,
 		.n = 1,
 		.size = sizeof(int),
+		.align = sizeof(int),
 		.get = system_call_get,
-		.set = system_call_set
+		.set = system_call_set,
 	},
 #ifdef CONFIG_ARM64_SVE
 	[REGSET_SVE] = { /* Scalable Vector Extension */
 		.core_note_type = NT_ARM_SVE,
-		.n = (SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE) + 15) / 16,
-		.size = 16,
+		.n = (SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE) +
+		      (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES,
+		.size = SVE_VQ_BYTES,
+		.align = SVE_VQ_BYTES,
 		.get = sve_get,
-		.set = sve_set
+		.set = sve_set,
+		.get_size = sve_get_size,
 	},
 #endif /* CONFIG_ARM64_SVE */
 };
 
-static const struct user_regset *
-find_regset(const struct user_regset *regset, unsigned int type, int n)
+static const struct user_regset_view user_aarch64_view = {
+	.name = "aarch64", .e_machine = EM_AARCH64,
+	.regsets = aarch64_regsets,
+	.n = sizeof(aarch64_regsets) / sizeof(aarch64_regsets[0])
+};
+
+const struct user_regset_view *current_user_regset_view(void)
+{
+	return &user_aarch64_view;
+}
+
+const struct user_regset *find_regset(const struct user_regset_view *view,
+				      unsigned int type)
 {
 	int i = 0;
 
-	for (i = 0; i < n; i++) {
-		if (regset[i].core_note_type == type) {
-			return &regset[i];
+	for (i = 0; i < view->n; i++) {
+		if (view->regsets[i].core_note_type == type) {
+			return &view->regsets[i];
 		}
 	}
 	return NULL;
@@ -894,8 +914,8 @@ find_regset(const struct user_regset *regset, unsigned int type, int n)
 static long ptrace_regset(struct thread *thread, int req, long type, struct iovec *iov)
 {
 	long rc = -EINVAL;
-	const struct user_regset *regset = find_regset(aarch64_regsets, type,
-		sizeof(aarch64_regsets) / sizeof(aarch64_regsets[0]));
+	const struct user_regset *regset =
+		find_regset(&user_aarch64_view, type);
 
 	if (!regset) {
 		kprintf("%s: not supported type 0x%x\n", __FUNCTION__, type);

@@ -1,4 +1,4 @@
-/* syscall.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
+/* syscall.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
 #include <cpulocal.h>
 #include <string.h>
 #include <kmalloc.h>
@@ -178,11 +178,10 @@ SYSCALL_DECLARE(prctl)
 
 	switch (option) {
 	case PR_SVE_SET_VL:
-		error = SVE_SET_VL(cpu_local_var(current),
-			ihk_mc_syscall_arg1(ctx), ihk_mc_syscall_arg2(ctx));
+		error = SVE_SET_VL(ihk_mc_syscall_arg1(ctx));
 		break;
 	case PR_SVE_GET_VL:
-		error = SVE_GET_VL(cpu_local_var(current));
+		error = SVE_GET_VL();
 		break;
 	case PR_SET_THP_DISABLE:
 		if (arg3 || arg4 || arg5) {

@@ -1,4 +1,4 @@
-/* coredump.c COPYRIGHT FUJITSU LIMITED 2018 */
+/* coredump.c COPYRIGHT FUJITSU LIMITED 2018-2019 */
 #include <process.h>
 #include <elfcore.h>
 
@@ -55,3 +55,13 @@ void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread,
 
 	prstatus->pr_fpvalid = 0; /* We assume no fp */
 }
+
+void arch_fill_thread_core_info(struct note *head,
+				struct thread *thread, void *regs)
+{
+}
+
+int arch_get_thread_core_info_size(void)
+{
+	return 0;
+}

@@ -1,4 +1,4 @@
-/* arch-eclair.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
+/* arch-eclair.h COPYRIGHT FUJITSU LIMITED 2016-2019 */
 #ifndef HEADER_USER_ARM64_ECLAIR_H
 #define HEADER_USER_ARM64_ECLAIR_H
 
@@ -50,7 +50,7 @@
 
 #define ARCH_REGS 34
 
-#define PANIC_REGS_OFFSET 160
+#define PANIC_REGS_OFFSET 168
 
 struct arch_kregs {
 	unsigned long x19, x20, x21, x22, x23;

@@ -183,8 +183,8 @@ void fill_auxv(struct note *head, struct thread *thread, void *regs)
 
 int get_note_size(void)
 {
-	return get_prstatus_size() + get_prpsinfo_size()
-		+ get_auxv_size();
+	return get_prstatus_size() + arch_get_thread_core_info_size()
+		+ get_prpsinfo_size() + get_auxv_size();
 }
 
 /**
@@ -199,8 +199,13 @@ void fill_note(void *note, struct thread *thread, void *regs)
 {
 	fill_prstatus(note, thread, regs);
 	note += get_prstatus_size();
 
+	arch_fill_thread_core_info(note, thread, regs);
+	note += arch_get_thread_core_info_size();
+
 	fill_prpsinfo(note, thread, regs);
 	note += get_prpsinfo_size();
 
 	fill_auxv(note, thread, regs);
 }

@@ -1,4 +1,4 @@
-/* elfcore.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
+/* elfcore.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
 #ifndef __HEADER_ELFCORE_H
 #define __HEADER_ELFCORE_H
 
@@ -109,5 +109,8 @@ struct note {
 /* functions */
 struct thread;
 extern void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread, void *regs0);
+extern int arch_get_thread_core_info_size(void);
+extern void arch_fill_thread_core_info(struct note *head,
+				       struct thread *thread, void *regs);
 
 #endif /* __HEADER_ELFCORE_H */