From 86c45484e36d75147eba783c6890ff14a9d99f09 Mon Sep 17 00:00:00 2001 From: "TOIDA,Suguru" Date: Tue, 19 Nov 2019 09:31:49 +0900 Subject: [PATCH] perf: add struct hw_perf_event Change-Id: I0938e2b18064ad805a9edb6e15d26cf438bf0a59 --- arch/arm64/kernel/perfctr.c | 17 +++++++++++++++++ arch/x86_64/kernel/perfctr.c | 5 +++++ kernel/syscall.c | 26 ++++++++++++++++++++++++++ lib/include/ihk/cpu.h | 7 +++++++ lib/include/mc_perf_event.h | 26 ++++++++++++++++++++++++++ 5 files changed, 81 insertions(+) diff --git a/arch/arm64/kernel/perfctr.c b/arch/arm64/kernel/perfctr.c index 85efe8c2..128f15f2 100644 --- a/arch/arm64/kernel/perfctr.c +++ b/arch/arm64/kernel/perfctr.c @@ -215,3 +215,20 @@ int ihk_mc_perfctr_set_extra(struct mc_perf_event *event) /* Nothing to do. */ return 0; } + +static inline uint64_t arm_pmu_event_max_period(struct mc_perf_event *event) +{ + return 0xFFFFFFFF; +} + +int hw_perf_event_init(struct mc_perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!is_sampling_event(event)) { + hwc->sample_period = arm_pmu_event_max_period(event) >> 1; + hwc->last_period = hwc->sample_period; + ihk_atomic64_set(&hwc->period_left, hwc->sample_period); + } + return 0; +} diff --git a/arch/x86_64/kernel/perfctr.c b/arch/x86_64/kernel/perfctr.c index 22da38f1..f19f4560 100644 --- a/arch/x86_64/kernel/perfctr.c +++ b/arch/x86_64/kernel/perfctr.c @@ -503,3 +503,8 @@ int ihk_mc_perf_get_num_counters(void) { return NUM_PERF_COUNTERS; } + +int hw_perf_event_init(struct mc_perf_event *event) +{ + return 0; +} diff --git a/kernel/syscall.c b/kernel/syscall.c index 2420fd0d..af879a08 100644 --- a/kernel/syscall.c +++ b/kernel/syscall.c @@ -4519,6 +4519,7 @@ static int mc_perf_event_alloc(struct mc_perf_event **out, unsigned long val = 0, extra_config = 0; struct mc_perf_event *event = NULL; int ereg_id; + struct hw_perf_event *hwc; if (!attr) { ret = -EINVAL; @@ -4542,6 +4543,21 @@ static int mc_perf_event_alloc(struct mc_perf_event **out, 
event->child_count_total = 0; event->parent = NULL; + hwc = &event->hw; + hwc->sample_period = attr->sample_period; + if (attr->freq && attr->sample_freq) { + /* + * Mark that struct perf_event_attr::sample_freq is set by the user. + * Note that it's okay to use + * struct hw_perf_event::sample_period for this purpose + * because it's not a union and is not used when + * struct perf_event_attr::freq is set. + */ + hwc->sample_period = 1; + } + hwc->last_period = hwc->sample_period; + ihk_atomic64_set(&hwc->period_left, hwc->sample_period); + if (attr->type == PERF_TYPE_HARDWARE && attr->config == PERF_COUNT_HW_REF_CPU_CYCLES) { event->use_invariant_tsc = 1; @@ -4588,6 +4604,8 @@ static int mc_perf_event_alloc(struct mc_perf_event **out, event->extra_reg.idx = ihk_mc_get_extra_reg_idx(ereg_id); } + ret = hw_perf_event_init(event); + *out = event; out: @@ -4646,6 +4664,14 @@ SYSCALL_DECLARE(perf_event_open) not_supported_flag = 1; } + if (attr->freq) { + not_supported_flag = 1; + } else { + if (attr->sample_period & (1ULL << 63)) { + return -EINVAL; + } + } + if (not_supported_flag) { return -ENOENT; } diff --git a/lib/include/ihk/cpu.h b/lib/include/ihk/cpu.h index 40581315..a242d058 100644 --- a/lib/include/ihk/cpu.h +++ b/lib/include/ihk/cpu.h @@ -17,6 +17,7 @@ #include #include #include +#include extern int num_processors; @@ -89,6 +90,12 @@ unsigned long ihk_mc_hw_cache_event_map(unsigned long hw_cache_event); unsigned long ihk_mc_hw_cache_extra_reg_map(unsigned long hw_cache_event); unsigned long ihk_mc_raw_event_map(unsigned long raw_event); int ihk_mc_validate_event(unsigned long hw_config); +int hw_perf_event_init(struct mc_perf_event *event); + +static inline int is_sampling_event(struct mc_perf_event *event) +{ + return event->attr.sample_period != 0; +} /* returns the 'prev' argument of the call that caused the switch to the context returned.
*/ void *ihk_mc_switch_context(ihk_mc_kernel_context_t *old_ctx, diff --git a/lib/include/mc_perf_event.h b/lib/include/mc_perf_event.h index 5a101343..6ff0836d 100644 --- a/lib/include/mc_perf_event.h +++ b/lib/include/mc_perf_event.h @@ -257,6 +257,31 @@ struct hw_perf_event_extra { int idx; }; +struct hw_perf_event { + /* + * The last observed hardware counter value, updated with a + * local64_cmpxchg() such that pmu::read() can be called nested. + */ + ihk_atomic64_t prev_count; + + /* + * The period to start the next sample with. + */ + uint64_t sample_period; + + /* + * The period we started this sample with. + */ + uint64_t last_period; + + /* + * However much is left of the current period; note that this is + * a full 64bit value and allows for generation of periods longer + * than hardware might allow. + */ + ihk_atomic64_t period_left; +}; + /** * enum perf_event_state - the states of an event: */ @@ -291,6 +316,7 @@ struct mc_perf_event { long long base_system_tsc; long long stopped_system_tsc; long long system_accum_count; + struct hw_perf_event hw; }; struct perf_event_mmap_page {