From 343121c3d0b223e9933e4b52e603cadc99331196 Mon Sep 17 00:00:00 2001
From: "TOIDA,Suguru"
Date: Tue, 19 Nov 2019 14:18:17 +0900
Subject: [PATCH] perf: set event period

Change-Id: Ibf569de7af8697e766c10b8d70905b8cdc4df083
---
 arch/arm64/kernel/perfctr.c          | 40 ++++++++++++++++++++++++++++
 arch/arm64/kernel/perfctr_armv8pmu.c |  6 +++++
 arch/x86_64/kernel/perfctr.c         |  5 ++++
 kernel/syscall.c                     |  2 ++
 lib/include/ihk/cpu.h                |  1 +
 5 files changed, 54 insertions(+)

diff --git a/arch/arm64/kernel/perfctr.c b/arch/arm64/kernel/perfctr.c
index 128f15f2..2ffc4222 100644
--- a/arch/arm64/kernel/perfctr.c
+++ b/arch/arm64/kernel/perfctr.c
@@ -232,3 +232,43 @@ int hw_perf_event_init(struct mc_perf_event *event)
 	}
 	return 0;
 }
+
+int ihk_mc_event_set_period(struct mc_perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int64_t left = ihk_atomic64_read(&hwc->period_left);
+	int64_t period = hwc->sample_period;
+	uint64_t max_period;
+	int ret = 0;
+
+	max_period = arm_pmu_event_max_period(event);
+	if (unlikely(left <= -period)) {
+		left = period;
+		ihk_atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		ihk_atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (max_period >> 1))
+		left = (max_period >> 1);
+
+	ihk_atomic64_set(&hwc->prev_count, (uint64_t)-left);
+
+	cpu_pmu.write_counter(event->counter_id,
+			      (uint64_t)(-left) & max_period);
+
+	return ret;
+}
diff --git a/arch/arm64/kernel/perfctr_armv8pmu.c b/arch/arm64/kernel/perfctr_armv8pmu.c
index 833e41de..a0973ca9 100644
--- a/arch/arm64/kernel/perfctr_armv8pmu.c
+++ b/arch/arm64/kernel/perfctr_armv8pmu.c
@@ -788,6 +788,7 @@ static void armv8pmu_handle_irq(void *priv)
 	long irqstate;
 	struct mckfd *fdp;
 	struct pt_regs *regs = (struct pt_regs *)priv;
+	struct mc_perf_event *event = NULL;
 
 	/*
 	 * Get and reset the IRQ flags
@@ -821,6 +822,11 @@ static void armv8pmu_handle_irq(void *priv)
 	else {
 		set_signal(SIGIO, regs, NULL);
 	}
+
+	if (event) {
+		ihk_mc_event_set_period(event);
+	}
+
 	return;
 }
 static void armv8pmu_enable_user_access_pmu_regs(void)
diff --git a/arch/x86_64/kernel/perfctr.c b/arch/x86_64/kernel/perfctr.c
index f19f4560..5d668cb0 100644
--- a/arch/x86_64/kernel/perfctr.c
+++ b/arch/x86_64/kernel/perfctr.c
@@ -508,3 +508,8 @@ int hw_perf_event_init(struct mc_perf_event *event)
 {
 	return 0;
 }
+
+int ihk_mc_event_set_period(struct mc_perf_event *event)
+{
+	return 0;
+}
diff --git a/kernel/syscall.c b/kernel/syscall.c
index af879a08..d70f2e8e 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -4068,6 +4068,7 @@ void perf_start(struct mc_perf_event *event)
 		leader->base_system_tsc = thread->system_tsc;
 	}
 	else {
+		ihk_mc_event_set_period(leader);
 		perf_counter_set(leader);
 		counter_mask |= 1UL << counter_id;
 	}
@@ -4097,6 +4098,7 @@
 		sub->base_system_tsc = thread->system_tsc;
 	}
 	else {
+		ihk_mc_event_set_period(sub);
 		perf_counter_set(sub);
 		counter_mask |= 1UL << counter_id;
 	}
diff --git a/lib/include/ihk/cpu.h b/lib/include/ihk/cpu.h
index a242d058..7a47505f 100644
--- a/lib/include/ihk/cpu.h
+++ b/lib/include/ihk/cpu.h
@@ -91,6 +91,7 @@ unsigned long ihk_mc_hw_cache_extra_reg_map(unsigned long hw_cache_event);
 unsigned long ihk_mc_raw_event_map(unsigned long raw_event);
 int ihk_mc_validate_event(unsigned long hw_config);
 int hw_perf_event_init(struct mc_perf_event *event);
+int ihk_mc_event_set_period(struct mc_perf_event *event);
 
 static inline int is_sampling_event(struct mc_perf_event *event)
 {
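
Editor's note (not part of the patch): ihk_mc_event_set_period() follows the
same scheme as Linux's armpmu_event_set_period(), which it closely mirrors.
The counter is programmed with the two's complement of the remaining period,
truncated to the counter width, so the PMU overflows and raises its IRQ after
exactly `left` more events. The sketch below is a standalone user-space
illustration of that arithmetic only; the 32-bit MAX_PERIOD and the sample
figures are assumptions for the example, not values taken from this patch.

	/* sketch.c -- illustrative only; compile with any C99 compiler. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed 32-bit event counter width for the example. */
	#define MAX_PERIOD 0xffffffffULL

	int main(void)
	{
		int64_t period = 4000;	/* user-requested sample_period */
		int64_t left = period;	/* hwc->period_left at program time */

		/*
		 * Clamp as the patch does: never program more than half the
		 * counter range, leaving headroom for interrupt latency.
		 */
		if (left > (int64_t)(MAX_PERIOD >> 1))
			left = (int64_t)(MAX_PERIOD >> 1);

		/* Program the counter with -left, masked to counter width. */
		uint64_t hw = (uint64_t)(-left) & MAX_PERIOD;
		printf("counter programmed to %#llx\n",
		       (unsigned long long)hw);

		/*
		 * After 'left' events the counter wraps past MAX_PERIOD:
		 * the overflow is what raises the PMU IRQ.
		 */
		assert(hw + (uint64_t)left == MAX_PERIOD + 1);
		return 0;
	}

The x86_64 version of ihk_mc_event_set_period() is a stub returning 0, so the
new calls in perf_start() stay arch-neutral: on that architecture they are
no-ops ahead of perf_counter_set().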