perf: add struct hw_perf_event

Change-Id: I0938e2b18064ad805a9edb6e15d26cf438bf0a59
Author: TOIDA,Suguru
Date: 2019-11-19 09:31:49 +09:00
Parent: 767792808a
Commit: 86c45484e3
5 changed files with 81 additions and 0 deletions


@@ -215,3 +215,20 @@ int ihk_mc_perfctr_set_extra(struct mc_perf_event *event)
	/* Nothing to do. */
	return 0;
}

static inline uint64_t arm_pmu_event_max_period(struct mc_perf_event *event)
{
	return 0xFFFFFFFF;
}

int hw_perf_event_init(struct mc_perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event)) {
		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period = hwc->sample_period;
		ihk_atomic64_set(&hwc->period_left, hwc->sample_period);
	}
	return 0;
}
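
The non-sampling path above seeds sample_period, last_period and period_left with half of the 32-bit ARM counter range, so the counter overflows and can be reloaded well before it silently wraps. The stand-alone sketch below models how such a period is typically turned into a hardware reload value on overflow; it is an illustration only, and names such as MAX_PERIOD, struct hw_state and set_period() are hypothetical rather than part of this commit.

/*
 * Stand-alone model of the bookkeeping seeded by hw_perf_event_init()
 * above.  MAX_PERIOD, struct hw_state and set_period() are illustrative
 * names only and do not exist in this tree.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD 0xFFFFFFFFULL	/* width of the 32-bit PMU counter */

struct hw_state {
	int64_t period_left;		/* models hwc->period_left */
	uint64_t sample_period;		/* models hwc->sample_period */
};

/* Compute the value to program into the counter for the next run. */
static uint64_t set_period(struct hw_state *hw)
{
	int64_t left = hw->period_left;

	if (left <= 0) {		/* period expired: start a new one */
		left += (int64_t)hw->sample_period;
		hw->period_left = left;
	}
	if ((uint64_t)left > MAX_PERIOD)	/* clamp to counter width */
		left = (int64_t)MAX_PERIOD;

	/* the counter counts up and overflows after 'left' more events */
	return (MAX_PERIOD + 1 - (uint64_t)left) & MAX_PERIOD;
}

int main(void)
{
	struct hw_state hw = {
		/* the same seed as the non-sampling path above */
		.period_left = (int64_t)(MAX_PERIOD >> 1),
		.sample_period = MAX_PERIOD >> 1,
	};

	printf("initial reload value: %#llx\n",
	       (unsigned long long)set_period(&hw));

	hw.period_left = 0;		/* pretend the counter just overflowed */
	printf("next reload value:    %#llx\n",
	       (unsigned long long)set_period(&hw));
	return 0;
}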


@@ -503,3 +503,8 @@ int ihk_mc_perf_get_num_counters(void)
{
	return NUM_PERF_COUNTERS;
}

int hw_perf_event_init(struct mc_perf_event *event)
{
	return 0;
}


@@ -4519,6 +4519,7 @@ static int mc_perf_event_alloc(struct mc_perf_event **out,
	unsigned long val = 0, extra_config = 0;
	struct mc_perf_event *event = NULL;
	int ereg_id;
	struct hw_perf_event *hwc;

	if (!attr) {
		ret = -EINVAL;
@@ -4542,6 +4543,21 @@ static int mc_perf_event_alloc(struct mc_perf_event **out,
	event->child_count_total = 0;
	event->parent = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq) {
		/*
		 * Mark that struct perf_event_attr::sample_freq was set by
		 * the user.  It is okay to reuse
		 * struct hw_perf_event::sample_period for this purpose
		 * because it is not a union member and is not consulted
		 * when struct perf_event_attr::freq is set.
		 */
		hwc->sample_period = 1;
	}
	hwc->last_period = hwc->sample_period;
	ihk_atomic64_set(&hwc->period_left, hwc->sample_period);
	if (attr->type == PERF_TYPE_HARDWARE &&
	    attr->config == PERF_COUNT_HW_REF_CPU_CYCLES) {
		event->use_invariant_tsc = 1;
@@ -4588,6 +4604,8 @@ static int mc_perf_event_alloc(struct mc_perf_event **out,
		event->extra_reg.idx = ihk_mc_get_extra_reg_idx(ereg_id);
	}

	ret = hw_perf_event_init(event);

	*out = event;
out:
@@ -4646,6 +4664,14 @@ SYSCALL_DECLARE(perf_event_open)
		not_supported_flag = 1;
	}

	if (attr->freq) {
		not_supported_flag = 1;
	} else {
		if (attr->sample_period & (1ULL << 63)) {
			return -EINVAL;
		}
	}

	if (not_supported_flag) {
		return -ENOENT;
	}
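
With these checks, perf_event_open() now refuses frequency-based sampling (attr.freq) with -ENOENT and rejects a sample_period whose bit 63 is set with -EINVAL, presumably because such a value would be treated as negative in the period arithmetic. The user-space sketch below shows an attribute setup that passes the new checks; it follows common Linux perf_event_open usage and is an illustration, not a test shipped with this commit.

/*
 * User-space illustration only: a perf_event_attr setup that satisfies
 * the checks above (freq left at 0, bit 63 of sample_period clear).
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;	/* plain period; attr.freq stays 0 */
	attr.disabled = 1;

	/* measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	close(fd);
	return 0;
}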


@@ -17,6 +17,7 @@
#include <list.h>
#include <ihk/context.h>
#include <arch/cpu.h>
#include <mc_perf_event.h>

extern int num_processors;
@@ -89,6 +90,12 @@ unsigned long ihk_mc_hw_cache_event_map(unsigned long hw_cache_event);
unsigned long ihk_mc_hw_cache_extra_reg_map(unsigned long hw_cache_event);
unsigned long ihk_mc_raw_event_map(unsigned long raw_event);
int ihk_mc_validate_event(unsigned long hw_config);
int hw_perf_event_init(struct mc_perf_event *event);

static inline int is_sampling_event(struct mc_perf_event *event)
{
	return event->attr.sample_period != 0;
}

/* returns the 'prev' argument of the call that caused the switch to the context returned. */
void *ihk_mc_switch_context(ihk_mc_kernel_context_t *old_ctx,


@@ -257,6 +257,31 @@ struct hw_perf_event_extra {
	int idx;
};

struct hw_perf_event {
	/*
	 * The last observed hardware counter value, updated with an
	 * atomic cmpxchg such that pmu::read() can be called nested.
	 */
	ihk_atomic64_t prev_count;

	/*
	 * The period to start the next sample with.
	 */
	uint64_t sample_period;

	/*
	 * The period we started this sample with.
	 */
	uint64_t last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64-bit value and allows for generation of periods longer
	 * than the hardware might allow.
	 */
	ihk_atomic64_t period_left;
};
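
The prev_count comment above describes the usual delta-accumulation scheme: each read snapshots the hardware counter, publishes the snapshot with a compare-and-swap, and adds only the masked difference, so a nested or concurrent reader cannot double-count. The stand-alone model below illustrates that loop with C11 atomics; read_hw_counter(), COUNTER_MASK and the other names are hypothetical, and the kernel itself would use the ihk_atomic64_* primitives instead.

/*
 * Stand-alone model of the prev_count update loop, using C11 atomics in
 * place of the kernel's ihk_atomic64_* primitives.  read_hw_counter(),
 * COUNTER_MASK and the other names are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define COUNTER_MASK 0xFFFFFFFFULL		/* 32-bit hardware counter */

static _Atomic uint64_t fake_hw_counter;	/* stands in for the PMU register */
static _Atomic uint64_t prev_count;		/* models hwc->prev_count */
static _Atomic uint64_t event_count;		/* models event->count */

static uint64_t read_hw_counter(void)
{
	return atomic_load(&fake_hw_counter) & COUNTER_MASK;
}

static void update_event_count(void)
{
	uint64_t prev, now;

	do {
		prev = atomic_load(&prev_count);
		now = read_hw_counter();
		/* retry if another reader advanced prev_count meanwhile */
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, now));

	/* the mask handles the counter wrapping between the two reads */
	atomic_fetch_add(&event_count, (now - prev) & COUNTER_MASK);
}

int main(void)
{
	atomic_store(&fake_hw_counter, 1000);
	update_event_count();
	atomic_store(&fake_hw_counter, 1500);
	update_event_count();
	printf("accumulated count: %llu\n",
	       (unsigned long long)atomic_load(&event_count));	/* prints 1500 */
	return 0;
}
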
/**
 * enum perf_event_state - the states of an event:
 */
@@ -291,6 +316,7 @@ struct mc_perf_event {
	long long base_system_tsc;
	long long stopped_system_tsc;
	long long system_accum_count;
	struct hw_perf_event hw;
};

struct perf_event_mmap_page {