perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read

commit f9bdf1f953392c9edd69a7f884f78c0390127029 upstream.

The WARN_ON(this_cpu_read(cpu_hw_events.enabled)) in
intel_pmu_save_and_restart_reload() is triggered when doing a sampling
read of topdown events.

In an NMI handler, cpu_hw_events.enabled is set and used to indicate the
status of the core PMU, while the generic pmu->pmu_disable_count, which
is updated by the perf_pmu_disable()/perf_pmu_enable() pair, is not
touched. However, that pair is invoked when doing a sampling read in the
NMI handler, so perf_pmu_enable() mistakenly sets cpuc->enabled even
though the core PMU is supposed to stay disabled.
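
For illustration, a minimal user-space C model of that mismatch (a
sketch, not kernel code; pmu_disable_count and cpuc_enabled are
hypothetical stand-ins for pmu->pmu_disable_count and
cpu_hw_events.enabled, and the toy_* helpers mimic
perf_pmu_disable()/perf_pmu_enable() plus the x86 callbacks):

    #include <stdbool.h>
    #include <stdio.h>

    static int  pmu_disable_count;        /* models pmu->pmu_disable_count */
    static bool cpuc_enabled = true;      /* models cpu_hw_events.enabled */

    /* models perf_pmu_disable() -> x86_pmu_disable() */
    static void toy_pmu_disable(void)
    {
            if (pmu_disable_count++ == 0 && cpuc_enabled)
                    cpuc_enabled = false;
    }

    /* models perf_pmu_enable() -> x86_pmu_enable() */
    static void toy_pmu_enable(void)
    {
            if (--pmu_disable_count == 0 && !cpuc_enabled)
                    cpuc_enabled = true;  /* re-enables no matter who disabled it */
    }

    int main(void)
    {
            /* NMI entry: the handler disables the core PMU directly,
             * clearing the flag without touching the refcount. */
            cpuc_enabled = false;

            toy_pmu_disable();            /* refcount 0 -> 1, flag already clear */
            /* ... sampling read updates the event here ... */
            toy_pmu_enable();             /* refcount 1 -> 0, flag set again! */

            printf("core PMU enabled inside NMI: %d (should be 0)\n",
                   (int)cpuc_enabled);
            return 0;
    }

In this model the read sequence leaves the flag set while still "inside"
the NMI, which is exactly the state the WARN_ON() above catches.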

Avoid disabling the PMU when the core PMU is already disabled, and merge
the auto-reload and topdown read logic into intel_pmu_read_event().
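
Sketched in the same toy style, the merged read path keys the
save/restore on the cpuc->enabled flag itself, so it degenerates to a
no-op on the hardware state when the NMI handler has already disabled
the core PMU (the toy_* stubs below are hypothetical; the real change is
the core.c hunk further down):

    #include <stdbool.h>
    #include <stdio.h>

    static bool cpuc_enabled;             /* models cpu_hw_events.enabled */

    /* hypothetical stubs for intel_pmu_disable_all()/intel_pmu_enable_all(0) */
    static void toy_disable_all(void) { puts("hw disabled"); }
    static void toy_enable_all(void)  { puts("hw enabled"); }

    static void toy_read_event(void)
    {
            bool pmu_enabled = cpuc_enabled;  /* snapshot the state on entry */

            cpuc_enabled = false;
            if (pmu_enabled)
                    toy_disable_all();

            /* ... update the topdown event or drain the PEBS buffer ... */

            cpuc_enabled = pmu_enabled;       /* restore exactly what was found */
            if (pmu_enabled)
                    toy_enable_all();
    }

    int main(void)
    {
            cpuc_enabled = false;  /* as inside the NMI handler: prints nothing */
            toy_read_event();

            cpuc_enabled = true;   /* normal context: disable, work, re-enable */
            toy_read_event();
            return 0;
    }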

Fixes: 7b2c05a15d ("perf/x86/intel: Generic support for hardware TopDown metrics")
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20250121152303.3128733-2-kan.liang@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c

@@ -2689,28 +2689,33 @@ static u64 adl_update_topdown_event(struct perf_event *event)
 
 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
 
-static void intel_pmu_read_topdown_event(struct perf_event *event)
-{
-        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-
-        /* Only need to call update_topdown_event() once for group read. */
-        if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
-            !is_slots_event(event))
-                return;
-
-        perf_pmu_disable(event->pmu);
-        static_call(intel_pmu_update_topdown_event)(event);
-        perf_pmu_enable(event->pmu);
-}
-
 static void intel_pmu_read_event(struct perf_event *event)
 {
-        if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
-                intel_pmu_auto_reload_read(event);
-        else if (is_topdown_count(event))
-                intel_pmu_read_topdown_event(event);
-        else
-                x86_perf_event_update(event);
+        if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
+                struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+                bool pmu_enabled = cpuc->enabled;
+
+                /* Only need to call update_topdown_event() once for group read. */
+                if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
+                        return;
+
+                cpuc->enabled = 0;
+                if (pmu_enabled)
+                        intel_pmu_disable_all();
+
+                if (is_topdown_event(event))
+                        static_call(intel_pmu_update_topdown_event)(event);
+                else
+                        intel_pmu_drain_pebs_buffer();
+
+                cpuc->enabled = pmu_enabled;
+                if (pmu_enabled)
+                        intel_pmu_enable_all(0);
+
+                return;
+        }
+
+        x86_perf_event_update(event);
 }
 
 static void intel_pmu_enable_fixed(struct perf_event *event)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c

@@ -789,7 +789,7 @@ unlock:
         return 1;
 }
 
-static inline void intel_pmu_drain_pebs_buffer(void)
+void intel_pmu_drain_pebs_buffer(void)
 {
         struct perf_sample_data data;
 
@@ -1902,15 +1902,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
         return NULL;
 }
 
-void intel_pmu_auto_reload_read(struct perf_event *event)
-{
-        WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
-
-        perf_pmu_disable(event->pmu);
-        intel_pmu_drain_pebs_buffer();
-        perf_pmu_enable(event->pmu);
-}
-
 /*
  * Special variant of intel_pmu_save_and_restart() for auto-reload.
  */

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h

@@ -1536,7 +1536,7 @@ void intel_pmu_pebs_disable_all(void);
 
 void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
 
-void intel_pmu_auto_reload_read(struct perf_event *event);
+void intel_pmu_drain_pebs_buffer(void);
 
 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);