dect
/
linux-2.6
Archived
13
0
Fork 0

oprofile, perf: Use per-cpu framework

This changes oprofile_perf.c to use the per-cpu framework.

Using the per-cpu framework should avoid errors like the following:

 arch/arm/oprofile/../../../drivers/oprofile/oprofile_perf.c:28:28: error: variably modified 'perf_events' at file scope

Reported-by: William Cohen <wcohen@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
This commit is contained in:
Robert Richter 2012-02-23 17:07:06 +01:00
parent e734568b67
commit f8bbfd7d28
1 changed file with 11 additions and 12 deletions

View File

@@ -1,5 +1,6 @@
/* /*
* Copyright 2010 ARM Ltd. * Copyright 2010 ARM Ltd.
* Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
* *
* Perf-events backend for OProfile. * Perf-events backend for OProfile.
*/ */
@@ -25,7 +26,7 @@ static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex); static DEFINE_MUTEX(oprofile_perf_mutex);
static struct op_counter_config *counter_config; static struct op_counter_config *counter_config;
static struct perf_event **perf_events[NR_CPUS]; static DEFINE_PER_CPU(struct perf_event **, perf_events);
static int num_counters; static int num_counters;
/* /*
@@ -38,7 +39,7 @@ static void op_overflow_handler(struct perf_event *event,
u32 cpu = smp_processor_id(); u32 cpu = smp_processor_id();
for (id = 0; id < num_counters; ++id) for (id = 0; id < num_counters; ++id)
if (perf_events[cpu][id] == event) if (per_cpu(perf_events, cpu)[id] == event)
break; break;
if (id != num_counters) if (id != num_counters)
@@ -74,7 +75,7 @@ static int op_create_counter(int cpu, int event)
{ {
struct perf_event *pevent; struct perf_event *pevent;
if (!counter_config[event].enabled || perf_events[cpu][event]) if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
return 0; return 0;
pevent = perf_event_create_kernel_counter(&counter_config[event].attr, pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
@@ -91,18 +92,18 @@ static int op_create_counter(int cpu, int event)
return -EBUSY; return -EBUSY;
} }
perf_events[cpu][event] = pevent; per_cpu(perf_events, cpu)[event] = pevent;
return 0; return 0;
} }
static void op_destroy_counter(int cpu, int event) static void op_destroy_counter(int cpu, int event)
{ {
struct perf_event *pevent = perf_events[cpu][event]; struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
if (pevent) { if (pevent) {
perf_event_release_kernel(pevent); perf_event_release_kernel(pevent);
perf_events[cpu][event] = NULL; per_cpu(perf_events, cpu)[event] = NULL;
} }
} }
@@ -257,12 +258,12 @@ void oprofile_perf_exit(void)
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
for (id = 0; id < num_counters; ++id) { for (id = 0; id < num_counters; ++id) {
event = perf_events[cpu][id]; event = per_cpu(perf_events, cpu)[id];
if (event) if (event)
perf_event_release_kernel(event); perf_event_release_kernel(event);
} }
kfree(perf_events[cpu]); kfree(per_cpu(perf_events, cpu));
} }
kfree(counter_config); kfree(counter_config);
@@ -277,8 +278,6 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
if (ret) if (ret)
return ret; return ret;
memset(&perf_events, 0, sizeof(perf_events));
num_counters = perf_num_counters(); num_counters = perf_num_counters();
if (num_counters <= 0) { if (num_counters <= 0) {
pr_info("oprofile: no performance counters\n"); pr_info("oprofile: no performance counters\n");
@@ -298,9 +297,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
} }
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
perf_events[cpu] = kcalloc(num_counters, per_cpu(perf_events, cpu) = kcalloc(num_counters,
sizeof(struct perf_event *), GFP_KERNEL); sizeof(struct perf_event *), GFP_KERNEL);
if (!perf_events[cpu]) { if (!per_cpu(perf_events, cpu)) {
pr_info("oprofile: failed to allocate %d perf events " pr_info("oprofile: failed to allocate %d perf events "
"for cpu %d\n", num_counters, cpu); "for cpu %d\n", num_counters, cpu);
ret = -ENOMEM; ret = -ENOMEM;