ARM: 5899/2: arm: provide a mechanism to reserve performance counters

To add support for perf events and to allow the hardware counters to be
shared with oprofile, we need a way to reserve access to the PMU
(performance monitor unit). Platforms with PMU interrupts should
register the interrupts in arch/arm/kernel/pmu.c.

Signed-off-by: Jamie Iles <jamie.iles@picochip.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Authored by Jamie Iles on 2010-02-02 20:23:15 +01:00; committed by Russell King.
parent 74d2e4f8d7
commit 0f4f0672ac
4 changed files with 184 additions and 0 deletions
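
As a quick illustration of the reservation semantics described above (not part of the commit; the helper names are made up), a client grabs the counters with reserve_pmu() and must hand the same pointer back to release_pmu():

#include <linux/err.h>

#include <asm/pmu.h>

static const struct pmu_irqs *example_irqs;     /* hypothetical client state */

static int example_claim_pmu(void)
{
        example_irqs = reserve_pmu();
        if (IS_ERR(example_irqs))
                return PTR_ERR(example_irqs);   /* -EBUSY while another user holds it */
        return 0;
}

static void example_give_up_pmu(void)
{
        /* the pointer returned by reserve_pmu() is handed back as a cookie */
        release_pmu(example_irqs);
        example_irqs = NULL;
}

Only one user at a time can hold the reservation; a second reserve_pmu() call fails with -EBUSY until the first caller releases, which is what lets oprofile and perf events share the hardware without stepping on each other.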

arch/arm/Kconfig

@@ -863,6 +863,11 @@ config XSCALE_PMU
        depends on CPU_XSCALE && !XSCALE_PMU_TIMER
        default y

config CPU_HAS_PMU
        depends on CPU_V6 || CPU_V7 || XSCALE_PMU
        default y
        bool

if !MMU
source "arch/arm/Kconfig-nommu"
endif

arch/arm/include/asm/pmu.h (new file, 75 lines)

@@ -0,0 +1,75 @@
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#ifdef CONFIG_CPU_HAS_PMU

struct pmu_irqs {
        const int       *irqs;
        int             num_irqs;
};

/**
 * reserve_pmu() - reserve the hardware performance counters
 *
 * Reserve the hardware performance counters in the system for exclusive use.
 * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
 * encoded error on failure.
 */
extern const struct pmu_irqs *
reserve_pmu(void);

/**
 * release_pmu() - Relinquish control of the performance counters
 *
 * Release the performance counters and allow someone else to use them.
 * Callers must have disabled the counters and released IRQs before calling
 * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
 * a cookie.
 */
extern int
release_pmu(const struct pmu_irqs *irqs);

/**
 * init_pmu() - Initialise the PMU.
 *
 * Initialise the system ready for PMU enabling. This should typically set the
 * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
 * the actual hardware initialisation.
 */
extern int
init_pmu(void);

#else /* CONFIG_CPU_HAS_PMU */

static inline const struct pmu_irqs *
reserve_pmu(void)
{
        return ERR_PTR(-ENODEV);
}

static inline int
release_pmu(const struct pmu_irqs *irqs)
{
        return -ENODEV;
}

static inline int
init_pmu(void)
{
        return -ENODEV;
}

#endif /* CONFIG_CPU_HAS_PMU */

#endif /* __ARM_PMU_H__ */
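
The kerneldoc above implies a particular lifecycle: reserve the counters, request each interrupt listed in the returned 'struct pmu_irqs', let init_pmu() set the affinity, and free the IRQs again before releasing. A hedged sketch of such a client follows; example_pmu_handler(), example_pmu_start() and example_pmu_stop() are illustrative names, not code from this series:

#include <linux/err.h>
#include <linux/interrupt.h>

#include <asm/pmu.h>

static const struct pmu_irqs *client_irqs;      /* hypothetical client state */

static irqreturn_t example_pmu_handler(int irq, void *dev_id)
{
        /* a real handler would read and clear the counter overflow flags here */
        return IRQ_HANDLED;
}

static int example_pmu_start(void)
{
        int i, err;

        client_irqs = reserve_pmu();
        if (IS_ERR(client_irqs))
                return PTR_ERR(client_irqs);

        for (i = 0; i < client_irqs->num_irqs; ++i) {
                err = request_irq(client_irqs->irqs[i], example_pmu_handler,
                                  IRQF_DISABLED, "example-pmu", NULL);
                if (err)
                        goto out_free_irqs;
        }

        err = init_pmu();       /* route irqs[i] to CPU i on SMP */
        if (!err)
                return 0;

out_free_irqs:
        while (i--)
                free_irq(client_irqs->irqs[i], NULL);
        release_pmu(client_irqs);
        return err;

}

static void example_pmu_stop(void)
{
        int i;

        /* counters must be disabled and IRQs freed before handing the PMU back */
        for (i = 0; i < client_irqs->num_irqs; ++i)
                free_irq(client_irqs->irqs[i], NULL);
        release_pmu(client_irqs);
}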

arch/arm/kernel/Makefile

@@ -46,6 +46,7 @@ obj-$(CONFIG_CPU_XSCALE)       += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3)          += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK)        += xscale-cp0.o
obj-$(CONFIG_IWMMXT)            += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU)       += pmu.o
AFLAGS_iwmmxt.o                 := -Wa,-mcpu=iwmmxt

ifneq ($(CONFIG_ARCH_EBSA110),y)

arch/arm/kernel/pmu.c (new file, 103 lines)

@@ -0,0 +1,103 @@
/*
 * linux/arch/arm/kernel/pmu.c
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/pmu.h>

/*
 * Define the IRQs for the system. We could use something like a platform
 * device but that seems fairly heavyweight for this. Also, the performance
 * counters can't be removed or hotplugged.
 *
 * Ordering is important: init_pmu() will use the ordering to set the affinity
 * to the corresponding core. e.g. the first interrupt will go to cpu 0, the
 * second goes to cpu 1 etc.
 */
static const int irqs[] = {
#if defined(CONFIG_ARCH_OMAP2)
        3,
#elif defined(CONFIG_ARCH_BCMRING)
        IRQ_PMUIRQ,
#elif defined(CONFIG_MACH_REALVIEW_EB)
        IRQ_EB11MP_PMU_CPU0,
        IRQ_EB11MP_PMU_CPU1,
        IRQ_EB11MP_PMU_CPU2,
        IRQ_EB11MP_PMU_CPU3,
#elif defined(CONFIG_ARCH_OMAP3)
        INT_34XX_BENCH_MPU_EMUL,
#elif defined(CONFIG_ARCH_IOP32X)
        IRQ_IOP32X_CORE_PMU,
#elif defined(CONFIG_ARCH_IOP33X)
        IRQ_IOP33X_CORE_PMU,
#elif defined(CONFIG_ARCH_PXA)
        IRQ_PMU,
#endif
};

static const struct pmu_irqs pmu_irqs = {
        .irqs       = irqs,
        .num_irqs   = ARRAY_SIZE(irqs),
};

static volatile long pmu_lock;

const struct pmu_irqs *
reserve_pmu(void)
{
        return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
                &pmu_irqs;
}
EXPORT_SYMBOL_GPL(reserve_pmu);

int
release_pmu(const struct pmu_irqs *irqs)
{
        if (WARN_ON(irqs != &pmu_irqs))
                return -EINVAL;
        clear_bit_unlock(0, &pmu_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);

static int
set_irq_affinity(int irq,
                 unsigned int cpu)
{
#ifdef CONFIG_SMP
        int err = irq_set_affinity(irq, cpumask_of(cpu));
        if (err)
                pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                           irq, cpu);
        return err;
#else
        return 0;
#endif
}

int
init_pmu(void)
{
        int i, err = 0;

        for (i = 0; i < pmu_irqs.num_irqs; ++i) {
                err = set_irq_affinity(pmu_irqs.irqs[i], i);
                if (err)
                        break;
        }

        return err;
}
EXPORT_SYMBOL_GPL(init_pmu);
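
The comment above the irqs[] table is the contract platform code must follow. As a purely hypothetical illustration (CONFIG_ARCH_FOO and its IRQ names do not exist in the tree), a two-core platform would extend the initialiser with its per-core PMU interrupts listed in CPU order, so that init_pmu() binds the first to cpu 0 and the second to cpu 1:

#elif defined(CONFIG_ARCH_FOO)
        IRQ_FOO_PMU_CPU0,       /* first entry: bound to cpu 0 */
        IRQ_FOO_PMU_CPU1,       /* second entry: bound to cpu 1 */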