
irqdomain changes for Linux v3.6

Round of refactoring and enhancements to irq_domain infrastructure. This
 series starts the process of simplifying irqdomain. The ultimate goal is
 to merge LEGACY, LINEAR and TREE mappings into a single system, but had
 to back off from that after some last minute bugs. Instead it mainly
 reorganizes the code and ensures that the reverse map gets populated
 when the irq is mapped instead of the first time it is looked up.
 
 Merging of the irq_domain types is deferred to v3.7
 
 In other news, this series adds helpers for creating static mappings on
 a linear or tree mapping.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.11 (GNU/Linux)
 
 iQIcBAABAgAGBQJQGKKkAAoJEEFnBt12D9kB59gQAJnTjrihej1tr0OEkffIthGK
 RyVI/DMo0jMgLs4K/rIo3Y+PdTSsNYd8x4R7ln8O7rNRQn8W6jE6NQgoMh51EvNc
 FAltmTsBldq6hUNuz2FEnbmojBP4QklTzL8bAiXtX5EufWQsgMsP4guOuHXLCjEV
 CkWYVk/slXEWJ8yYJc6GKVRvL+CNeiXVCTcOsYA0CI3ofN7O0rd+YAL314CRllIc
 e5uARbWM+s9FJ/eXwCZP4+3jCmdI/CHJb284WldMc/mBD8Rbiqpb4kH6AZI+TH2O
 CyiNEPWs6FG5eJPTID7HrOarXGzwYq/pvv8iG7Mh8NiKSae1C1HdkHelCjbLQ+pU
 POya0fWF1Gvzlmw0gHik86dqaKjwb29btjj7SFg8KnQExWn2ifhsY70mM9wCTo3s
 cwcQlssDIsARE83nttTFCoV/iAWh9AvTxafrXu/+9OKTjpsYlC8kgzdVjq5aAxON
 JaAUK1OduTWRsd1TabKlh6naRXr9nRcLKikwKri2oYVKkj97wahBuib4ffzAcNqz
 VklRBxTH6M+dz/t5NpcVyLXJpqzTN++QNdTAmeQG6LOnHJL4tpFTsx5sMa7ghmzX
 LNpmp/AkVfP0MT7Drf0FUUx6iFA7sjANYzcepUVDrPGKHx0E3LyqbG5JKcC5LgM6
 +UIoKAktF3vY7pdZJL9z
 =ZUF/
 -----END PGP SIGNATURE-----

Merge tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux-2.6

Pull irqdomain changes from Grant Likely:
 "Round of refactoring and enhancements to irq_domain infrastructure.
  This series starts the process of simplifying irqdomain.  The ultimate
  goal is to merge LEGACY, LINEAR and TREE mappings into a single
  system, but had to back off from that after some last minute bugs.
  Instead it mainly reorganizes the code and ensures that the reverse
  map gets populated when the irq is mapped instead of the first time it
  is looked up.

  Merging of the irq_domain types is deferred to v3.7

  In other news, this series adds helpers for creating static mappings
  on a linear or tree mapping."

* tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux-2.6:
  irqdomain: Improve diagnostics when a domain mapping fails
  irqdomain: eliminate slow-path revmap lookups
  irqdomain: Fix irq_create_direct_mapping() to test irq_domain type.
  irqdomain: Eliminate dedicated radix lookup functions
  irqdomain: Support for static IRQ mapping and association.
  irqdomain: Always update revmap when setting up a virq
  irqdomain: Split disassociating code into separate function
  irq_domain: correct a minor wrong comment for linear revmap
  irq_domain: Standardise legacy/linear domain selection
  irqdomain: Make ops->map hook optional
  irqdomain: Remove unnecessary test for IRQ_DOMAIN_MAP_LEGACY
  irqdomain: Simple NUMA awareness.
  devicetree: add helper inline for retrieving a node's full name
Linus Torvalds committed 2012-07-31 20:44:03 -07:00
commit 2d53492620
7 changed files with 249 additions and 170 deletions

Documentation/IRQ-domain.txt

@@ -93,6 +93,7 @@ Linux IRQ number into the hardware.
Most drivers cannot use this mapping.
==== Legacy ====
irq_domain_add_simple()
irq_domain_add_legacy()
irq_domain_add_legacy_isa()
@@ -115,3 +116,7 @@ The legacy map should only be used if fixed IRQ mappings must be
supported. For example, ISA controllers would use the legacy map for
mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
numbers.
Most users of legacy mappings should use irq_domain_add_simple() which
will use a legacy domain only if an IRQ range is supplied by the
system and will otherwise use a linear domain mapping.
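
As an illustration of the behaviour described above, here is a minimal sketch (not part of this patch) of a hypothetical driver setting up its domain with irq_domain_add_simple(); the foo_* names, the interrupt count, and the use of dummy_irq_chip are assumptions made for the example:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/of.h>

	#define FOO_NR_IRQS	32	/* hypothetical controller width */

	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
			       irq_hw_number_t hwirq)
	{
		/* per-interrupt setup; dummy_irq_chip stands in for a real chip */
		irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
		return 0;
	}

	static const struct irq_domain_ops foo_irq_ops = {
		.map	= foo_irq_map,
		.xlate	= irq_domain_xlate_onecell,
	};

	static int foo_intc_init(struct device_node *np, unsigned int first_irq)
	{
		/*
		 * With a non-zero first_irq the core sets up a legacy domain so
		 * pre-assigned Linux IRQ numbers keep working; with first_irq == 0
		 * it falls back to a linear domain.
		 */
		struct irq_domain *d = irq_domain_add_simple(np, FOO_NR_IRQS,
							     first_irq,
							     &foo_irq_ops, NULL);
		return d ? 0 : -ENOMEM;
	}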

arch/powerpc/sysdev/xics/icp-hv.c

@@ -111,7 +111,7 @@ static unsigned int icp_hv_get_irq(void)
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;
irq = irq_radix_revmap_lookup(xics_host, vec);
irq = irq_find_mapping(xics_host, vec);
if (likely(irq != NO_IRQ)) {
xics_push_cppr(vec);
return irq;

arch/powerpc/sysdev/xics/icp-native.c

@@ -119,7 +119,7 @@ static unsigned int icp_native_get_irq(void)
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;
irq = irq_radix_revmap_lookup(xics_host, vec);
irq = irq_find_mapping(xics_host, vec);
if (likely(irq != NO_IRQ)) {
xics_push_cppr(vec);
return irq;

arch/powerpc/sysdev/xics/xics-common.c

@@ -329,9 +329,6 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
/* Insert the interrupt mapping into the radix tree for fast lookup */
irq_radix_revmap_insert(xics_host, virq, hw);
/* They aren't all level sensitive but we just don't really know */
irq_set_status_flags(virq, IRQ_LEVEL);

include/linux/irqdomain.h

@@ -112,6 +112,11 @@ struct irq_domain {
};
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
@@ -144,16 +149,31 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
extern void irq_domain_remove(struct irq_domain *host);
extern int irq_domain_associate_many(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
return irq_domain_associate_many(domain, irq, hwirq, 1);
}
extern unsigned int irq_create_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern void irq_dispose_mapping(unsigned int virq);
extern unsigned int irq_find_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
irq_hw_number_t hwirq);
extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
irq_hw_number_t hwirq);
extern int irq_create_strict_mappings(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
static inline int irq_create_identity_mapping(struct irq_domain *host,
irq_hw_number_t hwirq)
{
return irq_create_strict_mappings(host, hwirq, hwirq, 1);
}
extern unsigned int irq_linear_revmap(struct irq_domain *host,
irq_hw_number_t hwirq);

include/linux/of.h

@@ -21,6 +21,7 @@
#include <linux/kref.h>
#include <linux/mod_devicetable.h>
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -158,11 +159,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_BAD_ADDR ((u64)-1)
#ifndef of_node_to_nid
static inline int of_node_to_nid(struct device_node *np) { return -1; }
#define of_node_to_nid of_node_to_nid
#endif
static inline const char* of_node_full_name(struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -427,6 +423,15 @@ static inline int of_machine_is_compatible(const char *compat)
while (0)
#endif /* CONFIG_OF */
#ifndef of_node_to_nid
static inline int of_node_to_nid(struct device_node *np)
{
return numa_node_id();
}
#define of_node_to_nid of_node_to_nid
#endif
/**
* of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
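
The shortlog entry "devicetree: add helper inline for retrieving a node's full name" refers to of_node_full_name() above; a tiny illustrative use (foo_probe_report is a hypothetical function, not from the patch) might look like this:

	#include <linux/of.h>
	#include <linux/printk.h>

	static void foo_probe_report(struct device_node *np)
	{
		/* Safe even for np == NULL thanks to the "<no-node>" fallback. */
		pr_info("probing interrupt controller %s\n", of_node_full_name(np));
	}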

kernel/irq/irqdomain.c

@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -45,7 +46,8 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
{
struct irq_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
of_node_to_nid(of_node));
if (WARN_ON(!domain))
return NULL;
@@ -137,6 +139,36 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}
/**
* irq_domain_add_simple() - Allocate and register a simple irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in mapping
* @first_irq: first number of irq block assigned to the domain
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
* Allocates a legacy irq_domain if irq_base is positive or a linear
* domain otherwise.
*
* This is intended to implement the expected behaviour for most
* interrupt controllers which is that a linear mapping should
* normally be used unless the system requires a legacy mapping in
* order to support supplying interrupt numbers during non-DT
* registration of devices.
*/
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
const struct irq_domain_ops *ops,
void *host_data)
{
if (first_irq > 0)
return irq_domain_add_legacy(of_node, size, first_irq, 0,
ops, host_data);
else
return irq_domain_add_linear(of_node, size, ops, host_data);
}
/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
@@ -203,7 +235,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
* one can then use irq_create_mapping() to
* explicitly change them
*/
ops->map(domain, irq, hwirq);
if (ops->map)
ops->map(domain, irq, hwirq);
/* Clear norequest flags */
irq_clear_status_flags(irq, IRQ_NOREQUEST);
@@ -215,7 +248,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
* irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
@@ -229,7 +262,8 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
struct irq_domain *domain;
unsigned int *revmap;
revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
of_node_to_nid(of_node));
if (WARN_ON(!revmap))
return NULL;
@@ -330,24 +364,112 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
static void irq_domain_disassociate_many(struct irq_domain *domain,
unsigned int irq_base, int count)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
/*
* disassociate in reverse order;
* not strictly necessary, but nice for unwinding
*/
while (count--) {
int irq = irq_base + count;
struct irq_data *irq_data = irq_get_irq_data(irq);
irq_hw_number_t hwirq = irq_data->hwirq;
if (WARN_ON(!irq_data || irq_data->domain != domain))
continue;
irq_set_status_flags(irq, IRQ_NOREQUEST);
/* remove chip and handler */
irq_set_chip_and_handler(irq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(irq);
/* Tell the PIC about it */
if (domain->ops->unmap)
domain->ops->unmap(domain, irq);
smp_mb();
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map(domain, virq, hwirq)) {
pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
irq_data->domain = NULL;
irq_data->hwirq = 0;
return -1;
/* Clear reverse map */
switch(domain->revmap_type) {
case IRQ_DOMAIN_MAP_LINEAR:
if (hwirq < domain->revmap_data.linear.size)
domain->revmap_data.linear.revmap[hwirq] = 0;
break;
case IRQ_DOMAIN_MAP_TREE:
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
break;
}
}
}
int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
unsigned int virq = irq_base;
irq_hw_number_t hwirq = hwirq_base;
int i, ret;
pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
for (i = 0; i < count; i++) {
struct irq_data *irq_data = irq_get_irq_data(virq + i);
if (WARN(!irq_data, "error: irq_desc not allocated; "
"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
return -EINVAL;
if (WARN(irq_data->domain, "error: irq_desc already associated; "
"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
return -EINVAL;
};
for (i = 0; i < count; i++, virq++, hwirq++) {
struct irq_data *irq_data = irq_get_irq_data(virq);
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map) {
ret = domain->ops->map(domain, virq, hwirq);
if (ret != 0) {
pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
virq, hwirq, ret);
WARN_ON(1);
irq_data->domain = NULL;
irq_data->hwirq = 0;
goto err_unmap;
}
}
switch (domain->revmap_type) {
case IRQ_DOMAIN_MAP_LINEAR:
if (hwirq < domain->revmap_data.linear.size)
domain->revmap_data.linear.revmap[hwirq] = virq;
break;
case IRQ_DOMAIN_MAP_TREE:
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
break;
}
irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
irq_clear_status_flags(virq, IRQ_NOREQUEST);
return 0;
err_unmap:
irq_domain_disassociate_many(domain, irq_base, i);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
/**
* irq_create_direct_mapping() - Allocate an irq for direct mapping
@@ -364,10 +486,10 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
if (domain == NULL)
domain = irq_default_domain;
BUG_ON(domain == NULL);
WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
return 0;
virq = irq_alloc_desc_from(1, 0);
virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (!virq) {
pr_debug("create_direct virq allocation failed\n");
return 0;
@@ -380,7 +502,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
}
pr_debug("create_direct obtained virq %d\n", virq);
if (irq_setup_virq(domain, virq, virq)) {
if (irq_domain_associate(domain, virq, virq)) {
irq_free_desc(virq);
return 0;
}
@@ -433,17 +555,16 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
hint = hwirq % nr_irqs;
if (hint == 0)
hint++;
virq = irq_alloc_desc_from(hint, 0);
virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
if (virq <= 0)
virq = irq_alloc_desc_from(1, 0);
virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
}
if (irq_setup_virq(domain, virq, hwirq)) {
if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
irq_free_desc(virq);
if (irq_domain_associate(domain, virq, hwirq)) {
irq_free_desc(virq);
return 0;
}
@@ -454,6 +575,44 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
/**
* irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
* @domain: domain owning the interrupt range
* @irq_base: beginning of linux IRQ range
* @hwirq_base: beginning of hardware IRQ range
* @count: Number of interrupts to map
*
* This routine is used for allocating and mapping a range of hardware
* irqs to linux irqs where the linux irq numbers are at pre-defined
* locations. For use by controllers that already have static mappings
* to insert in to the domain.
*
* Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
* domain insertion.
*
* 0 is returned upon success, while any failure to establish a static
* mapping is treated as an error.
*/
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
int ret;
ret = irq_alloc_descs(irq_base, irq_base, count,
of_node_to_nid(domain->of_node));
if (unlikely(ret < 0))
return ret;
ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
if (unlikely(ret < 0)) {
irq_free_descs(irq_base, count);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
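
A minimal sketch of how a controller with fixed IRQ numbers might call the new static-mapping helpers added above; FOO_IRQ_BASE, FOO_NR_IRQS and the hwirq values are illustrative assumptions, not taken from the patch:

	#include <linux/irqdomain.h>

	#define FOO_IRQ_BASE	64	/* pre-reserved block of Linux IRQs */
	#define FOO_NR_IRQS	16

	static int foo_setup_static_irqs(struct irq_domain *domain)
	{
		int ret;

		/* Map hwirqs 0..15 onto Linux IRQs 64..79 in one call. */
		ret = irq_create_strict_mappings(domain, FOO_IRQ_BASE, 0,
						 FOO_NR_IRQS);
		if (ret)
			return ret;

		/* A single one-to-one entry can use the inline wrapper instead:
		 * hwirq 100 lands on Linux IRQ 100. */
		return irq_create_identity_mapping(domain, 100);
	}
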
@@ -511,7 +670,6 @@ void irq_dispose_mapping(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
struct irq_domain *domain;
irq_hw_number_t hwirq;
if (!virq || !irq_data)
return;
@@ -524,33 +682,7 @@ void irq_dispose_mapping(unsigned int virq)
if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
return;
irq_set_status_flags(virq, IRQ_NOREQUEST);
/* remove chip and handler */
irq_set_chip_and_handler(virq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(virq);
/* Tell the PIC about it */
if (domain->ops->unmap)
domain->ops->unmap(domain, virq);
smp_mb();
/* Clear reverse map */
hwirq = irq_data->hwirq;
switch(domain->revmap_type) {
case IRQ_DOMAIN_MAP_LINEAR:
if (hwirq < domain->revmap_data.linear.size)
domain->revmap_data.linear.revmap[hwirq] = 0;
break;
case IRQ_DOMAIN_MAP_TREE:
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
break;
}
irq_domain_disassociate_many(domain, virq, 1);
irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
@@ -559,16 +691,11 @@ EXPORT_SYMBOL_GPL(irq_dispose_mapping);
* irq_find_mapping() - Find a linux irq from an hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
* This is a slow path, for use by generic code. It's expected that an
* irq controller implementation directly calls the appropriate low level
* mapping function.
*/
unsigned int irq_find_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
unsigned int i;
unsigned int hint = hwirq % nr_irqs;
struct irq_data *data;
/* Look for default domain if necessary */
if (domain == NULL)
@@ -576,115 +703,47 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
if (domain == NULL)
return 0;
/* legacy -> bail early */
if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
switch (domain->revmap_type) {
case IRQ_DOMAIN_MAP_LEGACY:
return irq_domain_legacy_revmap(domain, hwirq);
/* Slow path does a linear search of the map */
if (hint == 0)
hint = 1;
i = hint;
do {
struct irq_data *data = irq_get_irq_data(i);
case IRQ_DOMAIN_MAP_LINEAR:
return irq_linear_revmap(domain, hwirq);
case IRQ_DOMAIN_MAP_TREE:
rcu_read_lock();
data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
rcu_read_unlock();
if (data)
return data->irq;
break;
case IRQ_DOMAIN_MAP_NOMAP:
data = irq_get_irq_data(hwirq);
if (data && (data->domain == domain) && (data->hwirq == hwirq))
return i;
i++;
if (i >= nr_irqs)
i = 1;
} while(i != hint);
return hwirq;
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
/**
* irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
* This is a fast path, for use by irq controller code that uses radix tree
* revmaps
*/
unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
struct irq_data *irq_data;
if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
return irq_find_mapping(domain, hwirq);
/*
* Freeing an irq can delete nodes along the path to
* do the lookup via call_rcu.
*/
rcu_read_lock();
irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
rcu_read_unlock();
/*
* If found in radix tree, then fine.
* Else fallback to linear lookup - this should not happen in practice
* as it means that we failed to insert the node in the radix tree.
*/
return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
}
EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
/**
* irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
* @domain: domain owning this hardware interrupt
* @virq: linux irq number
* @hwirq: hardware irq number in that domain space
*
* This is for use by irq controllers that use a radix tree reverse
* mapping for fast lookup.
*/
void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
return;
if (virq) {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
}
}
EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
* This is a fast path, for use by irq controller code that uses linear
* revmaps. It does fallback to the slow path if the revmap doesn't exist
* yet and will create the revmap entry with appropriate locking
* This is a fast path that can be called directly by irq controller code to
* save a handful of instructions.
*/
unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
unsigned int *revmap;
BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
return irq_find_mapping(domain, hwirq);
/* Check revmap bounds; complain if exceeded */
if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
return 0;
/* Check revmap bounds */
if (unlikely(hwirq >= domain->revmap_data.linear.size))
return irq_find_mapping(domain, hwirq);
/* Check if revmap was allocated */
revmap = domain->revmap_data.linear.revmap;
if (unlikely(revmap == NULL))
return irq_find_mapping(domain, hwirq);
/* Fill up revmap with slow path if no mapping found */
if (unlikely(!revmap[hwirq]))
revmap[hwirq] = irq_find_mapping(domain, hwirq);
return revmap[hwirq];
return domain->revmap_data.linear.revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
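
Since the revmap is now populated when the mapping is created, irq_linear_revmap() is a plain array lookup and is safe to call in a hot path. A sketch of a hypothetical cascaded flow handler; foo_read_pending() and the 32-bit pending register are assumptions for the example:

	#include <linux/bitops.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	/* Stand-in for reading the controller's pending-interrupt register. */
	extern unsigned long foo_read_pending(void);

	static void foo_cascade_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_domain *domain = irq_desc_get_handler_data(desc);
		unsigned long pending = foo_read_pending();
		unsigned int hwirq;

		for_each_set_bit(hwirq, &pending, 32) {
			unsigned int virq = irq_linear_revmap(domain, hwirq);

			/* 0 means "no mapping"; ignore spurious bits. */
			if (virq)
				generic_handle_irq(virq);
		}
	}
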
@@ -761,12 +820,6 @@ static int __init irq_debugfs_init(void)
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
return 0;
}
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
*
@@ -829,7 +882,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d,
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
const struct irq_domain_ops irq_domain_simple_ops = {
.map = irq_domain_simple_map,
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
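
With the ops->map hook now optional (and irq_domain_simple_map removed above), a controller that needs no per-interrupt programming can register a domain whose ops carry only an xlate callback. A sketch under that assumption; the foo_* names and the domain size are hypothetical:

	#include <linux/irqdomain.h>
	#include <linux/of.h>

	static const struct irq_domain_ops foo_trivial_ops = {
		/* no .map: the core now skips the hook instead of requiring a stub */
		.xlate	= irq_domain_xlate_onetwocell,
	};

	static struct irq_domain *foo_register_trivial_domain(struct device_node *np)
	{
		return irq_domain_add_linear(np, 16, &foo_trivial_ops, NULL);
	}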