path: root/arch/arm/mm/cache-feroceon-l2.c
author    Lennert Buytenhek <buytenh@wantstofly.org>  2008-06-22 22:45:04 +0200
committer Lennert Buytenhek <buytenh@marvell.com>  2008-06-22 22:45:04 +0200
commit    99c6dc117d27d6259214812bd0b113aaf467f600 (patch)
tree      0baadd0827fc39f9d2b052305f9447e4b50c5162 /arch/arm/mm/cache-feroceon-l2.c
parent    836a8051d54525e0782f156dcfa3c13d30f22840 (diff)
[ARM] Feroceon: L2 cache support
This patch adds support for the unified Feroceon L2 cache controller
as found in e.g. the Marvell Kirkwood and Marvell Discovery Duo
families of ARM SoCs.

Note that:

- Page table walks are outer uncacheable on Kirkwood and Discovery
  Duo, since the ARMv5 spec provides no way to indicate outer
  cacheability of page table walks (specifying it in TTBR[4:3] is an
  ARMv6+ feature).  This requires adding L2 cache clean instructions
  to proc-feroceon.S (dcache_clean_area(), set_pte()) as well as to
  tlbflush.h ({flush,clean}_pmd_entry()).  The latter case is handled
  by defining a new TLB type (TLB_FEROCEON) which is almost identical
  to the v4wbi one but provides a TLB_L2CLEAN_FR flag.

- The Feroceon L2 cache controller supports L2 range (i.e. 'clean L2
  range by MVA' and 'invalidate L2 range by MVA') operations, and
  this patch uses those range operations for all Linux outer cache
  operations, as they are faster than the regular per-line
  operations.  L2 range operations are not interruptible on this
  hardware, which avoids potential livelock issues, but can be bad
  for interrupt latency, so there is a compile-time tunable
  (MAX_RANGE_SIZE) which allows you to select the maximum range size
  to operate on at once.  (Valid range is between one cache line and
  one 4KiB page, and must be a multiple of the line size.)

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
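As a worked illustration of the MAX_RANGE_SIZE trade-off (a standalone
sketch, not part of the patch): the program below replicates the
chunking policy of calc_range_end() from the new file, under the
defaults that file picks (CACHE_LINE_SIZE of 32 bytes, MAX_RANGE_SIZE
of 1024 bytes, 4 KiB pages).  Each uninterruptible hardware range
operation then covers at most 32 cache lines and never crosses a page
boundary.

#include <stdio.h>

#define CACHE_LINE_SIZE	32
#define MAX_RANGE_SIZE	1024
#define PAGE_SIZE	4096

/* Same policy as calc_range_end() in the patch; 'start' and 'end'
 * are assumed to be cache line aligned. */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	unsigned long range_end = end;

	/* Limit the number of cache lines processed at once. */
	if (range_end > start + MAX_RANGE_SIZE)
		range_end = start + MAX_RANGE_SIZE;

	/* Range operations can't straddle a page boundary. */
	if (range_end > (start | (PAGE_SIZE - 1)) + 1)
		range_end = (start | (PAGE_SIZE - 1)) + 1;

	return range_end;
}

int main(void)
{
	unsigned long start = 0x0f80, end = 0x1400;

	/* Prints [0x0f80, 0x1000) and then [0x1000, 0x1400): the first
	 * chunk is clipped at the page boundary, the second is one
	 * full MAX_RANGE_SIZE chunk. */
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);

		printf("[0x%04lx, 0x%04lx)\n", start, range_end);
		start = range_end;
	}

	return 0;
}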
Diffstat (limited to 'arch/arm/mm/cache-feroceon-l2.c')
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c | 318
1 file changed, 318 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
new file mode 100644
index 00000000000..20eec4ba173
--- /dev/null
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -0,0 +1,318 @@
+/*
+ * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * References:
+ * - Unified Layer 2 Cache for Feroceon CPU Cores,
+ * Document ID MV-S104858-00, Rev. A, October 23 2007.
+ */
+
+#include <linux/init.h>
+#include <asm/cacheflush.h>
+#include <asm/plat-orion/cache-feroceon-l2.h>
+
+
+/*
+ * Low-level cache maintenance operations.
+ *
+ * As well as the regular 'clean/invalidate/flush L2 cache line by
+ * MVA' instructions, the Feroceon L2 cache controller also features
+ * 'clean/invalidate L2 range by MVA' operations.
+ *
+ * Cache range operations are initiated by writing the start and
+ * end addresses to successive cp15 registers, and process every
+ * cache line whose first byte address lies in the inclusive range
+ * [start:end].
+ *
+ * The cache range operations stall the CPU pipeline until completion.
+ *
+ * The range operations require two successive cp15 writes, in
+ * between which we don't want to be preempted.
+ */
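+/* Clean a single L2 cache line, addressed by physical address. */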
+static inline void l2_clean_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
+}
+
+static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /*
+ * Make sure 'start' and 'end' reference the same page, as
+ * L2 is PIPT and range operations only do a TLB lookup on
+ * the start address.
+ */
+ BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+
+ raw_local_irq_save(flags);
+ __asm__("mcr p15, 1, %0, c15, c9, 4" : : "r" (start));
+ __asm__("mcr p15, 1, %0, c15, c9, 5" : : "r" (end));
+ raw_local_irq_restore(flags);
+}
+
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
+{
+ l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
+}
+
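+/* Clean and invalidate a single L2 cache line by physical address. */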
+static inline void l2_clean_inv_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
+}
+
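+/* Invalidate a single L2 cache line by physical address. */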
+static inline void l2_inv_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
+}
+
+static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /*
+ * Make sure 'start' and 'end' reference the same page, as
+ * L2 is PIPT and range operations only do a TLB lookup on
+ * the start address.
+ */
+ BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+
+ raw_local_irq_save(flags);
+ __asm__("mcr p15, 1, %0, c15, c11, 4" : : "r" (start));
+ __asm__("mcr p15, 1, %0, c15, c11, 5" : : "r" (end));
+ raw_local_irq_restore(flags);
+}
+
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
+{
+ l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
+}
+
+
+/*
+ * Linux primitives.
+ *
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive, while the hardware cache range operations use
+ * inclusive start and end addresses.
+ */
+#define CACHE_LINE_SIZE 32
+#define MAX_RANGE_SIZE 1024
+
+static int l2_wt_override;
+
+static unsigned long calc_range_end(unsigned long start, unsigned long end)
+{
+ unsigned long range_end;
+
+ BUG_ON(start & (CACHE_LINE_SIZE - 1));
+ BUG_ON(end & (CACHE_LINE_SIZE - 1));
+
+ /*
+ * Try to process all cache lines between 'start' and 'end'.
+ */
+ range_end = end;
+
+ /*
+ * Limit the number of cache lines processed at once,
+ * since cache range operations stall the CPU pipeline
+ * until completion.
+ */
+ if (range_end > start + MAX_RANGE_SIZE)
+ range_end = start + MAX_RANGE_SIZE;
+
+ /*
+ * Cache range operations can't straddle a page boundary.
+ */
+ if (range_end > (start | (PAGE_SIZE - 1)) + 1)
+ range_end = (start | (PAGE_SIZE - 1)) + 1;
+
+ return range_end;
+}
+
+static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
+{
+ /*
+ * Clean and invalidate partial first cache line.
+ */
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+ start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+ }
+
+ /*
+ * Clean and invalidate partial last cache line.
+ */
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+ end &= ~(CACHE_LINE_SIZE - 1);
+ }
+
+ /*
+ * Invalidate all full cache lines between 'start' and 'end'.
+ */
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+
+ dsb();
+}
+
+static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
+{
+ /*
+ * If L2 is forced to WT, the L2 will always be clean and we
+ * don't need to do anything here.
+ */
+ if (!l2_wt_override) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+ }
+
+ dsb();
+}
+
+static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
+{
+ start &= ~(CACHE_LINE_SIZE - 1);
+ end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ if (!l2_wt_override)
+ l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
+ l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+
+ dsb();
+}
+
+
+/*
+ * Routines to disable and re-enable the D-cache and I-cache at run
+ * time. These are necessary because the L2 cache can only be enabled
+ * or disabled while the L1 Dcache and Icache are both disabled.
+ */
+static void __init invalidate_and_disable_dcache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (cr & CR_C) {
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ flush_cache_all();
+ set_cr(cr & ~CR_C);
+ raw_local_irq_restore(flags);
+ }
+}
+
+static void __init enable_dcache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (!(cr & CR_C))
+ set_cr(cr | CR_C);
+}
+
+static void __init __invalidate_icache(void)
+{
+	/*
+	 * Invalidate the entire I-cache.  The register value passed
+	 * to this mcr is ignored by the hardware, so just write zero.
+	 */
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0\n" : : "r" (0));
+}
+
+static void __init invalidate_and_disable_icache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (cr & CR_I) {
+ set_cr(cr & ~CR_I);
+ __invalidate_icache();
+ }
+}
+
+static void __init enable_icache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (!(cr & CR_I))
+ set_cr(cr | CR_I);
+}
+
+static inline u32 read_extra_features(void)
+{
+ u32 u;
+
+ __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
+
+ return u;
+}
+
+static inline void write_extra_features(u32 u)
+{
+ __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
+}
+
+static void __init disable_l2_prefetch(void)
+{
+ u32 u;
+
+	/*
+	 * Read the CPU Extra Features register and, if the Disable
+	 * L2 Prefetch bit is not already set, set it.
+	 */
+ u = read_extra_features();
+ if (!(u & 0x01000000)) {
+ printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+ write_extra_features(u | 0x01000000);
+ }
+}
+
+static void __init enable_l2(void)
+{
+ u32 u;
+
+ u = read_extra_features();
+ if (!(u & 0x00400000)) {
+ printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+
+ invalidate_and_disable_dcache();
+ invalidate_and_disable_icache();
+ write_extra_features(u | 0x00400000);
+ enable_icache();
+ enable_dcache();
+ }
+}
+
+void __init feroceon_l2_init(int __l2_wt_override)
+{
+ l2_wt_override = __l2_wt_override;
+
+ disable_l2_prefetch();
+
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
+
+ enable_l2();
+
+ printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+ l2_wt_override ? ", in WT override mode" : "");
+}
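
For context, a sketch of how a platform might wire this up.  The board
functions below are invented for illustration; feroceon_l2_init() comes
from this patch, and outer_inv_range() is the generic outer-cache
helper from asm/cacheflush.h, which ends up calling the
feroceon_l2_inv_range() hook installed above.

#include <linux/init.h>
#include <asm/cacheflush.h>			/* outer_inv_range() */
#include <asm/plat-orion/cache-feroceon-l2.h>	/* feroceon_l2_init() */

/* Hypothetical platform init: pass 1 to force the L2 into
 * write-through mode, 0 to leave it in write-back mode. */
static void __init example_board_init(void)
{
	feroceon_l2_init(0);
}

/* Once the hooks are installed, generic code reaches the L2 through
 * the outer-cache interface, e.g. to invalidate a DMA buffer by
 * physical address range (the end address is noninclusive): */
static void example_dma_inv(unsigned long paddr, unsigned long len)
{
	outer_inv_range(paddr, paddr + len);
}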