From 94e5a85b3be0ce109d26aa6812b2a02c518a0e4b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 18 Jan 2012 15:32:49 +0000
Subject: ARM: earlier initialization of vectors page

Initialize the contents of the vectors page immediately after we
allocate the page, but before we map it.  This avoids any possible
aliases with other mappings which may need to be flushed after the
page has been mapped irrespective of the cache type.

We follow this later with a flush_cache_all() after all static memory
mappings have been initialized, which ensures that this is safe from
any cache effects.

Tested-by: Catalin Marinas
Reviewed-by: Catalin Marinas
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/traps.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 5b29a667362..f555bb3664d 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -46,7 +46,7 @@ static inline int in_exception_text(unsigned long ptr)
 	return in ? : __in_irqentry_text(ptr);
 }
 
-extern void __init early_trap_init(void);
+extern void __init early_trap_init(void *);
 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
 extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
-- 
cgit v1.2.3


From 195864cf3d6f5b6b743793bda3aaa2ff65d322ae Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 19 Jan 2012 10:05:41 +0000
Subject: ARM: move CP15 definitions to separate header file

Avoid namespace conflicts with drivers over the CP15 definitions by
moving CP15 related prototypes and definitions to a private header
file.

Acked-by: Stephen Warren
Tested-by: Stephen Warren [Tegra]
Acked-by: H Hartley Sweeten
Tested-by: H Hartley Sweeten [EP93xx]
Acked-by: Nicolas Pitre
Acked-by: Kukjin Kim
Signed-off-by: Russell King
---
 arch/arm/include/asm/cp15.h   | 87 +++++++++++++++++++++++++++++++++++++++++++
 arch/arm/include/asm/system.h | 77 --------------------------------------
 2 files changed, 87 insertions(+), 77 deletions(-)
 create mode 100644 arch/arm/include/asm/cp15.h

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
new file mode 100644
index 00000000000..3dabd8dd404
--- /dev/null
+++ b/arch/arm/include/asm/cp15.h
@@ -0,0 +1,87 @@
+#ifndef __ASM_ARM_CP15_H
+#define __ASM_ARM_CP15_H
+
+#include <asm/system.h>
+
+/*
+ * CR1 bits (CP#15 CR1)
+ */
+#define CR_M	(1 << 0)	/* MMU enable */
+#define CR_A	(1 << 1)	/* Alignment abort enable */
+#define CR_C	(1 << 2)	/* Dcache enable */
+#define CR_W	(1 << 3)	/* Write buffer enable */
+#define CR_P	(1 << 4)	/* 32-bit exception handler */
+#define CR_D	(1 << 5)	/* 32-bit data address range */
+#define CR_L	(1 << 6)	/* Implementation defined */
+#define CR_B	(1 << 7)	/* Big endian */
+#define CR_S	(1 << 8)	/* System MMU protection */
+#define CR_R	(1 << 9)	/* ROM MMU protection */
+#define CR_F	(1 << 10)	/* Implementation defined */
+#define CR_Z	(1 << 11)	/* Implementation defined */
+#define CR_I	(1 << 12)	/* Icache enable */
+#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
+#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
+#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
+#define CR_DT	(1 << 16)
+#define CR_IT	(1 << 18)
+#define CR_ST	(1 << 19)
+#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
+#define CR_U	(1 << 22)	/* Unaligned access operation */
+#define CR_XP	(1 << 23)	/* Extended page tables */
+#define CR_VE	(1 << 24)	/* Vectored interrupts */
+#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
+#define CR_TRE	(1 << 28)	/* TEX remap enable */
+#define CR_AFE	(1 << 29)	/* Access flag enable */
+#define CR_TE	(1 << 30)	/* Thumb exception enable */
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ >= 4
+#define vectors_high()	(cr_alignment & CR_V)
+#else
+#define vectors_high()	(0)
+#endif
+
+extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
+extern unsigned long cr_alignment;	/* defined in entry-armv.S */
+
+static inline unsigned int get_cr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+#ifndef CONFIG_SMP
+extern void adjust_cr(unsigned long mask, unsigned long set);
+#endif
+
+#define CPACC_FULL(n)		(3 << (n * 2))
+#define CPACC_SVC(n)		(1 << (n * 2))
+#define CPACC_DISABLE(n)	(0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+	  : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index e4c96cc6ec0..774c41e8add 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -14,37 +14,6 @@
 #define CPU_ARCH_ARMv6		8
 #define CPU_ARCH_ARMv7		9
 
-/*
- * CR1 bits (CP#15 CR1)
- */
-#define CR_M	(1 << 0)	/* MMU enable */
-#define CR_A	(1 << 1)	/* Alignment abort enable */
-#define CR_C	(1 << 2)	/* Dcache enable */
-#define CR_W	(1 << 3)	/* Write buffer enable */
-#define CR_P	(1 << 4)	/* 32-bit exception handler */
-#define CR_D	(1 << 5)	/* 32-bit data address range */
-#define CR_L	(1 << 6)	/* Implementation defined */
-#define CR_B	(1 << 7)	/* Big endian */
-#define CR_S	(1 << 8)	/* System MMU protection */
-#define CR_R	(1 << 9)	/* ROM MMU protection */
-#define CR_F	(1 << 10)	/* Implementation defined */
-#define CR_Z	(1 << 11)	/* Implementation defined */
-#define CR_I	(1 << 12)	/* Icache enable */
-#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
-#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
-#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
-#define CR_DT	(1 << 16)
-#define CR_IT	(1 << 18)
-#define CR_ST	(1 << 19)
-#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
-#define CR_U	(1 << 22)	/* Unaligned access operation */
-#define CR_XP	(1 << 23)	/* Extended page tables */
-#define CR_VE	(1 << 24)	/* Vectored interrupts */
-#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
-#define CR_TRE	(1 << 28)	/* TEX remap enable */
-#define CR_AFE	(1 << 29)	/* Access flag enable */
-#define CR_TE	(1 << 30)	/* Thumb exception enable */
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -119,12 +88,6 @@ extern void (*arm_pm_restart)(char str, const char *cmd);
 
 extern unsigned int user_debug;
 
-#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high()	(cr_alignment & CR_V)
-#else
-#define vectors_high()	(0)
-#endif
-
 #if __LINUX_ARM_ARCH__ >= 7 ||		\
     (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
 #define sev()	__asm__ __volatile__ ("sev" : : : "memory")
@@ -185,46 +148,6 @@ extern unsigned int user_debug;
 #define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
-extern unsigned long cr_alignment;	/* defined in entry-armv.S */
-
-static inline unsigned int get_cr(void)
-{
-	unsigned int val;
-	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
-	return val;
-}
-
-static inline void set_cr(unsigned int val)
-{
-	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
-	  : : "r" (val) : "cc");
-	isb();
-}
-
-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
-
-#define CPACC_FULL(n)		(3 << (n * 2))
-#define CPACC_SVC(n)		(1 << (n * 2))
-#define CPACC_DISABLE(n)	(0 << (n * 2))
-
-static inline unsigned int get_copro_access(void)
-{
-	unsigned int val;
-	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
-	  : "=r" (val) : : "cc");
-	return val;
-}
-
-static inline void set_copro_access(unsigned int val)
-{
-	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
-	  : : "r" (val) : "cc");
-	isb();
-}
-
 /*
  * switch_mm() may do a full cache flush over the context switch,
  * so enable interrupts over the context switch to avoid high
-- 
cgit v1.2.3


From f9d4861fc32b995b1616775614459b8f266c803c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 20 Jan 2012 12:01:13 +0100
Subject: ARM: 7294/1: vectors: use gate_vma for vectors user mapping

The current user mapping for the vectors page is inserted as a
`horrible hack vma' into each task via arch_setup_additional_pages.
This causes problems with the MM subsystem and vm_normal_page, as
described here:

https://lkml.org/lkml/2012/1/14/55

Following the suggestion from Hugh in the above thread, this patch
uses the gate_vma for the vectors user mapping, therefore
consolidating the horrible hack VMAs into one.
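For reference, the consolidation itself lives outside this header-only
diff, in arch/arm/kernel/process.c.  A minimal sketch of the gate-area
approach under the generic __HAVE_ARCH_GATE_AREA contract (an
illustration, not the verbatim patch):

	/* Sketch: one shared, static VMA describing the vectors page at
	 * 0xffff0000, replacing the per-task "fake" VMA. */
	static struct vm_area_struct gate_vma;

	static int __init gate_vma_init(void)
	{
		gate_vma.vm_start     = 0xffff0000;
		gate_vma.vm_end       = 0xffff0000 + PAGE_SIZE;
		gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
		gate_vma.vm_flags     = VM_READ | VM_EXEC |
					VM_MAYREAD | VM_MAYEXEC;
		return 0;
	}
	arch_initcall(gate_vma_init);

	struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
	{
		return &gate_vma;	/* shared by all tasks */
	}

	int in_gate_area(struct mm_struct *mm, unsigned long addr)
	{
		return addr >= gate_vma.vm_start && addr < gate_vma.vm_end;
	}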
Acked-and-Tested-by: Nicolas Pitre
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/elf.h         |  4 ----
 arch/arm/include/asm/mmu_context.h | 29 +----------------------------
 arch/arm/include/asm/page.h        |  2 ++
 3 files changed, 3 insertions(+), 32 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686..38050b1c480 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,8 +130,4 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
-extern int vectors_user_mapping(void);
-#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 71605d9f8e4..a0b3cac0547 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
 
@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
-/*
- * We are inserting a "fake" vma for the user-accessible vector page so
- * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
- * But we also want to remove it before the generic code gets to see it
- * during process exit or the unmapping of it would cause total havoc.
- * (the macro is used as remove_vma() is static to mm/mmap.c)
- */
-#define arch_exit_mmap(mm) \
-do { \
-	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
-	if (high_vma) { \
-		BUG_ON(high_vma->vm_next);  /* it should be last */ \
-		if (high_vma->vm_prev) \
-			high_vma->vm_prev->vm_next = NULL; \
-		else \
-			mm->mmap = NULL; \
-		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
-		mm->mmap_cache = NULL; \
-		mm->map_count--; \
-		remove_vma(high_vma); \
-	} \
-} while (0)
-
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
-{
-}
-
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 97b440c25c5..5838361c48b 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level-types.h>
 #else
-- 
cgit v1.2.3


From f5f5195487affe53f90d99cba4aee73a20cae565 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 1 Feb 2012 10:42:22 +0100
Subject: ARM: 7311/1: Add generic instruction opcode manipulation helpers

This patch adds some endianness-agnostic helpers to convert machine
instructions between canonical integer form and in-memory
representation.

A canonical integer form for representing instructions is also
formalised here.
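As an illustration of the intended use (not part of this patch; the
helper name here is hypothetical), decoding a possibly 32-bit Thumb
instruction from memory with these helpers might look like:

	#include <linux/types.h>
	#include <asm/opcodes.h>

	/* Hypothetical example: return the instruction at addr in
	 * canonical form (0x0000KKLL for 16-bit, 0xKKLLMMNN for 32-bit). */
	static u32 read_thumb_insn(const u16 *addr)
	{
		u32 first = __mem_to_opcode_thumb16(addr[0]);

		if (!__opcode_is_thumb16(first))  /* first halfword >= 0xE800 */
			return __opcode_thumb32_compose(first,
					__mem_to_opcode_thumb16(addr[1]));
		return first;
	}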
Signed-off-by: Dave Martin
Acked-by: Nicolas Pitre
Tested-by: Jon Medhurst
Signed-off-by: Russell King
---
 arch/arm/include/asm/opcodes.h | 59 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index c0efdd60966..19c48deda70 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -17,4 +17,63 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
 #define ARM_OPCODE_CONDTEST_PASS   1
 #define ARM_OPCODE_CONDTEST_UNCOND 2
 
+
+/*
+ * Opcode byteswap helpers
+ *
+ * These macros help with converting instructions between a canonical integer
+ * format and in-memory representation, in an endianness-agnostic manner.
+ *
+ * __mem_to_opcode_*() convert from in-memory representation to canonical form.
+ * __opcode_to_mem_*() convert from canonical form to in-memory representation.
+ *
+ *
+ * Canonical instruction representation:
+ *
+ *	ARM:		0xKKLLMMNN
+ *	Thumb 16-bit:	0x0000KKLL, where KK < 0xE8
+ *	Thumb 32-bit:	0xKKLLMMNN, where KK >= 0xE8
+ *
+ * There is no way to distinguish an ARM instruction in canonical representation
+ * from a Thumb instruction (just as these cannot be distinguished in memory).
+ * Where this distinction is important, it needs to be tracked separately.
+ *
+ * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
+ * represent any valid Thumb-2 instruction.  For this range,
+ * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define __opcode_to_mem_arm(x) swab32(x)
+#define __opcode_to_mem_thumb16(x) swab16(x)
+#define __opcode_to_mem_thumb32(x) swahb32(x)
+#else
+#define __opcode_to_mem_arm(x) ((u32)(x))
+#define __opcode_to_mem_thumb16(x) ((u16)(x))
+#define __opcode_to_mem_thumb32(x) swahw32(x)
+#endif
+
+#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
+#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+
+/* Operations specific to Thumb opcodes */
+
+/* Instruction size checks: */
+#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL)
+#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL)
+
+/* Operations to construct or split 32-bit Thumb instructions: */
+#define __opcode_thumb32_first(x) ((u16)((x) >> 16))
+#define __opcode_thumb32_second(x) ((u16)(x))
+#define __opcode_thumb32_compose(first, second) \
+	(((u32)(u16)(first) << 16) | (u32)(u16)(second))
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_ARM_OPCODES_H */
-- 
cgit v1.2.3


From 87067a935a174cf5e0b336d338a0ab535ffe199d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 4 Feb 2012 10:55:38 +0000
Subject: ARM: Optimize multi-CPU tlb flushing a little more

The compiler does not conditionalize the assembly instructions for
the tlb operations, which leads to sub-optimal code being generated
when building a kernel for multiple CPUs.  We can tweak things fairly
simply as the code fragment below shows:

    17f8:	e3120001 	tst	r2, #1	; 0x1
	...
    1800:	0a000000 	beq	1808
    1804:	ee061f10 	mcr	15, 0, r1, cr6, cr0, {0}
    1808:	e3120004 	tst	r2, #4	; 0x4
    180c:	0a000000 	beq	1814
    1810:	ee081f36 	mcr	15, 0, r1, cr8, cr6, {1}

becomes:

    17f0:	e3120001 	tst	r2, #1	; 0x1
    17f4:	1e063f10 	mcrne	15, 0, r3, cr6, cr0, {0}
    17f8:	e3120004 	tst	r2, #4	; 0x4
    17fc:	1e083f36 	mcrne	15, 0, r3, cr8, cr6, {1}

Overall, for Realview with V6 and V7 CPUs configured:

   text	   data	    bss	    dec	    hex	filename
4153998	 207340	5371036	9732374	 948116	../build/realview/vmlinux.before
4153366	 207332	5371036	9731734	 947e96	../build/realview/vmlinux.after

Signed-off-by: Russell King
---
 arch/arm/include/asm/tlbflush.h | 136 +++++++++++++++++-----------------------
 1 file changed, 58 insertions(+), 78 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 02b2f820398..85fe61e7320 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -318,6 +318,21 @@ extern struct cpu_tlb_fns cpu_tlb;
 #define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
 
+#define __tlb_op(f, insnarg, arg)					\
+	do {								\
+		if (always_tlb_flags & (f))				\
+			asm("mcr " insnarg				\
+			    : : "r" (arg) : "cc");			\
+		else if (possible_tlb_flags & (f))			\
+			asm("tst %1, %2\n\t"				\
+			    "mcrne " insnarg				\
+			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f)	\
+			    : "cc");					\
+	} while (0)
+
+#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
+#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
+
 static inline void local_flush_tlb_all(void)
 {
 	const int zero = 0;
@@ -326,16 +341,11 @@ static inline void local_flush_tlb_all(void)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (tlb_flag(TLB_V3_FULL))
-		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
-	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
-		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
-	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
-		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
-	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
-		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
-	if (tlb_flag(TLB_V7_UIS_FULL))
-		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
+	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
+	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
+	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
+	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
+	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
 
 	if (tlb_flag(TLB_BARRIER)) {
 		dsb();
@@ -352,29 +362,23 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-		if (tlb_flag(TLB_V3_FULL))
-			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
-		if (tlb_flag(TLB_V4_U_FULL))
-			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
-		if (tlb_flag(TLB_V4_D_FULL))
-			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
-		if (tlb_flag(TLB_V4_I_FULL))
-			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
+			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
+			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
+			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
+		}
+		put_cpu();
 	}
-	put_cpu();
-
-	if (tlb_flag(TLB_V6_U_ASID))
-		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
-	if (tlb_flag(TLB_V6_D_ASID))
-		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
-	if (tlb_flag(TLB_V6_I_ASID))
-		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
-	if (tlb_flag(TLB_V7_UIS_ASID))
+
+	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
+	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
+	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
 #ifdef CONFIG_ARM_ERRATA_720789
-	asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
+	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
 #else
-	asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
+	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
 #endif
 
 	if (tlb_flag(TLB_BARRIER))
@@ -392,30 +396,23 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		if (tlb_flag(TLB_V3_PAGE))
-			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
-		if (tlb_flag(TLB_V4_U_PAGE))
-			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
-		if (tlb_flag(TLB_V4_D_PAGE))
-			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
-		if (tlb_flag(TLB_V4_I_PAGE))
-			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
+		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
+		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
+		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
 		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
 			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
 	}
 
-	if (tlb_flag(TLB_V6_U_PAGE))
-		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
-	if (tlb_flag(TLB_V6_D_PAGE))
-		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
-	if (tlb_flag(TLB_V6_I_PAGE))
-		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
-	if (tlb_flag(TLB_V7_UIS_PAGE))
+	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
+	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
+	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
 #ifdef CONFIG_ARM_ERRATA_720789
-	asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
 #else
-	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
 #endif
 
 	if (tlb_flag(TLB_BARRIER))
@@ -432,25 +429,17 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (tlb_flag(TLB_V3_PAGE))
-		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
-	if (tlb_flag(TLB_V4_U_PAGE))
-		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
-	if (tlb_flag(TLB_V4_D_PAGE))
-		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
-	if (tlb_flag(TLB_V4_I_PAGE))
-		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
+	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
+	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
+	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
+	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
 	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
 		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
 
-	if (tlb_flag(TLB_V6_U_PAGE))
-		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
-	if (tlb_flag(TLB_V6_D_PAGE))
-		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
-	if (tlb_flag(TLB_V6_I_PAGE))
-		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
-	if (tlb_flag(TLB_V7_UIS_PAGE))
-		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
+	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
+	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
+	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
 
 	if (tlb_flag(TLB_BARRIER)) {
 		dsb();
@@ -475,13 +464,8 @@ static inline void flush_pmd_entry(void *pmd)
 {
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
-	if (tlb_flag(TLB_DCLEAN))
-		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
-			: : "r" (pmd) : "cc");
-
-	if (tlb_flag(TLB_L2CLEAN_FR))
-		asm("mcr p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
-			: : "r" (pmd) : "cc");
+	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
+	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
 
 	if (tlb_flag(TLB_WB))
 		dsb();
@@ -491,15 +475,11 @@ static inline void clean_pmd_entry(void *pmd)
 {
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
-	if (tlb_flag(TLB_DCLEAN))
-		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
-			: : "r" (pmd) : "cc");
-
-	if (tlb_flag(TLB_L2CLEAN_FR))
-		asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd"
-			: : "r" (pmd) : "cc");
+	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
+	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
 }
 
+#undef tlb_op
 #undef tlb_flag
 #undef always_tlb_flags
 #undef possible_tlb_flags
-- 
cgit v1.2.3


From d33c88c659d708e7c5d518a05ef9349a36217bb2 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 3 Feb 2012 14:46:01 +0100
Subject: ARM: 7315/1: perf: add support for the Cortex-A7 PMU

Cortex-A7 implements an ARMv7-compatible PMU compliant with the PMUv2
architecture specification.  This patch adds support for the PMU to
the ARM perf backend.

Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/perf_event.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 99cfe360798..ee7c056be3f 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -26,6 +26,7 @@ enum arm_perf_pmu_ids {
 	ARM_PERF_PMU_ID_CA9,
 	ARM_PERF_PMU_ID_CA5,
 	ARM_PERF_PMU_ID_CA15,
+	ARM_PERF_PMU_ID_CA7,
 	ARM_NUM_PMU_IDS,
 };
-- 
cgit v1.2.3


From 5936d1161ff503a1170722fe9b2d9ac4da700eef Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 9 Feb 2012 16:16:28 +0000
Subject: ARM: get rid of asm/irq.h in asm/prom.h

Avoid potential build problems caused by missing mach/irqs.h includes
on non-OF builds, which were previously masked by an errant include in
asm/prom.h.  asm/prom.h requires nothing from asm/irq.h, as Grant says:

On Mon, Feb 06, 2012 at 05:56:23AM +0000, Grant Likely wrote:
> On Sat, Feb 04, 2012 at 10:17:48PM +0000, Russell King wrote:
> > Finally, do we need asm/irq.h in our asm/prom.h ?  That's causing
> > fragility between DT and non-DT builds, because people are finding
> > that their DT builds work without their mach/irqs.h includes but
> > fail when built with non-DT.  The only thing which DT might need -
> > at the most - is NR_IRQS, but I'd hope with things like irq domains
> > it doesn't actually require it.
>
> I don't think so.  There may be a file or two that break because they're
> not including everything they need, but I don't think anything in the
> header requires it.
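Concretely, a non-DT board file that previously compiled only because
<asm/prom.h> dragged in <asm/irq.h> now has to include it explicitly
(a hypothetical fragment, for illustration only):

	#include <asm/irq.h>	/* NR_IRQS no longer comes in via <asm/prom.h> */

	static int board_irq_valid(int irq)
	{
		return irq >= 0 && irq < NR_IRQS;	/* placeholder use */
	}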
Signed-off-by: Russell King
---
 arch/arm/include/asm/prom.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index ee036330791..aeae9c609df 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -13,8 +13,6 @@
 
 #ifdef CONFIG_OF
 
-#include <asm/irq.h>
-
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
-- 
cgit v1.2.3


From e88443cdfc61af5cd22d6aeb245ee94833c473bc Mon Sep 17 00:00:00 2001
From: Mathias Krause
Date: Mon, 5 Mar 2012 15:05:34 -0800
Subject: ARM: exec: remove redundant set_fs(USER_DS)

The address limit is already set in flush_old_exec(), so this
set_fs(USER_DS) is redundant.

Signed-off-by: Mathias Krause
Signed-off-by: Andrew Morton
Signed-off-by: Russell King
---
 arch/arm/include/asm/processor.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index ce280b8d613..d7038fa2234 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -55,7 +55,6 @@ struct thread_struct {
 #define start_thread(regs,pc,sp)					\
 ({									\
 	unsigned long *stack = (unsigned long *)sp;			\
-	set_fs(USER_DS);						\
 	memset(regs->uregs, 0, sizeof(regs->uregs));			\
 	if (current->personality & ADDR_LIMIT_32BIT)			\
 		regs->ARM_cpsr = USR_MODE;				\
-- 
cgit v1.2.3


From 09f05d8529ff4aa92311c1a55ce35ac98cb59b8c Mon Sep 17 00:00:00 2001
From: Rabin Vincent
Date: Sat, 18 Feb 2012 17:52:41 +0100
Subject: ARM: 7334/1: add jump label support

Add the arch-specific code to support jump labels for ARM and Thumb-2.
This code will only be activated on compilers that are capable of
building it.  It has been tested with GCC 4.6 patched with the patch
from GCC bug 48637.

Cc: Jason Baron
Signed-off-by: Rabin Vincent
Signed-off-by: Russell King
---
 arch/arm/include/asm/jump_label.h | 41 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 arch/arm/include/asm/jump_label.h

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
new file mode 100644
index 00000000000..5c5ca2ea62b
--- /dev/null
+++ b/arch/arm/include/asm/jump_label.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_ARM_JUMP_LABEL_H
+#define _ASM_ARM_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define JUMP_LABEL_NOP	"nop.w"
+#else
+#define JUMP_LABEL_NOP	"nop"
+#endif
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+	asm goto("1:\n\t"
+		 JUMP_LABEL_NOP "\n\t"
+		 ".pushsection __jump_table, \"aw\"\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 : : "i" (key) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif
-- 
cgit v1.2.3


From 38b4205abeaed7eaae0a2a9e9af8e1252fa5c86a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?=
Date: Wed, 14 Mar 2012 10:30:52 +0100
Subject: ARM: 7361/1: provide XIP_VIRT_ADDR for no-MMU builds
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

XIP_VIRT_ADDR is needed for XIP builds and is currently only defined
for builds with CONFIG_MMU.  Also provide it for no-MMU builds, to
make it possible to build an XIP kernel for MMU-less machines.
As these machines lack an MMU, it has to be an identity mapping.

Acked-by: Nicolas Pitre
Signed-off-by: Uwe Kleine-König
Signed-off-by: Russell King
---
 arch/arm/include/asm/memory.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index a8997d71084..fcb575747e5 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -116,6 +116,8 @@
 #define MODULES_END		(END_MEM)
 #define MODULES_VADDR		(PHYS_OFFSET)
 
+#define XIP_VIRT_ADDR(physaddr)	(physaddr)
+
 #endif /* !CONFIG_MMU */
 
 /*
-- 
cgit v1.2.3