author | Patrick McHardy <kaber@trash.net> | 2012-08-08 21:03:47 +0200
---|---|---
committer | Patrick McHardy <kaber@trash.net> | 2012-08-08 21:03:47 +0200
commit | d53b4ed072d9779cdf53582c46436dec06d0961f (patch) |
tree | ac95ecab33e31cd79aae69c475e8348adac51230 /arch/arm/include/asm |
parent | 5d4dff7f1011a81a693a9c7b1f6a0b9c842eb60c (diff) |
parent | 28a33cbc24e4256c143dce96c7d93bf423229f92 (diff) |
Merge tag 'v3.5' of 192.168.0.154:/repos/git/linux-2.6
Conflicts:
drivers/Kconfig
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'arch/arm/include/asm')
105 files changed, 2097 insertions, 2406 deletions
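Many of the hunks below come from the v3.5 side of the merge, which splits the old <asm/system.h> into dedicated headers (<asm/barrier.h>, <asm/cmpxchg.h>, <asm/cp15.h>, <asm/compiler.h>), renames the user-access assembler helper T() to TUSER(), and moves ARM DMA mapping onto struct dma_map_ops. For orientation only, here is a minimal sketch of what that header split means for existing callers; it is not part of the commit, and claim_slot() is an invented helper.

```c
/*
 * Illustrative sketch, not from the patch: per the atomic.h hunk below,
 * ARM's <asm/atomic.h> now pulls in <asm/barrier.h> and <asm/cmpxchg.h>
 * instead of <asm/system.h>, so cmpxchg()/smp_mb() remain available to
 * code that includes <linux/atomic.h>.
 */
#include <linux/atomic.h>
#include <linux/errno.h>

/* claim_slot() is a hypothetical helper, used only for illustration. */
static int claim_slot(unsigned long *owner, unsigned long me)
{
	/* cmpxchg() returns the previous value; 0 means the slot was free. */
	if (cmpxchg(owner, 0UL, me) != 0UL)
		return -EBUSY;

	smp_mb();	/* full barrier, now supplied via <asm/barrier.h> */
	return 0;
}
```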
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h new file mode 100644 index 00000000000..ed2e95d46e2 --- /dev/null +++ b/arch/arm/include/asm/arch_timer.h @@ -0,0 +1,19 @@ +#ifndef __ASMARM_ARCH_TIMER_H +#define __ASMARM_ARCH_TIMER_H + +#ifdef CONFIG_ARM_ARCH_TIMER +int arch_timer_of_register(void); +int arch_timer_sched_clock_init(void); +#else +static inline int arch_timer_of_register(void) +{ + return -ENXIO; +} + +static inline int arch_timer_sched_clock_init(void) +{ + return -ENXIO; +} +#endif + +#endif diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 29035e86a59..03fb93621d0 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -23,6 +23,8 @@ #include <asm/ptrace.h> #include <asm/domain.h> +#define IOMEM(x) (x) + /* * Endian independent macros for shifting bytes within registers. */ @@ -137,6 +139,11 @@ disable_irq .endm + .macro save_and_disable_irqs_notrace, oldcpsr + mrs \oldcpsr, cpsr + disable_irq_notrace + .endm + /* * Restore interrupt state previously stored in a register. We don't * guarantee that this will preserve the flags. @@ -187,6 +194,17 @@ #endif /* + * Instruction barrier + */ + .macro instr_sync +#if __LINUX_ARM_ARCH__ >= 7 + isb +#elif __LINUX_ARM_ARCH__ == 6 + mcr p15, 0, r0, c7, c5, 4 +#endif + .endm + +/* * SMP data memory barrier */ .macro smp_dmb mode @@ -226,7 +244,7 @@ */ #ifdef CONFIG_THUMB2_KERNEL - .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T() + .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER() 9999: .if \inc == 1 \instr\cond\()b\()\t\().w \reg, [\ptr, #\off] @@ -266,7 +284,7 @@ #else /* !CONFIG_THUMB2_KERNEL */ - .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T() + .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER() .rept \rept 9999: .if \inc == 1 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 86976d03438..c79f61faa3a 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -13,7 +13,9 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/system.h> +#include <linux/irqflags.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } @@ -241,7 +243,7 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } -static inline u64 atomic64_read(atomic64_t *v) +static inline u64 atomic64_read(const atomic64_t *v) { u64 result; diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h new file mode 100644 index 00000000000..05112380dc5 --- /dev/null +++ b/arch/arm/include/asm/barrier.h @@ -0,0 +1,69 @@ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#include <asm/outercache.h> + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb() __asm__ __volatile__ ("isb" : : : "memory") +#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") +#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : 
"memory") +#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#else +#define isb() __asm__ __volatile__ ("" : : : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include <mach/barriers.h> +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dsb() +#define wmb() mb() +#else +#include <asm/memory.h> +#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#endif + +#ifndef CONFIG_SMP +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() +#endif + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index f7419ef9c8f..e691ec91e4d 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -24,7 +24,7 @@ #endif #include <linux/compiler.h> -#include <asm/system.h> +#include <linux/irqflags.h> #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 9abe7a07d5a..7af5c6c3653 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -1,6 +1,7 @@ #ifndef _ASMARM_BUG_H #define _ASMARM_BUG_H +#include <linux/linkage.h> #ifdef CONFIG_BUG @@ -32,7 +33,6 @@ #define __BUG(__file, __line, __value) \ do { \ - BUILD_BUG_ON(sizeof(struct bug_entry) != 12); \ asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ "2:\t.asciz " #__file "\n" \ @@ -58,4 +58,33 @@ do { \ #include <asm-generic/bug.h> +struct pt_regs; +void die(const char *msg, struct pt_regs *regs, int err); + +struct siginfo; +void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, + unsigned long err, unsigned long trap); + +#ifdef CONFIG_ARM_LPAE +#define FAULT_CODE_ALIGNMENT 33 +#define FAULT_CODE_DEBUG 34 +#else +#define FAULT_CODE_ALIGNMENT 1 +#define FAULT_CODE_DEBUG 2 +#endif + +void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); + +void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); + +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +struct mm_struct; +extern void show_pte(struct mm_struct *mm, unsigned long addr); +extern void __show_regs(struct pt_regs *); + #endif diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index d5d8d5c7268..004c1bc95d2 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ 
-101,7 +101,7 @@ struct cpu_cache_fns { void (*flush_user_range)(unsigned long, unsigned long, unsigned int); void (*coherent_kern_range)(unsigned long, unsigned long); - void (*coherent_user_range)(unsigned long, unsigned long); + int (*coherent_user_range)(unsigned long, unsigned long); void (*flush_kern_dcache_area)(void *, size_t); void (*dma_map_area)(const void *, size_t, int); @@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void); extern void __cpuc_flush_user_all(void); extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); -extern void __cpuc_coherent_user_range(unsigned long, unsigned long); +extern int __cpuc_coherent_user_range(unsigned long, unsigned long); extern void __cpuc_flush_dcache_area(void *, size_t); /* @@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr * Harvard caches are synchronised for the user space address range. * This is used for the ARM private sys_cacheflush system call. */ -#define flush_cache_user_range(vma,start,end) \ +#define flush_cache_user_range(start,end) \ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) /* diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h new file mode 100644 index 00000000000..7eb18c1d8d6 --- /dev/null +++ b/arch/arm/include/asm/cmpxchg.h @@ -0,0 +1,248 @@ +#ifndef __ASM_ARM_CMPXCHG_H +#define __ASM_ARM_CMPXCHG_H + +#include <linux/irqflags.h> +#include <asm/barrier.h> + +#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) +/* + * On the StrongARM, "swp" is terminally broken since it bypasses the + * cache totally. This means that the cache becomes inconsistent, and, + * since we use normal loads/stores as well, this is really bad. + * Typically, this causes oopsen in filp_close, but could have other, + * more disastrous effects. There are two work-arounds: + * 1. Disable interrupts and emulate the atomic swap + * 2. Clean the cache, perform atomic swap, flush the cache + * + * We choose (1) since its the "easiest" to achieve here and is not + * dependent on the processor type. + * + * NOTE that this solution won't work on an SMP system, so explcitly + * forbid it here. 
+ */ +#define swp_is_buggy +#endif + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + extern void __bad_xchg(volatile void *, int); + unsigned long ret; +#ifdef swp_is_buggy + unsigned long flags; +#endif +#if __LINUX_ARM_ARCH__ >= 6 + unsigned int tmp; +#endif + + smp_mb(); + + switch (size) { +#if __LINUX_ARM_ARCH__ >= 6 + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#elif defined(swp_is_buggy) +#ifdef CONFIG_SMP +#error SMP is not supported on this platform +#endif + case 1: + raw_local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + raw_local_irq_restore(flags); + break; + + case 4: + raw_local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + raw_local_irq_restore(flags); + break; +#else + case 1: + asm volatile("@ __xchg1\n" + " swpb %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + " swp %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#endif + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + smp_mb(); + + return ret; +} + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg-local.h> + +#if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ + +#ifdef CONFIG_SMP +#error "SMP is not supported on this platform" +#endif + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include <asm-generic/cmpxchg.h> +#endif + +#else /* min ARCH >= ARMv6 */ + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +/* + * cmpxchg only support 32-bits operands on ARMv6. 
+ */ + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long oldval, res; + + switch (size) { +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ + case 1: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexb %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexbeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + case 2: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexh %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexheq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; +#endif + case 4: + do { + asm volatile("@ __cmpxchg4\n" + " ldrex %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + default: + __bad_cmpxchg(ptr, size); + oldval = 0; + } + + return oldval; +} + +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + smp_mb(); + ret = __cmpxchg(ptr, old, new, size); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + switch (size) { +#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ + case 1: + case 2: + ret = __cmpxchg_local_generic(ptr, old, new, size); + break; +#endif + default: + ret = __cmpxchg(ptr, old, new, size); + } + + return ret; +} + +#define cmpxchg_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +#define cmpxchg64(ptr, o, n) \ + ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ + atomic64_t, \ + counter), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +#define cmpxchg64_local(ptr, o, n) \ + ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ + local64_t, \ + a), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + +#endif /* __ASM_ARM_CMPXCHG_H */ diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h new file mode 100644 index 00000000000..8155db2f7fa --- /dev/null +++ b/arch/arm/include/asm/compiler.h @@ -0,0 +1,15 @@ +#ifndef __ASM_ARM_COMPILER_H +#define __ASM_ARM_COMPILER_H + +/* + * This is used to ensure the compiler did actually allocate the register we + * asked it for some inline assembly sequences. Apparently we can't trust + * the compiler from one version to another so a bit of paranoia won't hurt. + * This string is meant to be concatenated with the inline asm string and + * will cause compilation to stop on mismatch. 
+ * (for details, see gcc PR 15089) + */ +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + + +#endif /* __ASM_ARM_COMPILER_H */ diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h new file mode 100644 index 00000000000..5ef4d8015a6 --- /dev/null +++ b/arch/arm/include/asm/cp15.h @@ -0,0 +1,87 @@ +#ifndef __ASM_ARM_CP15_H +#define __ASM_ARM_CP15_H + +#include <asm/barrier.h> + +/* + * CR1 bits (CP#15 CR1) + */ +#define CR_M (1 << 0) /* MMU enable */ +#define CR_A (1 << 1) /* Alignment abort enable */ +#define CR_C (1 << 2) /* Dcache enable */ +#define CR_W (1 << 3) /* Write buffer enable */ +#define CR_P (1 << 4) /* 32-bit exception handler */ +#define CR_D (1 << 5) /* 32-bit data address range */ +#define CR_L (1 << 6) /* Implementation defined */ +#define CR_B (1 << 7) /* Big endian */ +#define CR_S (1 << 8) /* System MMU protection */ +#define CR_R (1 << 9) /* ROM MMU protection */ +#define CR_F (1 << 10) /* Implementation defined */ +#define CR_Z (1 << 11) /* Implementation defined */ +#define CR_I (1 << 12) /* Icache enable */ +#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ +#define CR_RR (1 << 14) /* Round Robin cache replacement */ +#define CR_L4 (1 << 15) /* LDR pc can set T bit */ +#define CR_DT (1 << 16) +#define CR_IT (1 << 18) +#define CR_ST (1 << 19) +#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ +#define CR_U (1 << 22) /* Unaligned access operation */ +#define CR_XP (1 << 23) /* Extended page tables */ +#define CR_VE (1 << 24) /* Vectored interrupts */ +#define CR_EE (1 << 25) /* Exception (Big) Endian */ +#define CR_TRE (1 << 28) /* TEX remap enable */ +#define CR_AFE (1 << 29) /* Access flag enable */ +#define CR_TE (1 << 30) /* Thumb exception enable */ + +#ifndef __ASSEMBLY__ + +#if __LINUX_ARM_ARCH__ >= 4 +#define vectors_high() (cr_alignment & CR_V) +#else +#define vectors_high() (0) +#endif + +extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ +extern unsigned long cr_alignment; /* defined in entry-armv.S */ + +static inline unsigned int get_cr(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); + return val; +} + +static inline void set_cr(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" + : : "r" (val) : "cc"); + isb(); +} + +#ifndef CONFIG_SMP +extern void adjust_cr(unsigned long mask, unsigned long set); +#endif + +#define CPACC_FULL(n) (3 << (n * 2)) +#define CPACC_SVC(n) (1 << (n * 2)) +#define CPACC_DISABLE(n) (0 << (n * 2)) + +static inline unsigned int get_copro_access(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" + : "=r" (val) : : "cc"); + return val; +} + +static inline void set_copro_access(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" + : : "r" (val) : "cc"); + isb(); +} + +#endif + +#endif diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index 793968173be..d797223b39d 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -16,7 +16,6 @@ struct cpuinfo_arm { struct cpu cpu; #ifdef CONFIG_SMP - struct task_struct *idle; unsigned int loops_per_jiffy; #endif }; diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h new file mode 100644 index 00000000000..2fca60ab513 --- /dev/null +++ b/arch/arm/include/asm/cpuidle.h @@ -0,0 +1,29 @@ +#ifndef __ASM_ARM_CPUIDLE_H +#define __ASM_ARM_CPUIDLE_H + +#ifdef CONFIG_CPU_IDLE +extern int arm_cpuidle_simple_enter(struct cpuidle_device 
*dev, + struct cpuidle_driver *drv, int index); +#else +static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { return -ENODEV; } +#endif + +/* Common ARM WFI state */ +#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\ + .enter = arm_cpuidle_simple_enter,\ + .exit_latency = 1,\ + .target_residency = 1,\ + .power_usage = p,\ + .flags = CPUIDLE_FLAG_TIME_VALID,\ + .name = "WFI",\ + .desc = "ARM WFI",\ +} + +/* + * in case power_specified == 1, give a default WFI power value needed + * by some governors + */ +#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX) + +#endif diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h new file mode 100644 index 00000000000..a0ada3ea435 --- /dev/null +++ b/arch/arm/include/asm/cti.h @@ -0,0 +1,179 @@ +#ifndef __ASMARM_CTI_H +#define __ASMARM_CTI_H + +#include <asm/io.h> + +/* The registers' definition is from section 3.2 of + * Embedded Cross Trigger Revision: r0p0 + */ +#define CTICONTROL 0x000 +#define CTISTATUS 0x004 +#define CTILOCK 0x008 +#define CTIPROTECTION 0x00C +#define CTIINTACK 0x010 +#define CTIAPPSET 0x014 +#define CTIAPPCLEAR 0x018 +#define CTIAPPPULSE 0x01c +#define CTIINEN 0x020 +#define CTIOUTEN 0x0A0 +#define CTITRIGINSTATUS 0x130 +#define CTITRIGOUTSTATUS 0x134 +#define CTICHINSTATUS 0x138 +#define CTICHOUTSTATUS 0x13c +#define CTIPERIPHID0 0xFE0 +#define CTIPERIPHID1 0xFE4 +#define CTIPERIPHID2 0xFE8 +#define CTIPERIPHID3 0xFEC +#define CTIPCELLID0 0xFF0 +#define CTIPCELLID1 0xFF4 +#define CTIPCELLID2 0xFF8 +#define CTIPCELLID3 0xFFC + +/* The below are from section 3.6.4 of + * CoreSight v1.0 Architecture Specification + */ +#define LOCKACCESS 0xFB0 +#define LOCKSTATUS 0xFB4 + +/* write this value to LOCKACCESS will unlock the module, and + * other value will lock the module + */ +#define LOCKCODE 0xC5ACCE55 + +/** + * struct cti - cross trigger interface struct + * @base: mapped virtual address for the cti base + * @irq: irq number for the cti + * @trig_out_for_irq: triger out number which will cause + * the @irq happen + * + * cti struct used to operate cti registers. + */ +struct cti { + void __iomem *base; + int irq; + int trig_out_for_irq; +}; + +/** + * cti_init - initialize the cti instance + * @cti: cti instance + * @base: mapped virtual address for the cti base + * @irq: irq number for the cti + * @trig_out: triger out number which will cause + * the @irq happen + * + * called by machine code to pass the board dependent + * @base, @irq and @trig_out to cti. + */ +static inline void cti_init(struct cti *cti, + void __iomem *base, int irq, int trig_out) +{ + cti->base = base; + cti->irq = irq; + cti->trig_out_for_irq = trig_out; +} + +/** + * cti_map_trigger - use the @chan to map @trig_in to @trig_out + * @cti: cti instance + * @trig_in: trigger in number + * @trig_out: trigger out number + * @channel: channel number + * + * This function maps one trigger in of @trig_in to one trigger + * out of @trig_out using the channel @chan. 
+ */ +static inline void cti_map_trigger(struct cti *cti, + int trig_in, int trig_out, int chan) +{ + void __iomem *base = cti->base; + unsigned long val; + + val = __raw_readl(base + CTIINEN + trig_in * 4); + val |= BIT(chan); + __raw_writel(val, base + CTIINEN + trig_in * 4); + + val = __raw_readl(base + CTIOUTEN + trig_out * 4); + val |= BIT(chan); + __raw_writel(val, base + CTIOUTEN + trig_out * 4); +} + +/** + * cti_enable - enable the cti module + * @cti: cti instance + * + * enable the cti module + */ +static inline void cti_enable(struct cti *cti) +{ + __raw_writel(0x1, cti->base + CTICONTROL); +} + +/** + * cti_disable - disable the cti module + * @cti: cti instance + * + * enable the cti module + */ +static inline void cti_disable(struct cti *cti) +{ + __raw_writel(0, cti->base + CTICONTROL); +} + +/** + * cti_irq_ack - clear the cti irq + * @cti: cti instance + * + * clear the cti irq + */ +static inline void cti_irq_ack(struct cti *cti) +{ + void __iomem *base = cti->base; + unsigned long val; + + val = __raw_readl(base + CTIINTACK); + val |= BIT(cti->trig_out_for_irq); + __raw_writel(val, base + CTIINTACK); +} + +/** + * cti_unlock - unlock cti module + * @cti: cti instance + * + * unlock the cti module, or else any writes to the cti + * module is not allowed. + */ +static inline void cti_unlock(struct cti *cti) +{ + void __iomem *base = cti->base; + unsigned long val; + + val = __raw_readl(base + LOCKSTATUS); + + if (val & 1) { + val = LOCKCODE; + __raw_writel(val, base + LOCKACCESS); + } +} + +/** + * cti_lock - lock cti module + * @cti: cti instance + * + * lock the cti module, so any writes to the cti + * module will be not allowed. + */ +static inline void cti_lock(struct cti *cti) +{ + void __iomem *base = cti->base; + unsigned long val; + + val = __raw_readl(base + LOCKSTATUS); + + if (!(val & 1)) { + val = ~LOCKCODE; + __raw_writel(val, base + LOCKACCESS); + } +} +#endif diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index 7aa368003b0..b69c0d3285f 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -7,12 +7,16 @@ #define ASMARM_DEVICE_H struct dev_archdata { + struct dma_map_ops *dma_ops; #ifdef CONFIG_DMABOUNCE struct dmabounce_device_info *dmabounce; #endif #ifdef CONFIG_IOMMU_API void *iommu; /* private IOMMU data */ #endif +#ifdef CONFIG_ARM_DMA_USE_IOMMU + struct dma_iommu_mapping *mapping; +#endif }; struct omap_device; diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index d3f0a9eee9f..fe92ccf1d0b 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -1,8 +1,8 @@ #ifndef __ASM_ARM_DIV64 #define __ASM_ARM_DIV64 -#include <asm/system.h> #include <linux/types.h> +#include <asm/compiler.h> /* * The semantics of do_div() are: diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h new file mode 100644 index 00000000000..3ed37b4d93d --- /dev/null +++ b/arch/arm/include/asm/dma-contiguous.h @@ -0,0 +1,15 @@ +#ifndef ASMARM_DMA_CONTIGUOUS_H +#define ASMARM_DMA_CONTIGUOUS_H + +#ifdef __KERNEL__ +#ifdef CONFIG_CMA + +#include <linux/types.h> +#include <asm-generic/dma-contiguous.h> + +void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size); + +#endif +#endif + +#endif diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h new file mode 100644 index 00000000000..799b09409fa --- /dev/null +++ b/arch/arm/include/asm/dma-iommu.h @@ -0,0 +1,34 @@ +#ifndef ASMARM_DMA_IOMMU_H +#define 
ASMARM_DMA_IOMMU_H + +#ifdef __KERNEL__ + +#include <linux/mm_types.h> +#include <linux/scatterlist.h> +#include <linux/dma-debug.h> +#include <linux/kmemcheck.h> + +struct dma_iommu_mapping { + /* iommu specific data */ + struct iommu_domain *domain; + + void *bitmap; + size_t bits; + unsigned int order; + dma_addr_t base; + + spinlock_t lock; + struct kref kref; +}; + +struct dma_iommu_mapping * +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, + int order); + +void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); + +int arm_iommu_attach_device(struct device *dev, + struct dma_iommu_mapping *mapping); + +#endif /* __KERNEL__ */ +#endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index cb3b7c981c4..bbef15d0489 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -5,11 +5,35 @@ #include <linux/mm_types.h> #include <linux/scatterlist.h> +#include <linux/dma-attrs.h> #include <linux/dma-debug.h> #include <asm-generic/dma-coherent.h> #include <asm/memory.h> +#define DMA_ERROR_CODE (~0) +extern struct dma_map_ops arm_dma_ops; + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->archdata.dma_ops) + return dev->archdata.dma_ops; + return &arm_dma_ops; +} + +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ + BUG_ON(!dev); + dev->archdata.dma_ops = ops; +} + +#include <asm-generic/dma-mapping-common.h> + +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + return get_dma_ops(dev)->set_dma_mask(dev, mask); +} + #ifdef __arch_page_to_dma #error Please update to __arch_pfn_to_dma #endif @@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) #endif /* - * The DMA API is built upon the notion of "buffer ownership". A buffer - * is either exclusively owned by the CPU (and therefore may be accessed - * by it) or exclusively owned by the DMA device. These helper functions - * represent the transitions between these two ownership states. - * - * Note, however, that on later ARMs, this notion does not work due to - * speculative prefetches. We model our approach on the assumption that - * the CPU does do speculative prefetches, which means we clean caches - * before transfers and delay cache invalidation until transfer completion. - * - * Private support functions: these are not part of the API and are - * liable to change. Drivers must not use these. 
- */ -static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, - enum dma_data_direction dir) -{ - extern void ___dma_single_cpu_to_dev(const void *, size_t, - enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_single_cpu_to_dev(kaddr, size, dir); -} - -static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, - enum dma_data_direction dir) -{ - extern void ___dma_single_dev_to_cpu(const void *, size_t, - enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_single_dev_to_cpu(kaddr, size, dir); -} - -static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, - size_t size, enum dma_data_direction dir) -{ - extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, - size_t, enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_page_cpu_to_dev(page, off, size, dir); -} - -static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, - size_t size, enum dma_data_direction dir) -{ - extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, - size_t, enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_page_dev_to_cpu(page, off, size, dir); -} - -extern int dma_supported(struct device *, u64); -extern int dma_set_mask(struct device *, u64); - -/* * DMA errors are defined by all-bits-set in the DMA address. */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return dma_addr == ~0; + return dma_addr == DMA_ERROR_CODE; } /* @@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, { } +extern int dma_supported(struct device *dev, u64 mask); + /** - * dma_alloc_coherent - allocate consistent memory for DMA + * arm_dma_alloc - allocate consistent memory for DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @size: required memory size * @handle: bus-specific DMA address + * @attrs: optinal attributes that specific mapping properties * - * Allocate some uncached, unbuffered memory for a device for - * performing DMA. This function allocates pages, and will - * return the CPU-viewed address, and sets @handle to be the - * device-viewed address. + * Allocate some memory for a device for performing DMA. This function + * allocates pages, and will return the CPU-viewed address, and sets @handle + * to be the device-viewed address. 
*/ -extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); +extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp, struct dma_attrs *attrs); + +#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + BUG_ON(!ops); + + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} /** - * dma_free_coherent - free memory allocated by dma_alloc_coherent + * arm_dma_free - free memory allocated by arm_dma_alloc * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @size: size of memory originally requested in dma_alloc_coherent * @cpu_addr: CPU-view address returned from dma_alloc_coherent * @handle: device-view address returned from dma_alloc_coherent + * @attrs: optinal attributes that specific mapping properties * * Free (and unmap) a DMA buffer previously allocated by - * dma_alloc_coherent(). + * arm_dma_alloc(). * * References to memory and mappings associated with cpu_addr/handle * during and after this call executing are illegal. */ -extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); +extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, struct dma_attrs *attrs); + +#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} /** - * dma_mmap_coherent - map a coherent DMA allocation into user space + * arm_dma_mmap - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @vma: vm_area_struct describing requested user mapping * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent * @handle: device-view address returned from dma_alloc_coherent * @size: size of memory originally requested in dma_alloc_coherent + * @attrs: optinal attributes that specific mapping properties * * Map a coherent DMA buffer previously allocated by dma_alloc_coherent * into user space. The coherent DMA buffer must not be freed by the * driver until the user space mapping has been released. */ -int dma_mmap_coherent(struct device *, struct vm_area_struct *, - void *, dma_addr_t, size_t); +extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs); +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) -/** - * dma_alloc_writecombine - allocate writecombining memory for DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @size: required memory size - * @handle: bus-specific DMA address - * - * Allocate some uncached, buffered memory for a device for - * performing DMA. This function allocates pages, and will - * return the CPU-viewed address, and sets @handle to be the - * device-viewed address. 
- */ -extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, - gfp_t); +static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); +} + +static inline void *dma_alloc_writecombine(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs); +} -#define dma_free_writecombine(dev,size,cpu_addr,handle) \ - dma_free_coherent(dev,size,cpu_addr,handle) +static inline void dma_free_writecombine(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); +} -int dma_mmap_writecombine(struct device *, struct vm_area_struct *, - void *, dma_addr_t, size_t); +static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); +} /* * This can be called during boot to increase the size of the consistent @@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *, */ extern void __init init_consistent_dma_size(unsigned long size); - -#ifdef CONFIG_DMABOUNCE /* * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" * and utilize bounce buffers as needed to work around limited DMA windows. @@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long, */ extern void dmabounce_unregister_dev(struct device *); -/* - * The DMA API, implemented by dmabounce.c. See below for descriptions. 
- */ -extern dma_addr_t __dma_map_page(struct device *, struct page *, - unsigned long, size_t, enum dma_data_direction); -extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, - enum dma_data_direction); - -/* - * Private functions - */ -int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); -int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); -#else -static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr, - unsigned long offset, size_t size, enum dma_data_direction dir) -{ - return 1; -} -static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, - unsigned long offset, size_t size, enum dma_data_direction dir) -{ - return 1; -} - - -static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir) -{ - __dma_page_cpu_to_dev(page, offset, size, dir); - return pfn_to_dma(dev, page_to_pfn(page)) + offset; -} - -static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), - handle & ~PAGE_MASK, size, dir); -} -#endif /* CONFIG_DMABOUNCE */ - -/** - * dma_map_single - map a single buffer for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @cpu_addr: CPU direct mapped address of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_single() or - * dma_sync_single_for_cpu(). - */ -static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction dir) -{ - unsigned long offset; - struct page *page; - dma_addr_t addr; - - BUG_ON(!virt_addr_valid(cpu_addr)); - BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); - BUG_ON(!valid_dma_direction(dir)); - - page = virt_to_page(cpu_addr); - offset = (unsigned long)cpu_addr & ~PAGE_MASK; - addr = __dma_map_page(dev, page, offset, size, dir); - debug_dma_map_page(dev, page, offset, size, dir, addr, true); - - return addr; -} - -/** - * dma_map_page - map a portion of a page for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @page: page that buffer resides in - * @offset: offset into page for start of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_page(). 
- */ -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir) -{ - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - - addr = __dma_map_page(dev, page, offset, size, dir); - debug_dma_map_page(dev, page, offset, size, dir, addr, false); - - return addr; -} - -/** - * dma_unmap_single - unmap a single buffer previously mapped - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_single) - * @dir: DMA transfer direction (same as passed to dma_map_single) - * - * Unmap a single streaming mode DMA translation. The handle and size - * must match what was provided in the previous dma_map_single() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, handle, size, dir, true); - __dma_unmap_page(dev, handle, size, dir); -} - -/** - * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_page) - * @dir: DMA transfer direction (same as passed to dma_map_page) - * - * Unmap a page streaming mode DMA translation. The handle and size - * must match what was provided in the previous dma_map_page() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, handle, size, dir, false); - __dma_unmap_page(dev, handle, size, dir); -} - -/** - * dma_sync_single_range_for_cpu - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @offset: offset of region to start sync - * @size: size of region to sync - * @dir: DMA transfer direction (same as passed to dma_map_single) - * - * Make physical memory consistent for a single streaming mode DMA - * translation after a transfer. - * - * If you perform a dma_map_single() but wish to interrogate the - * buffer using the cpu, yet do not wish to teardown the PCI dma - * mapping, you must call this function before doing so. At the - * next point you give the PCI dma address back to the card, you - * must first the perform a dma_sync_for_device, and then the - * device again owns the buffer. 
- */ -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t handle, unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - BUG_ON(!valid_dma_direction(dir)); - - debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir); - - if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) - return; - - __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t handle, unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - BUG_ON(!valid_dma_direction(dir)); - - debug_dma_sync_single_for_device(dev, handle + offset, size, dir); - - if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) - return; - - __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - dma_sync_single_range_for_device(dev, handle, 0, size, dir); -} /* * The scatter list versions of the above methods. */ -extern int dma_map_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction); -extern void dma_unmap_sg(struct device *, struct scatterlist *, int, +extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, + enum dma_data_direction, struct dma_attrs *attrs); +extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, + enum dma_data_direction, struct dma_attrs *attrs); +extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction); -extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, +extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction); -extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, - enum dma_data_direction); - #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 69a5b0b6455..5694a0d6576 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -19,7 +19,6 @@ * It should not be re-used except for that purpose. 
*/ #include <linux/spinlock.h> -#include <asm/system.h> #include <asm/scatterlist.h> #include <mach/isa-dma.h> diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index af18ceaacf5..6ddbe446425 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -10,6 +10,10 @@ #ifndef __ASM_PROC_DOMAIN_H #define __ASM_PROC_DOMAIN_H +#ifndef __ASSEMBLY__ +#include <asm/barrier.h> +#endif + /* * Domain numbers * @@ -56,13 +60,13 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_CPU_USE_DOMAINS -#define set_domain(x) \ - do { \ - __asm__ __volatile__( \ - "mcr p15, 0, %0, c3, c0 @ set domain" \ - : : "r" (x)); \ - isb(); \ - } while (0) +static inline void set_domain(unsigned val) +{ + asm volatile( + "mcr p15, 0, %0, c3, c0 @ set domain" + : : "r" (val)); + isb(); +} #define modify_domain(dom,type) \ do { \ @@ -74,8 +78,8 @@ } while (0) #else -#define set_domain(x) do { } while (0) -#define modify_domain(dom,type) do { } while (0) +static inline void set_domain(unsigned val) { } +static inline void modify_domain(unsigned dom, unsigned type) { } #endif /* @@ -83,9 +87,9 @@ * instructions (inline assembly) */ #ifdef CONFIG_CPU_USE_DOMAINS -#define T(instr) #instr "t" +#define TUSER(instr) #instr "t" #else -#define T(instr) #instr +#define TUSER(instr) #instr #endif #else /* __ASSEMBLY__ */ @@ -95,9 +99,9 @@ * instructions */ #ifdef CONFIG_CPU_USE_DOMAINS -#define T(instr) instr ## t +#define TUSER(instr) instr ## t #else -#define T(instr) instr +#define TUSER(instr) instr #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h new file mode 100644 index 00000000000..0df7a2c1fc3 --- /dev/null +++ b/arch/arm/include/asm/edac.h @@ -0,0 +1,48 @@ +/* + * Copyright 2011 Calxeda, Inc. + * Based on PPC version Copyright 2007 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef ASM_EDAC_H +#define ASM_EDAC_H +/* + * ECC atomic, DMA, SMP and interrupt safe scrub function. + * Implements the per arch atomic_scrub() that EDAC use for software + * ECC scrubbing. It reads memory and then writes back the original + * value, allowing the hardware to detect and correct memory errors. + */ +static inline void atomic_scrub(void *va, u32 size) +{ +#if __LINUX_ARM_ARCH__ >= 6 + unsigned int *virt_addr = va; + unsigned int temp, temp2; + unsigned int i; + + for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) { + /* Very carefully read and write to memory atomically + * so we are interrupt, DMA and SMP safe. 
+ */ + __asm__ __volatile__("\n" + "1: ldrex %0, [%2]\n" + " strex %1, %0, [%2]\n" + " teq %1, #0\n" + " bne 1b\n" + : "=&r"(temp), "=&r"(temp2) + : "r"(virt_addr) + : "cc"); + } +#endif +} + +#endif diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 0e9ce8d9686..38050b1c480 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -130,8 +130,4 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk -extern int vectors_user_mapping(void); -#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() -#define ARCH_HAS_SETUP_ADDITIONAL_PAGES - #endif diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S deleted file mode 100644 index 3ceb85e4385..00000000000 --- a/arch/arm/include/asm/entry-macro-vic2.S +++ /dev/null @@ -1,57 +0,0 @@ -/* arch/arm/include/asm/entry-macro-vic2.S - * - * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S - * - * Copyright 2008 Openmoko, Inc. - * Copyright 2008 Simtec Electronics - * http://armlinux.simtec.co.uk/ - * Ben Dooks <ben@simtec.co.uk> - * - * Low-level IRQ helper macros for a device with two VICs - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. -*/ - -/* This should be included from <mach/entry-macro.S> with the necessary - * defines for virtual addresses and IRQ bases for the two vics. - * - * The code needs the following defined: - * IRQ_VIC0_BASE IRQ number of VIC0's first IRQ - * IRQ_VIC1_BASE IRQ number of VIC1's first IRQ - * VA_VIC0 Virtual address of VIC0 - * VA_VIC1 Virtual address of VIC1 - * - * Note, code assumes VIC0's virtual address is an ARM immediate constant - * away from VIC1. 
-*/ - -#include <asm/hardware/vic.h> - - .macro disable_fiq - .endm - - .macro get_irqnr_preamble, base, tmp - ldr \base, =VA_VIC0 - .endm - - .macro arch_ret_to_user, tmp1, tmp2 - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - - @ check the vic0 - mov \irqnr, #IRQ_VIC0_BASE + 31 - ldr \irqstat, [ \base, # VIC_IRQ_STATUS ] - teq \irqstat, #0 - - @ otherwise try vic1 - addeq \tmp, \base, #(VA_VIC1 - VA_VIC0) - addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE) - ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ] - teqeq \irqstat, #0 - - clzne \irqstat, \irqstat - subne \irqnr, \irqnr, \irqstat - .endm diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h new file mode 100644 index 00000000000..7c4fbef72b3 --- /dev/null +++ b/arch/arm/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __ASM_ARM_EXEC_H +#define __ASM_ARM_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_ARM_EXEC_H */ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 253cc86318b..e42cf597f6e 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -19,6 +19,7 @@ " .long 1b, 4f, 2b, 4f\n" \ " .popsection\n" \ " .pushsection .fixup,\"ax\"\n" \ + " .align 2\n" \ "4: mov %0, " err_reg "\n" \ " b 3b\n" \ " .popsection" @@ -75,9 +76,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ __asm__ __volatile__( \ - "1: " T(ldr) " %1, [%3]\n" \ + "1: " TUSER(ldr) " %1, [%3]\n" \ " " insn "\n" \ - "2: " T(str) " %0, [%3]\n" \ + "2: " TUSER(str) " %0, [%3]\n" \ " mov %0, #0\n" \ __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ @@ -95,10 +96,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: " T(ldr) " %1, [%4]\n" + "1: " TUSER(ldr) " %1, [%4]\n" " teq %1, %2\n" " it eq @ explicit IT needed for the 2b label\n" - "2: " T(streq) " %3, [%4]\n" + "2: " TUSER(streq) " %3, [%4]\n" __futex_atomic_ex_table("%5") : "+r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index 354d571e8bc..8cacbcda76d 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -31,14 +31,6 @@ #undef CPU_DABORT_HANDLER #undef MULTI_DABORT -#if defined(CONFIG_CPU_ARM610) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm6_data_abort -# endif -#endif - #if defined(CONFIG_CPU_ARM710) # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h index e2be7f14266..ac1dd54724b 100644 --- a/arch/arm/include/asm/glue-proc.h +++ b/arch/arm/include/asm/glue-proc.h @@ -23,15 +23,6 @@ * CPU_NAME - the prefix for CPU related functions */ -#ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm6 -# endif -#endif - #ifdef CONFIG_CPU_ARM7TDMI # ifdef CPU_NAME # undef MULTI_CPU @@ -41,15 +32,6 @@ # endif #endif -#ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7 -# endif -#endif - #ifdef CONFIG_CPU_ARM720T # ifdef CPU_NAME # undef MULTI_CPU diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h index 11ad0bfbb0a..c402e9b31f4 100644 --- a/arch/arm/include/asm/gpio.h +++ b/arch/arm/include/asm/gpio.h @@ -1,6 +1,10 @@ #ifndef 
_ARCH_ARM_GPIO_H #define _ARCH_ARM_GPIO_H +#if CONFIG_ARCH_NR_GPIO > 0 +#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO +#endif + /* not all ARM platforms necessarily support this API ... */ #include <mach/gpio.h> diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index ddf07a92a6c..436e60b2cf7 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -27,23 +27,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu); #define arch_irq_stat_cpu smp_irq_stat_cpu -#if NR_IRQS > 512 -#define HARDIRQ_BITS 10 -#elif NR_IRQS > 256 -#define HARDIRQ_BITS 9 -#else -#define HARDIRQ_BITS 8 -#endif - -/* - * The hardirq mask has to be large enough to have space - * for potentially all IRQ sources in the system nesting - * on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! -#endif - #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h index c0f4e7bf22d..d6030ff599d 100644 --- a/arch/arm/include/asm/hardware/arm_timer.h +++ b/arch/arm/include/asm/hardware/arm_timer.h @@ -9,7 +9,12 @@ * * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview * can have 16-bit or 32-bit selectable via a bit in the control register. + * + * Every SP804 contains two identical timers. */ +#define TIMER_1_BASE 0x00 +#define TIMER_2_BASE 0x20 + #define TIMER_LOAD 0x00 /* ACVR rw */ #define TIMER_VALUE 0x04 /* ACVR ro */ #define TIMER_CTRL 0x08 /* ACVR rw */ diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 7df239bcdf2..c4c87bc1223 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -103,11 +103,11 @@ #define L2X0_ADDR_FILTER_EN 1 #ifndef __ASSEMBLY__ -extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); +extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask); #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) -extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); +extern int l2x0_of_init(u32 aux_val, u32 aux_mask); #else -static inline int l2x0_of_init(__u32 aux_val, __u32 aux_mask) +static inline int l2x0_of_init(u32 aux_val, u32 aux_mask) { return -ENODEV; } diff --git a/arch/arm/include/asm/hardware/clps7111.h b/arch/arm/include/asm/hardware/clps7111.h deleted file mode 100644 index 44477225aed..00000000000 --- a/arch/arm/include/asm/hardware/clps7111.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * arch/arm/include/asm/hardware/clps7111.h - * - * This file contains the hardware definitions of the CLPS7111 internal - * registers. - * - * Copyright (C) 2000 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef __ASM_HARDWARE_CLPS7111_H -#define __ASM_HARDWARE_CLPS7111_H - -#define CLPS7111_PHYS_BASE (0x80000000) - -#ifndef __ASSEMBLY__ -#define clps_readb(off) __raw_readb(CLPS7111_BASE + (off)) -#define clps_readw(off) __raw_readw(CLPS7111_BASE + (off)) -#define clps_readl(off) __raw_readl(CLPS7111_BASE + (off)) -#define clps_writeb(val,off) __raw_writeb(val, CLPS7111_BASE + (off)) -#define clps_writew(val,off) __raw_writew(val, CLPS7111_BASE + (off)) -#define clps_writel(val,off) __raw_writel(val, CLPS7111_BASE + (off)) -#endif - -#define PADR (0x0000) -#define PBDR (0x0001) -#define PDDR (0x0003) -#define PADDR (0x0040) -#define PBDDR (0x0041) -#define PDDDR (0x0043) -#define PEDR (0x0080) -#define PEDDR (0x00c0) -#define SYSCON1 (0x0100) -#define SYSFLG1 (0x0140) -#define MEMCFG1 (0x0180) -#define MEMCFG2 (0x01c0) -#define DRFPR (0x0200) -#define INTSR1 (0x0240) -#define INTMR1 (0x0280) -#define LCDCON (0x02c0) -#define TC1D (0x0300) -#define TC2D (0x0340) -#define RTCDR (0x0380) -#define RTCMR (0x03c0) -#define PMPCON (0x0400) -#define CODR (0x0440) -#define UARTDR1 (0x0480) -#define UBRLCR1 (0x04c0) -#define SYNCIO (0x0500) -#define PALLSW (0x0540) -#define PALMSW (0x0580) -#define STFCLR (0x05c0) -#define BLEOI (0x0600) -#define MCEOI (0x0640) -#define TEOI (0x0680) -#define TC1EOI (0x06c0) -#define TC2EOI (0x0700) -#define RTCEOI (0x0740) -#define UMSEOI (0x0780) -#define COEOI (0x07c0) -#define HALT (0x0800) -#define STDBY (0x0840) - -#define FBADDR (0x1000) -#define SYSCON2 (0x1100) -#define SYSFLG2 (0x1140) -#define INTSR2 (0x1240) -#define INTMR2 (0x1280) -#define UARTDR2 (0x1480) -#define UBRLCR2 (0x14c0) -#define SS2DR (0x1500) -#define SRXEOF (0x1600) -#define SS2POP (0x16c0) -#define KBDEOI (0x1700) - -/* common bits: SYSCON1 / SYSCON2 */ -#define SYSCON_UARTEN (1 << 8) - -#define SYSCON1_KBDSCAN(x) ((x) & 15) -#define SYSCON1_KBDSCANMASK (15) -#define SYSCON1_TC1M (1 << 4) -#define SYSCON1_TC1S (1 << 5) -#define SYSCON1_TC2M (1 << 6) -#define SYSCON1_TC2S (1 << 7) -#define SYSCON1_UART1EN SYSCON_UARTEN -#define SYSCON1_BZTOG (1 << 9) -#define SYSCON1_BZMOD (1 << 10) -#define SYSCON1_DBGEN (1 << 11) -#define SYSCON1_LCDEN (1 << 12) -#define SYSCON1_CDENTX (1 << 13) -#define SYSCON1_CDENRX (1 << 14) -#define SYSCON1_SIREN (1 << 15) -#define SYSCON1_ADCKSEL(x) (((x) & 3) << 16) -#define SYSCON1_ADCKSEL_MASK (3 << 16) -#define SYSCON1_EXCKEN (1 << 18) -#define SYSCON1_WAKEDIS (1 << 19) -#define SYSCON1_IRTXM (1 << 20) - -/* common bits: SYSFLG1 / SYSFLG2 */ -#define SYSFLG_UBUSY (1 << 11) -#define SYSFLG_URXFE (1 << 22) -#define SYSFLG_UTXFF (1 << 23) - -#define SYSFLG1_MCDR (1 << 0) -#define SYSFLG1_DCDET (1 << 1) -#define SYSFLG1_WUDR (1 << 2) -#define SYSFLG1_WUON (1 << 3) -#define SYSFLG1_CTS (1 << 8) -#define SYSFLG1_DSR (1 << 9) -#define SYSFLG1_DCD (1 << 10) -#define SYSFLG1_UBUSY SYSFLG_UBUSY -#define SYSFLG1_NBFLG (1 << 12) -#define SYSFLG1_RSTFLG (1 << 13) -#define SYSFLG1_PFFLG (1 << 14) -#define SYSFLG1_CLDFLG (1 << 15) -#define SYSFLG1_URXFE SYSFLG_URXFE -#define SYSFLG1_UTXFF SYSFLG_UTXFF -#define SYSFLG1_CRXFE (1 << 24) -#define SYSFLG1_CTXFF (1 << 25) -#define SYSFLG1_SSIBUSY (1 << 26) -#define SYSFLG1_ID (1 << 29) - -#define SYSFLG2_SSRXOF (1 << 0) -#define SYSFLG2_RESVAL (1 << 1) -#define SYSFLG2_RESFRM (1 << 2) -#define SYSFLG2_SS2RXFE (1 << 
3) -#define SYSFLG2_SS2TXFF (1 << 4) -#define SYSFLG2_SS2TXUF (1 << 5) -#define SYSFLG2_CKMODE (1 << 6) -#define SYSFLG2_UBUSY SYSFLG_UBUSY -#define SYSFLG2_URXFE SYSFLG_URXFE -#define SYSFLG2_UTXFF SYSFLG_UTXFF - -#define LCDCON_GSEN (1 << 30) -#define LCDCON_GSMD (1 << 31) - -#define SYSCON2_SERSEL (1 << 0) -#define SYSCON2_KBD6 (1 << 1) -#define SYSCON2_DRAMZ (1 << 2) -#define SYSCON2_KBWEN (1 << 3) -#define SYSCON2_SS2TXEN (1 << 4) -#define SYSCON2_PCCARD1 (1 << 5) -#define SYSCON2_PCCARD2 (1 << 6) -#define SYSCON2_SS2RXEN (1 << 7) -#define SYSCON2_UART2EN SYSCON_UARTEN -#define SYSCON2_SS2MAEN (1 << 9) -#define SYSCON2_OSTB (1 << 12) -#define SYSCON2_CLKENSL (1 << 13) -#define SYSCON2_BUZFREQ (1 << 14) - -/* common bits: UARTDR1 / UARTDR2 */ -#define UARTDR_FRMERR (1 << 8) -#define UARTDR_PARERR (1 << 9) -#define UARTDR_OVERR (1 << 10) - -/* common bits: UBRLCR1 / UBRLCR2 */ -#define UBRLCR_BAUD_MASK ((1 << 12) - 1) -#define UBRLCR_BREAK (1 << 12) -#define UBRLCR_PRTEN (1 << 13) -#define UBRLCR_EVENPRT (1 << 14) -#define UBRLCR_XSTOP (1 << 15) -#define UBRLCR_FIFOEN (1 << 16) -#define UBRLCR_WRDLEN5 (0 << 17) -#define UBRLCR_WRDLEN6 (1 << 17) -#define UBRLCR_WRDLEN7 (2 << 17) -#define UBRLCR_WRDLEN8 (3 << 17) -#define UBRLCR_WRDLEN_MASK (3 << 17) - -#define SYNCIO_SMCKEN (1 << 13) -#define SYNCIO_TXFRMEN (1 << 14) - -#endif /* __ASM_HARDWARE_CLPS7111_H */ diff --git a/arch/arm/include/asm/hardware/cs89712.h b/arch/arm/include/asm/hardware/cs89712.h deleted file mode 100644 index f75626933e9..00000000000 --- a/arch/arm/include/asm/hardware/cs89712.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * arch/arm/include/asm/hardware/cs89712.h - * - * This file contains the hardware definitions of the CS89712 - * additional internal registers. - * - * Copyright (C) 2001 Thomas Gleixner autronix automation <gleixner@autronix.de> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef __ASM_HARDWARE_CS89712_H -#define __ASM_HARDWARE_CS89712_H - -/* -* CS89712 additional registers -*/ - -#define PCDR 0x0002 /* Port C Data register ---------------------------- */ -#define PCDDR 0x0042 /* Port C Data Direction register ------------------ */ -#define SDCONF 0x2300 /* SDRAM Configuration register ---------------------*/ -#define SDRFPR 0x2340 /* SDRAM Refresh period register --------------------*/ - -#define SDCONF_ACTIVE (1 << 10) -#define SDCONF_CLKCTL (1 << 9) -#define SDCONF_WIDTH_4 (0 << 7) -#define SDCONF_WIDTH_8 (1 << 7) -#define SDCONF_WIDTH_16 (2 << 7) -#define SDCONF_WIDTH_32 (3 << 7) -#define SDCONF_SIZE_16 (0 << 5) -#define SDCONF_SIZE_64 (1 << 5) -#define SDCONF_SIZE_128 (2 << 5) -#define SDCONF_SIZE_256 (3 << 5) -#define SDCONF_CASLAT_2 (2) -#define SDCONF_CASLAT_3 (3) - -#endif /* __ASM_HARDWARE_CS89712_H */ diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S deleted file mode 100644 index 74ebc803904..00000000000 --- a/arch/arm/include/asm/hardware/entry-macro-gic.S +++ /dev/null @@ -1,60 +0,0 @@ -/* - * arch/arm/include/asm/hardware/entry-macro-gic.S - * - * Low-level IRQ helper macros for GIC - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include <asm/hardware/gic.h> - -#ifndef HAVE_GET_IRQNR_PREAMBLE - .macro get_irqnr_preamble, base, tmp - ldr \base, =gic_cpu_base_addr - ldr \base, [\base] - .endm -#endif - -/* - * The interrupt numbering scheme is defined in the - * interrupt controller spec. To wit: - * - * Interrupts 0-15 are IPI - * 16-31 are local. We allow 30 to be used for the watchdog. - * 32-1020 are global - * 1021-1022 are reserved - * 1023 is "spurious" (no interrupt) - * - * A simple read from the controller will tell us the number of the highest - * priority enabled interrupt. We then just need to check whether it is in the - * valid range for an IRQ (30-1020 inclusive). - */ - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - - ldr \irqstat, [\base, #GIC_CPU_INTACK] - /* bits 12-10 = src CPU, 9-0 = int # */ - - ldr \tmp, =1021 - bic \irqnr, \irqstat, #0x1c00 - cmp \irqnr, #15 - cmpcc \irqnr, \irqnr - cmpne \irqnr, \tmp - cmpcs \irqnr, \irqnr - .endm - -/* We assume that irqstat (the raw value of the IRQ acknowledge - * register) is preserved from the macro above. - * If there is an IPI, we immediately signal end of interrupt on the - * controller, since this requires the original irqstat value which - * we won't easily be able to recreate later. 
- */ - - .macro test_for_ipi, irqnr, irqstat, base, tmp - bic \irqnr, \irqstat, #0x1c00 - cmp \irqnr, #16 - strcc \irqstat, [\base, #GIC_CPU_EOI] - cmpcs \irqnr, \irqnr - .endm diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S index e0af4983723..8c215acd9b5 100644 --- a/arch/arm/include/asm/hardware/entry-macro-iomd.S +++ b/arch/arm/include/asm/hardware/entry-macro-iomd.S @@ -11,14 +11,6 @@ /* IOC / IOMD based hardware */ #include <asm/hardware/iomd.h> - .macro disable_fiq - mov r12, #ioc_base_high - .if ioc_base_low - orr r12, r12, #ioc_base_low - .endif - strb r12, [r12, #0x38] @ Disable FIQ register - .endm - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first ldr \tmp, =irq_prio_h diff --git a/arch/arm/include/asm/hardware/ep7211.h b/arch/arm/include/asm/hardware/ep7211.h deleted file mode 100644 index 654d5f625c4..00000000000 --- a/arch/arm/include/asm/hardware/ep7211.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * arch/arm/include/asm/hardware/ep7211.h - * - * This file contains the hardware definitions of the EP7211 internal - * registers. - * - * Copyright (C) 2001 Blue Mug, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef __ASM_HARDWARE_EP7211_H -#define __ASM_HARDWARE_EP7211_H - -#include <asm/hardware/clps7111.h> - -/* - * define EP7211_BASE to be the base address of the region - * you want to access. - */ - -#define EP7211_PHYS_BASE (0x80000000) - -/* - * XXX miket@bluemug.com: need to introduce EP7211 registers (those not - * present in 7212) here. - */ - -#endif /* __ASM_HARDWARE_EP7211_H */ diff --git a/arch/arm/include/asm/hardware/ep7212.h b/arch/arm/include/asm/hardware/ep7212.h deleted file mode 100644 index 3b43bbeaf1d..00000000000 --- a/arch/arm/include/asm/hardware/ep7212.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * arch/arm/include/asm/hardware/ep7212.h - * - * This file contains the hardware definitions of the EP7212 internal - * registers. - * - * Copyright (C) 2000 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef __ASM_HARDWARE_EP7212_H -#define __ASM_HARDWARE_EP7212_H - -/* - * define EP7212_BASE to be the base address of the region - * you want to access. - */ - -#define EP7212_PHYS_BASE (0x80000000) - -#ifndef __ASSEMBLY__ -#define ep_readl(off) __raw_readl(EP7212_BASE + (off)) -#define ep_writel(val,off) __raw_writel(val, EP7212_BASE + (off)) -#endif - -/* - * These registers are specific to the EP7212 only - */ -#define DAIR 0x2000 -#define DAIR0 0x2040 -#define DAIDR1 0x2080 -#define DAIDR2 0x20c0 -#define DAISR 0x2100 -#define SYSCON3 0x2200 -#define INTSR3 0x2240 -#define INTMR3 0x2280 -#define LEDFLSH 0x22c0 - -#define DAIR_DAIEN (1 << 16) -#define DAIR_ECS (1 << 17) -#define DAIR_LCTM (1 << 19) -#define DAIR_LCRM (1 << 20) -#define DAIR_RCTM (1 << 21) -#define DAIR_RCRM (1 << 22) -#define DAIR_LBM (1 << 23) - -#define DAIDR2_FIFOEN (1 << 15) -#define DAIDR2_FIFOLEFT (0x0d << 16) -#define DAIDR2_FIFORIGHT (0x11 << 16) - -#define DAISR_RCTS (1 << 0) -#define DAISR_RCRS (1 << 1) -#define DAISR_LCTS (1 << 2) -#define DAISR_LCRS (1 << 3) -#define DAISR_RCTU (1 << 4) -#define DAISR_RCRO (1 << 5) -#define DAISR_LCTU (1 << 6) -#define DAISR_LCRO (1 << 7) -#define DAISR_RCNF (1 << 8) -#define DAISR_RCNE (1 << 9) -#define DAISR_LCNF (1 << 10) -#define DAISR_LCNE (1 << 11) -#define DAISR_FIFO (1 << 12) - -#define SYSCON3_ADCCON (1 << 0) -#define SYSCON3_DAISEL (1 << 3) -#define SYSCON3_ADCCKNSEN (1 << 4) -#define SYSCON3_FASTWAKE (1 << 8) -#define SYSCON3_DAIEN (1 << 9) - -#endif /* __ASM_HARDWARE_EP7212_H */ diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 3e91f22046f..4b1ce6cd477 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -36,30 +36,22 @@ #include <linux/irqdomain.h> struct device_node; -extern void __iomem *gic_cpu_base_addr; extern struct irq_chip gic_arch_extn; -void gic_init(unsigned int, int, void __iomem *, void __iomem *); +void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, + u32 offset, struct device_node *); int gic_of_init(struct device_node *node, struct device_node *parent); void gic_secondary_init(unsigned int); +void gic_handle_irq(struct pt_regs *regs); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); -struct gic_chip_data { - void __iomem *dist_base; - void __iomem *cpu_base; -#ifdef CONFIG_CPU_PM - u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; - u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; - u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; - u32 __percpu *saved_ppi_enable; - u32 __percpu *saved_ppi_conf; -#endif -#ifdef CONFIG_IRQ_DOMAIN - struct irq_domain domain; -#endif - unsigned int gic_irqs; -}; +static inline void gic_init(unsigned int nr, int start, + void __iomem *dist , void __iomem *cpu) +{ + gic_init_bases(nr, start, dist, cpu, 0, NULL); +} + #endif #endif diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 5daea2961d4..2ff2c75a463 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -231,9 +231,13 @@ extern int iop3xx_get_init_atu(void); #ifndef __ASSEMBLY__ + +#include <linux/types.h> + void iop3xx_map_io(void); void iop_init_cp6_handler(void); void 
iop_init_time(unsigned long tickrate); +void iop3xx_restart(char, const char *); static inline u32 read_tmr0(void) { diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 59b8c3892f7..122f86d8c99 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -49,7 +49,6 @@ struct iop_adma_device { /** * struct iop_adma_chan - internal representation of an ADMA device * @pending: allows batching of hardware operations - * @completed_cookie: identifier for the most recently completed operation * @lock: serializes enqueue/dequeue operations to the slot pool * @mmr_base: memory mapped register base * @chain: device chain view of the descriptors @@ -62,7 +61,6 @@ struct iop_adma_device { */ struct iop_adma_chan { int pending; - dma_cookie_t completed_cookie; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; struct list_head chain; diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index 43cab498bc2..d36a73d7c0e 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -9,6 +9,9 @@ #ifndef __ASM_HARDWARE_IT8152_H #define __ASM_HARDWARE_IT8152_H + +#include <mach/irqs.h> + extern void __iomem *it8152_base_address; #define IT8152_IO_BASE (it8152_base_address + 0x03e00000) @@ -107,6 +110,6 @@ extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); extern void it8152_init_irq(void); extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); -extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys); +extern struct pci_ops it8152_ops; #endif /* __ASM_HARDWARE_IT8152_H */ diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h index 33c78d7af2e..4eea2107214 100644 --- a/arch/arm/include/asm/hardware/pl080.h +++ b/arch/arm/include/asm/hardware/pl080.h @@ -102,6 +102,8 @@ #define PL080_WIDTH_16BIT (0x1) #define PL080_WIDTH_32BIT (0x2) +#define PL080N_CONFIG_ITPROT (1 << 20) +#define PL080N_CONFIG_SECPROT (1 << 19) #define PL080_CONFIG_HALT (1 << 18) #define PL080_CONFIG_ACTIVE (1 << 17) /* RO */ #define PL080_CONFIG_LOCK (1 << 16) diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h deleted file mode 100644 index 575fa8186ca..00000000000 --- a/arch/arm/include/asm/hardware/pl330.h +++ /dev/null @@ -1,217 +0,0 @@ -/* linux/include/asm/hardware/pl330.h - * - * Copyright (C) 2010 Samsung Electronics Co. Ltd. - * Jaswinder Singh <jassi.brar@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __PL330_CORE_H -#define __PL330_CORE_H - -#define PL330_MAX_CHAN 8 -#define PL330_MAX_IRQS 32 -#define PL330_MAX_PERI 32 - -enum pl330_srccachectrl { - SCCTRL0 = 0, /* Noncacheable and nonbufferable */ - SCCTRL1, /* Bufferable only */ - SCCTRL2, /* Cacheable, but do not allocate */ - SCCTRL3, /* Cacheable and bufferable, but do not allocate */ - SINVALID1, - SINVALID2, - SCCTRL6, /* Cacheable write-through, allocate on reads only */ - SCCTRL7, /* Cacheable write-back, allocate on reads only */ -}; - -enum pl330_dstcachectrl { - DCCTRL0 = 0, /* Noncacheable and nonbufferable */ - DCCTRL1, /* Bufferable only */ - DCCTRL2, /* Cacheable, but do not allocate */ - DCCTRL3, /* Cacheable and bufferable, but do not allocate */ - DINVALID1 = 8, - DINVALID2, - DCCTRL6, /* Cacheable write-through, allocate on writes only */ - DCCTRL7, /* Cacheable write-back, allocate on writes only */ -}; - -/* Populated by the PL330 core driver for DMA API driver's info */ -struct pl330_config { - u32 periph_id; - u32 pcell_id; -#define DMAC_MODE_NS (1 << 0) - unsigned int mode; - unsigned int data_bus_width:10; /* In number of bits */ - unsigned int data_buf_dep:10; - unsigned int num_chan:4; - unsigned int num_peri:6; - u32 peri_ns; - unsigned int num_events:6; - u32 irq_ns; -}; - -/* Handle to the DMAC provided to the PL330 core */ -struct pl330_info { - /* Owning device */ - struct device *dev; - /* Size of MicroCode buffers for each channel. */ - unsigned mcbufsz; - /* ioremap'ed address of PL330 registers. */ - void __iomem *base; - /* Client can freely use it. */ - void *client_data; - /* PL330 core data, Client must not touch it. */ - void *pl330_data; - /* Populated by the PL330 core driver during pl330_add */ - struct pl330_config pcfg; - /* - * If the DMAC has some reset mechanism, then the - * client may want to provide pointer to the method. - */ - void (*dmac_reset)(struct pl330_info *pi); -}; - -enum pl330_byteswap { - SWAP_NO = 0, - SWAP_2, - SWAP_4, - SWAP_8, - SWAP_16, -}; - -/** - * Request Configuration. - * The PL330 core does not modify this and uses the last - * working configuration if the request doesn't provide any. - * - * The Client may want to provide this info only for the - * first request and a request with new settings. - */ -struct pl330_reqcfg { - /* Address Incrementing */ - unsigned dst_inc:1; - unsigned src_inc:1; - - /* - * For now, the SRC & DST protection levels - * and burst size/length are assumed same. - */ - bool nonsecure; - bool privileged; - bool insnaccess; - unsigned brst_len:5; - unsigned brst_size:3; /* in power of 2 */ - - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; - enum pl330_byteswap swap; -}; - -/* - * One cycle of DMAC operation. - * There may be more than one xfer in a request. - */ -struct pl330_xfer { - u32 src_addr; - u32 dst_addr; - /* Size to xfer */ - u32 bytes; - /* - * Pointer to next xfer in the list. - * The last xfer in the req must point to NULL. - */ - struct pl330_xfer *next; -}; - -/* The xfer callbacks are made with one of these arguments. */ -enum pl330_op_err { - /* The all xfers in the request were success. */ - PL330_ERR_NONE, - /* If req aborted due to global error. */ - PL330_ERR_ABORT, - /* If req failed due to problem with Channel. */ - PL330_ERR_FAIL, -}; - -enum pl330_reqtype { - MEMTOMEM, - MEMTODEV, - DEVTOMEM, - DEVTODEV, -}; - -/* A request defining Scatter-Gather List ending with NULL xfer. */ -struct pl330_req { - enum pl330_reqtype rqtype; - /* Index of peripheral for the xfer. 
*/ - unsigned peri:5; - /* Unique token for this xfer, set by the client. */ - void *token; - /* Callback to be called after xfer. */ - void (*xfer_cb)(void *token, enum pl330_op_err err); - /* If NULL, req will be done at last set parameters. */ - struct pl330_reqcfg *cfg; - /* Pointer to first xfer in the request. */ - struct pl330_xfer *x; -}; - -/* - * To know the status of the channel and DMAC, the client - * provides a pointer to this structure. The PL330 core - * fills it with current information. - */ -struct pl330_chanstatus { - /* - * If the DMAC engine halted due to some error, - * the client should remove-add DMAC. - */ - bool dmac_halted; - /* - * If channel is halted due to some error, - * the client should ABORT/FLUSH and START the channel. - */ - bool faulting; - /* Location of last load */ - u32 src_addr; - /* Location of last store */ - u32 dst_addr; - /* - * Pointer to the currently active req, NULL if channel is - * inactive, even though the requests may be present. - */ - struct pl330_req *top_req; - /* Pointer to req waiting second in the queue if any. */ - struct pl330_req *wait_req; -}; - -enum pl330_chan_op { - /* Start the channel */ - PL330_OP_START, - /* Abort the active xfer */ - PL330_OP_ABORT, - /* Stop xfer and flush queue */ - PL330_OP_FLUSH, -}; - -extern int pl330_add(struct pl330_info *); -extern void pl330_del(struct pl330_info *pi); -extern int pl330_update(const struct pl330_info *pi); -extern void pl330_release_channel(void *ch_id); -extern void *pl330_request_channel(const struct pl330_info *pi); -extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus); -extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op); -extern int pl330_submit_req(void *ch_id, struct pl330_req *r); - -#endif /* __PL330_CORE_H */ diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index 92ed254c175..7c2bbc7f0be 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -132,34 +132,10 @@ #define SKPCR_DCLKEN (1<<7) #define SKPCR_PWMCLKEN (1<<8) -/* - * USB Host controller - */ +/* USB Host controller */ #define SA1111_USB 0x0400 /* - * Offsets from SA1111_USB_BASE - */ -#define SA1111_USB_STATUS 0x0118 -#define SA1111_USB_RESET 0x011c -#define SA1111_USB_IRQTEST 0x0120 - -#define USB_RESET_FORCEIFRESET (1 << 0) -#define USB_RESET_FORCEHCRESET (1 << 1) -#define USB_RESET_CLKGENRESET (1 << 2) -#define USB_RESET_SIMSCALEDOWN (1 << 3) -#define USB_RESET_USBINTTEST (1 << 4) -#define USB_RESET_SLEEPSTBYEN (1 << 5) -#define USB_RESET_PWRSENSELOW (1 << 6) -#define USB_RESET_PWRCTRLLOW (1 << 7) - -#define USB_STATUS_IRQHCIRMTWKUP (1 << 7) -#define USB_STATUS_IRQHCIBUFFACC (1 << 8) -#define USB_STATUS_NIRQHCIM (1 << 9) -#define USB_STATUS_NHCIMFCLR (1 << 10) -#define USB_STATUS_USBPWRSENSE (1 << 11) - -/* * Serial Audio Controller * * Registers @@ -327,22 +303,6 @@ * PC_SSR GPIO Block C Sleep State */ -#define _PA_DDR _SA1111( 0x1000 ) -#define _PA_DRR _SA1111( 0x1004 ) -#define _PA_DWR _SA1111( 0x1004 ) -#define _PA_SDR _SA1111( 0x1008 ) -#define _PA_SSR _SA1111( 0x100c ) -#define _PB_DDR _SA1111( 0x1010 ) -#define _PB_DRR _SA1111( 0x1014 ) -#define _PB_DWR _SA1111( 0x1014 ) -#define _PB_SDR _SA1111( 0x1018 ) -#define _PB_SSR _SA1111( 0x101c ) -#define _PC_DDR _SA1111( 0x1020 ) -#define _PC_DRR _SA1111( 0x1024 ) -#define _PC_DWR _SA1111( 0x1024 ) -#define _PC_SDR _SA1111( 0x1028 ) -#define _PC_SSR _SA1111( 0x102c ) - #define SA1111_GPIO 0x1000 #define SA1111_GPIO_PADDR (0x000) 
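The pl330.h removal above drops the old private PL330 client interface (pl330_add, pl330_request_channel, pl330_submit_req and friends) in favour of the common dmaengine driver. For context, a minimal sketch of how a client drove a single memory-to-memory copy through that removed interface, built only from the structures and prototypes visible in the deleted header; the bus addresses, the completion callback and the error handling are illustrative placeholders, not code from the patch:

/* Sketch: one mem-to-mem transfer via the removed pl330.h client API.
 * 'pi' must already have been registered with pl330_add(); the request and
 * xfer are static so they outlive the call, since the core uses them until
 * the completion callback runs.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/hardware/pl330.h>		/* the header deleted by this patch */

static void copy_done(void *token, enum pl330_op_err err)
{
	/* 'token' is whatever the client stored in pl330_req.token */
}

static int pl330_copy(struct pl330_info *pi, u32 src, u32 dst, u32 len)
{
	static struct pl330_xfer x;
	static struct pl330_req r;
	void *ch = pl330_request_channel(pi);	/* claim a free channel */

	if (!ch)
		return -EBUSY;

	x.src_addr = src;
	x.dst_addr = dst;
	x.bytes    = len;
	x.next     = NULL;			/* single-entry scatter list */

	r.rqtype  = MEMTOMEM;
	r.peri    = 0;				/* unused for mem-to-mem */
	r.token   = &r;
	r.xfer_cb = copy_done;
	r.cfg     = NULL;			/* reuse last programmed config */
	r.x       = &x;

	if (pl330_submit_req(ch, &r)) {
		pl330_release_channel(ch);
		return -EIO;
	}

	return pl330_chan_ctrl(ch, PL330_OP_START);	/* kick the channel */
}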
@@ -425,106 +385,30 @@ #define SA1111_WAKEPOL0 0x0034 #define SA1111_WAKEPOL1 0x0038 -/* - * PS/2 Trackpad and Mouse Interfaces - * - * Registers - * PS2CR Control Register - * PS2STAT Status Register - * PS2DATA Transmit/Receive Data register - * PS2CLKDIV Clock Division Register - * PS2PRECNT Clock Precount Register - * PS2TEST1 Test register 1 - * PS2TEST2 Test register 2 - * PS2TEST3 Test register 3 - * PS2TEST4 Test register 4 - */ - +/* PS/2 Trackpad and Mouse Interfaces */ #define SA1111_KBD 0x0a00 #define SA1111_MSE 0x0c00 -/* - * These are offsets from the above bases. - */ -#define SA1111_PS2CR 0x0000 -#define SA1111_PS2STAT 0x0004 -#define SA1111_PS2DATA 0x0008 -#define SA1111_PS2CLKDIV 0x000c -#define SA1111_PS2PRECNT 0x0010 - -#define PS2CR_ENA 0x08 -#define PS2CR_FKD 0x02 -#define PS2CR_FKC 0x01 - -#define PS2STAT_STP 0x0100 -#define PS2STAT_TXE 0x0080 -#define PS2STAT_TXB 0x0040 -#define PS2STAT_RXF 0x0020 -#define PS2STAT_RXB 0x0010 -#define PS2STAT_ENA 0x0008 -#define PS2STAT_RXP 0x0004 -#define PS2STAT_KBD 0x0002 -#define PS2STAT_KBC 0x0001 +/* PCMCIA Interface */ +#define SA1111_PCMCIA 0x1600 -/* - * PCMCIA Interface - * - * Registers - * PCSR Status Register - * PCCR Control Register - * PCSSR Sleep State Register - */ - -#define SA1111_PCMCIA 0x1600 - -/* - * These are offsets from the above base. - */ -#define SA1111_PCCR 0x0000 -#define SA1111_PCSSR 0x0004 -#define SA1111_PCSR 0x0008 - -#define PCSR_S0_READY (1<<0) -#define PCSR_S1_READY (1<<1) -#define PCSR_S0_DETECT (1<<2) -#define PCSR_S1_DETECT (1<<3) -#define PCSR_S0_VS1 (1<<4) -#define PCSR_S0_VS2 (1<<5) -#define PCSR_S1_VS1 (1<<6) -#define PCSR_S1_VS2 (1<<7) -#define PCSR_S0_WP (1<<8) -#define PCSR_S1_WP (1<<9) -#define PCSR_S0_BVD1 (1<<10) -#define PCSR_S0_BVD2 (1<<11) -#define PCSR_S1_BVD1 (1<<12) -#define PCSR_S1_BVD2 (1<<13) - -#define PCCR_S0_RST (1<<0) -#define PCCR_S1_RST (1<<1) -#define PCCR_S0_FLT (1<<2) -#define PCCR_S1_FLT (1<<3) -#define PCCR_S0_PWAITEN (1<<4) -#define PCCR_S1_PWAITEN (1<<5) -#define PCCR_S0_PSE (1<<6) -#define PCCR_S1_PSE (1<<7) - -#define PCSSR_S0_SLEEP (1<<0) -#define PCSSR_S1_SLEEP (1<<1) extern struct bus_type sa1111_bus_type; -#define SA1111_DEVID_SBI 0 -#define SA1111_DEVID_SK 1 -#define SA1111_DEVID_USB 2 -#define SA1111_DEVID_SAC 3 -#define SA1111_DEVID_SSP 4 -#define SA1111_DEVID_PS2 5 -#define SA1111_DEVID_GPIO 6 -#define SA1111_DEVID_INT 7 -#define SA1111_DEVID_PCMCIA 8 +#define SA1111_DEVID_SBI (1 << 0) +#define SA1111_DEVID_SK (1 << 1) +#define SA1111_DEVID_USB (1 << 2) +#define SA1111_DEVID_SAC (1 << 3) +#define SA1111_DEVID_SSP (1 << 4) +#define SA1111_DEVID_PS2 (3 << 5) +#define SA1111_DEVID_PS2_KBD (1 << 5) +#define SA1111_DEVID_PS2_MSE (1 << 6) +#define SA1111_DEVID_GPIO (1 << 7) +#define SA1111_DEVID_INT (1 << 8) +#define SA1111_DEVID_PCMCIA (1 << 9) struct sa1111_dev { struct device dev; @@ -548,6 +432,7 @@ struct sa1111_driver { int (*remove)(struct sa1111_dev *); int (*suspend)(struct sa1111_dev *, pm_message_t); int (*resume)(struct sa1111_dev *); + void (*shutdown)(struct sa1111_dev *); }; #define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv) @@ -555,9 +440,10 @@ struct sa1111_driver { #define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name) /* - * These frob the SKPCR register. + * These frob the SKPCR register, and call platform specific + * enable/disable functions. 
*/ -void sa1111_enable_device(struct sa1111_dev *); +int sa1111_enable_device(struct sa1111_dev *); void sa1111_disable_device(struct sa1111_dev *); unsigned int sa1111_pll_clock(struct sa1111_dev *); @@ -580,6 +466,10 @@ void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned i struct sa1111_platform_data { int irq_base; /* base for cascaded on-chip IRQs */ + unsigned disable_devs; + void *data; + int (*enable)(void *, unsigned); + void (*disable)(void *, unsigned); }; #endif /* _ASM_ARCH_SA1111 */ diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index e0d1c0cfa54..6b9b077d86b 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -4,7 +4,7 @@ * ARM PrimeXsys System Controller SP810 header file * * Copyright (C) 2009 ST Microelectronics - * Viresh Kumar<viresh.kumar@st.com> + * Viresh Kumar <viresh.linux@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h index 4384d81eee7..2dd9d3f83f2 100644 --- a/arch/arm/include/asm/hardware/timer-sp.h +++ b/arch/arm/include/asm/hardware/timer-sp.h @@ -1,2 +1,15 @@ -void sp804_clocksource_init(void __iomem *, const char *); +void __sp804_clocksource_and_sched_clock_init(void __iomem *, + const char *, int); + +static inline void sp804_clocksource_init(void __iomem *base, const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, 0); +} + +static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, 1); +} + void sp804_clockevents_init(void __iomem *, unsigned int, const char *); diff --git a/arch/arm/include/asm/hardware/uengine.h b/arch/arm/include/asm/hardware/uengine.h deleted file mode 100644 index b442d65c659..00000000000 --- a/arch/arm/include/asm/hardware/uengine.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Generic library functions for the microengines found on the Intel - * IXP2000 series of network processors. - * - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of the - * License, or (at your option) any later version. 
- */ - -#ifndef __IXP2000_UENGINE_H -#define __IXP2000_UENGINE_H - -extern u32 ixp2000_uengine_mask; - -struct ixp2000_uengine_code -{ - u32 cpu_model_bitmask; - u8 cpu_min_revision; - u8 cpu_max_revision; - - u32 uengine_parameters; - - struct ixp2000_reg_value { - int reg; - u32 value; - } *initial_reg_values; - - int num_insns; - u8 *insns; -}; - -u32 ixp2000_uengine_csr_read(int uengine, int offset); -void ixp2000_uengine_csr_write(int uengine, int offset, u32 value); -void ixp2000_uengine_reset(u32 uengine_mask); -void ixp2000_uengine_set_mode(int uengine, u32 mode); -void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns); -void ixp2000_uengine_init_context(int uengine, int context, int pc); -void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask); -void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask); -int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c); - -#define IXP2000_UENGINE_8_CONTEXTS 0x00000000 -#define IXP2000_UENGINE_4_CONTEXTS 0x80000000 -#define IXP2000_UENGINE_PRN_UPDATE_EVERY 0x40000000 -#define IXP2000_UENGINE_PRN_UPDATE_ON_ACCESS 0x00000000 -#define IXP2000_UENGINE_NN_FROM_SELF 0x00100000 -#define IXP2000_UENGINE_NN_FROM_PREVIOUS 0x00000000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_3 0x000c0000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_2 0x00080000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_1 0x00040000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_0 0x00000000 -#define IXP2000_UENGINE_LM_ADDR1_GLOBAL 0x00020000 -#define IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT 0x00000000 -#define IXP2000_UENGINE_LM_ADDR0_GLOBAL 0x00010000 -#define IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT 0x00000000 - - -#endif diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h index 5d72550a809..e14af1a1a32 100644 --- a/arch/arm/include/asm/hardware/vic.h +++ b/arch/arm/include/asm/hardware/vic.h @@ -41,7 +41,17 @@ #define VIC_PL192_VECT_ADDR 0xF00 #ifndef __ASSEMBLY__ +#include <linux/compiler.h> +#include <linux/types.h> + +struct device_node; +struct pt_regs; + +void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, + u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); -#endif +int vic_of_init(struct device_node *node, struct device_node *parent); +void vic_handle_irq(struct pt_regs *regs); +#endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index a4edd19dd3d..8c5e828f484 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -57,7 +57,7 @@ static inline void *kmap_high_get(struct page *page) #ifdef CONFIG_HIGHMEM extern void *kmap(struct page *page); extern void kunmap(struct page *page); -extern void *__kmap_atomic(struct page *page); +extern void *kmap_atomic(struct page *page); extern void __kunmap_atomic(void *kvaddr); extern void *kmap_atomic_pfn(unsigned long pfn); extern struct page *kmap_atomic_to_page(const void *ptr); diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c93a22a8b92..917626128a1 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -25,7 +25,8 @@ #define HWCAP_IDIVT (1 << 18) #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) -#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#if defined(__KERNEL__) +#if !defined(__ASSEMBLY__) /* * This yields a mask that user programs can use to figure out what * instruction set this cpu supports. 
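The hwcap.h hunk above only restructures the __KERNEL__/__ASSEMBLY__ guards, but the surrounding context shows the point of the file: ELF_HWCAP is the mask of HWCAP_* feature bits (HWCAP_IDIVA, HWCAP_IDIVT, ...) that the ELF loader hands to user space in the AT_HWCAP auxiliary vector entry. A minimal user-space sketch of testing those bits at run time, assuming a libc that provides getauxval() and installed kernel headers that define the HWCAP_* constants:

/* User-space sketch: query the CPU feature bits exported via ELF_HWCAP,
 * delivered to every process as the AT_HWCAP auxiliary vector entry.
 */
#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP */
#include <asm/hwcap.h>		/* HWCAP_IDIVA, HWCAP_IDIVT, ... */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_IDIVA)
		printf("SDIV/UDIV usable in ARM state\n");
	if (hwcap & HWCAP_IDIVT)
		printf("SDIV/UDIV usable in Thumb state\n");
	return 0;
}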
@@ -33,5 +34,6 @@ #define ELF_HWCAP (elf_hwcap) extern unsigned int elf_hwcap; #endif +#endif #endif diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h new file mode 100644 index 00000000000..bf863edb517 --- /dev/null +++ b/arch/arm/include/asm/idmap.h @@ -0,0 +1,14 @@ +#ifndef __ASM_IDMAP_H +#define __ASM_IDMAP_H + +#include <linux/compiler.h> +#include <asm/pgtable.h> + +/* Tag a function as requiring to be executed via an identity mapping. */ +#define __idmap __section(.idmap.text) noinline notrace + +extern pgd_t *idmap_pgd; + +void setup_mm_for_reboot(void); + +#endif /* __ASM_IDMAP_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 065d100fa63..815c669fec0 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -26,7 +26,7 @@ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/memory.h> -#include <asm/system.h> +#include <asm-generic/pci_iomap.h> /* * ISA I/O bus memory addresses are 1:1 with the physical address. @@ -47,9 +47,9 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen); extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); -#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)) -#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)) -#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)) +#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v))) +#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) +#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))) #define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a)) #define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) @@ -82,6 +82,11 @@ extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, uns extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); +extern void __arm_iounmap(volatile void __iomem *addr); + +extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, + unsigned int, void *); +extern void (*arch_iounmap)(volatile void __iomem *); /* * Bad read/write accesses... @@ -96,8 +101,11 @@ static inline void __iomem *__typesafe_io(unsigned long addr) return (void __iomem *)addr; } +#define IOMEM(x) ((void __force __iomem *)(x)) + /* IO barriers */ #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#include <asm/barrier.h> #define __iormb() rmb() #define __iowmb() wmb() #else @@ -108,7 +116,11 @@ static inline void __iomem *__typesafe_io(unsigned long addr) /* * Now, pick up the machine-defined IO definitions */ +#ifdef CONFIG_NEED_MACH_IO_H #include <mach/io.h> +#else +#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT) +#endif /* * This is the limit of PC card/PCI/ISA IO space, which is by default @@ -210,18 +222,16 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * Again, this are defined to perform little endian accesses. See the * IO port primitives for more information. 
*/ -#ifdef __mem_pci -#define readb_relaxed(c) ({ u8 __r = __raw_readb(__mem_pci(c)); __r; }) +#ifndef readl +#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; }) #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ - __raw_readw(__mem_pci(c))); __r; }) + __raw_readw(c)); __r; }) #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ - __raw_readl(__mem_pci(c))); __r; }) + __raw_readl(c)); __r; }) -#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c))) -#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ - cpu_to_le16(v),__mem_pci(c))) -#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ - cpu_to_le32(v),__mem_pci(c))) +#define writeb_relaxed(v,c) __raw_writeb(v,c) +#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) +#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) @@ -231,30 +241,19 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) -#define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l) -#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l) -#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) - -#define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l) -#define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l) -#define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l) +#define readsb(p,d,l) __raw_readsb(p,d,l) +#define readsw(p,d,l) __raw_readsw(p,d,l) +#define readsl(p,d,l) __raw_readsl(p,d,l) -#define memset_io(c,v,l) _memset_io(__mem_pci(c),(v),(l)) -#define memcpy_fromio(a,c,l) _memcpy_fromio((a),__mem_pci(c),(l)) -#define memcpy_toio(c,a,l) _memcpy_toio(__mem_pci(c),(a),(l)) +#define writesb(p,d,l) __raw_writesb(p,d,l) +#define writesw(p,d,l) __raw_writesw(p,d,l) +#define writesl(p,d,l) __raw_writesl(p,d,l) -#elif !defined(readb) +#define memset_io(c,v,l) _memset_io(c,(v),(l)) +#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l)) +#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l)) -#define readb(c) (__readwrite_bug("readb"),0) -#define readw(c) (__readwrite_bug("readw"),0) -#define readl(c) (__readwrite_bug("readl"),0) -#define writeb(v,c) __readwrite_bug("writeb") -#define writew(v,c) __readwrite_bug("writew") -#define writel(v,c) __readwrite_bug("writel") - -#define check_signature(io,sig,len) (0) - -#endif /* __mem_pci */ +#endif /* readl */ /* * ioremap and friends. @@ -263,16 +262,11 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * Documentation/io-mapping.txt. 
* */ -#ifndef __arch_ioremap -#define __arch_ioremap __arm_ioremap -#define __arch_iounmap __iounmap -#endif - -#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) -#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC) -#define iounmap __arch_iounmap +#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) +#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) +#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) +#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) +#define iounmap __arm_iounmap /* * io{read,write}{8,16,32} macros @@ -285,12 +279,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) #define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) -#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); }) -#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); }) -#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); }) +#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); }) +#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); }) +#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); }) -#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); }) -#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); }) +#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) +#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) #define ioread8_rep(p,d,c) __raw_readsb(p,d,c) #define ioread16_rep(p,d,c) __raw_readsw(p,d,c) @@ -306,7 +300,6 @@ extern void ioport_unmap(void __iomem *addr); struct pci_dev; -extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen); extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); /* diff --git a/arch/arm/include/asm/ipcbuf.h b/arch/arm/include/asm/ipcbuf.h index 97683975f7d..84c7e51cb6d 100644 --- a/arch/arm/include/asm/ipcbuf.h +++ b/arch/arm/include/asm/ipcbuf.h @@ -1,29 +1 @@ -#ifndef __ASMARM_IPCBUF_H -#define __ASMARM_IPCBUF_H - -/* - * The ipc64_perm structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 32-bit mode_t and seq - * - 2 miscellaneous 32-bit values - */ - -struct ipc64_perm -{ - __kernel_key_t key; - __kernel_uid32_t uid; - __kernel_gid32_t gid; - __kernel_uid32_t cuid; - __kernel_gid32_t cgid; - __kernel_mode_t mode; - unsigned short __pad1; - unsigned short seq; - unsigned short __pad2; - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* __ASMARM_IPCBUF_H */ +#include <asm-generic/ipcbuf.h> diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 5a526afb5f1..35c21c375d8 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -1,14 +1,18 @@ #ifndef __ASM_ARM_IRQ_H #define __ASM_ARM_IRQ_H +#define NR_IRQS_LEGACY 16 + +#ifndef CONFIG_SPARSE_IRQ #include <mach/irqs.h> +#else +#define NR_IRQS NR_IRQS_LEGACY +#endif #ifndef irq_canonicalize #define irq_canonicalize(i) (i) #endif -#define NR_IRQS_LEGACY 16 - /* * Use this value to indicate lack of interrupt * capability diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h new file mode 100644 index 00000000000..bfc198c7591 --- /dev/null +++ b/arch/arm/include/asm/jump_label.h @@ -0,0 +1,41 @@ +#ifndef _ASM_ARM_JUMP_LABEL_H +#define _ASM_ARM_JUMP_LABEL_H + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <asm/system.h> + +#define JUMP_LABEL_NOP_SIZE 4 + +#ifdef CONFIG_THUMB2_KERNEL +#define JUMP_LABEL_NOP "nop.w" +#else +#define JUMP_LABEL_NOP "nop" +#endif + +static __always_inline bool arch_static_branch(struct static_key *key) +{ + asm goto("1:\n\t" + JUMP_LABEL_NOP "\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i" (key) : : l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __KERNEL__ */ + +typedef u32 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index feec86768f9..f82ec22eeb1 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -24,7 +24,6 @@ #define MAX_INSN_SIZE 2 #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ -#define regs_return_value(regs) ((regs)->ARM_r0) #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h new file mode 100644 index 00000000000..14fab8f0b95 --- /dev/null +++ b/arch/arm/include/asm/kvm_para.h @@ -0,0 +1 @@ +#include <asm-generic/kvm_para.h> diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h index c6a18424888..f77ffc1eb0c 100644 --- a/arch/arm/include/asm/localtimer.h +++ b/arch/arm/include/asm/localtimer.h @@ -11,47 +11,24 @@ #define __ASM_ARM_LOCALTIMER_H #include <linux/errno.h> -#include <linux/interrupt.h> struct clock_event_device; -/* - * Setup a per-cpu timer, whether it be a local timer or dummy broadcast - */ -void percpu_timer_setup(void); +struct local_timer_ops { + int (*setup)(struct clock_event_device *); + void (*stop)(struct clock_event_device *); +}; #ifdef CONFIG_LOCAL_TIMERS - -#ifdef CONFIG_HAVE_ARM_TWD - -#include "smp_twd.h" - -#define local_timer_stop(c) twd_timer_stop((c)) - -#else - -/* - * Stop the local timer - */ -void local_timer_stop(struct clock_event_device *); - -#endif - /* - * Setup a local timer interrupt for a CPU. 
+ * Register a local timer driver */ -int local_timer_setup(struct clock_event_device *); - +int local_timer_register(struct local_timer_ops *); #else - -static inline int local_timer_setup(struct clock_event_device *evt) +static inline int local_timer_register(struct local_timer_ops *ops) { return -ENXIO; } - -static inline void local_timer_stop(struct clock_event_device *evt) -{ -} #endif #endif diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 2b0efc3104a..0b1c94b8c65 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -19,7 +19,7 @@ struct machine_desc { unsigned int nr; /* architecture number */ const char *name; /* architecture name */ unsigned long atag_offset; /* tagged list (relative) */ - const char **dt_compat; /* array of device tree + const char *const *dt_compat; /* array of device tree * 'compatible' strings */ unsigned int nr_irqs; /* number of IRQs */ @@ -31,10 +31,10 @@ struct machine_desc { unsigned int video_start; /* start of video RAM */ unsigned int video_end; /* end of video RAM */ - unsigned int reserve_lp0 :1; /* never has lp0 */ - unsigned int reserve_lp1 :1; /* never has lp1 */ - unsigned int reserve_lp2 :1; /* never has lp2 */ - unsigned int soft_reboot :1; /* soft reboot */ + unsigned char reserve_lp0 :1; /* never has lp0 */ + unsigned char reserve_lp1 :1; /* never has lp1 */ + unsigned char reserve_lp2 :1; /* never has lp2 */ + char restart_mode; /* default restart mode */ void (*fixup)(struct tag *, char **, struct meminfo *); void (*reserve)(void);/* reserve mem blocks */ @@ -43,9 +43,11 @@ struct machine_desc { void (*init_irq)(void); struct sys_timer *timer; /* system tick timer */ void (*init_machine)(void); + void (*init_late)(void); #ifdef CONFIG_MULTI_IRQ_HANDLER void (*handle_irq)(struct pt_regs *); #endif + void (*restart)(char, const char *); }; /* diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index b36f3654bf5..a6efcdd6fd2 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -30,6 +30,7 @@ struct map_desc { #define MT_MEMORY_DTCM 12 #define MT_MEMORY_ITCM 13 #define MT_MEMORY_SO 14 +#define MT_MEMORY_DMA_READY 15 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 186efd4e05c..26c511fddf8 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -12,13 +12,14 @@ #define __ASM_MACH_PCI_H struct pci_sys_data; +struct pci_ops; struct pci_bus; struct hw_pci { #ifdef CONFIG_PCI_DOMAINS int domain; #endif - struct list_head buses; + struct pci_ops *ops; int nr_controllers; int (*setup)(int nr, struct pci_sys_data *); struct pci_bus *(*scan)(int nr, struct pci_sys_data *); @@ -40,21 +41,15 @@ struct pci_sys_data { u64 mem_offset; /* bus->cpu memory mapping offset */ unsigned long io_offset; /* bus->cpu IO mapping offset */ struct pci_bus *bus; /* PCI bus */ - struct resource *resource[3]; /* Primary PCI bus resources */ + struct list_head resources; /* root bus resources (apertures) */ /* Bridge swizzling */ u8 (*swizzle)(struct pci_dev *, u8 *); /* IRQ mapping */ int (*map_irq)(const struct pci_dev *, u8, u8); - struct hw_pci *hw; void *private_data; /* platform controller private data */ }; /* - * This is the standard PCI-PCI bridge swizzling algorithm. - */ -#define pci_std_swizzle pci_common_swizzle - -/* * Call this with your hw_pci struct to initialise the PCI system. 
*/ void pci_common_init(struct hw_pci *); @@ -62,22 +57,22 @@ void pci_common_init(struct hw_pci *); /* * PCI controllers */ +extern struct pci_ops iop3xx_ops; extern int iop3xx_pci_setup(int nr, struct pci_sys_data *); -extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *); extern void iop3xx_pci_preinit(void); extern void iop3xx_pci_preinit_cond(void); +extern struct pci_ops dc21285_ops; extern int dc21285_setup(int nr, struct pci_sys_data *); -extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *); extern void dc21285_preinit(void); extern void dc21285_postinit(void); +extern struct pci_ops via82c505_ops; extern int via82c505_setup(int nr, struct pci_sys_data *); -extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *); extern void via82c505_init(void *sysdata); +extern struct pci_ops pci_v3_ops; extern int pci_v3_setup(int nr, struct pci_sys_data *); -extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *); extern void pci_v3_preinit(void); extern void pci_v3_postinit(void); diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index d5adaae5ee2..6ca945f534a 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -10,8 +10,6 @@ #ifndef __ASM_ARM_MACH_TIME_H #define __ASM_ARM_MACH_TIME_H -#include <linux/sysdev.h> - /* * This is our kernel timer structure. * @@ -44,4 +42,9 @@ struct sys_timer { extern void timer_tick(void); +struct timespec; +typedef void (*clock_access_fn)(struct timespec *); +extern int register_persistent_clock(clock_access_fn read_boot, + clock_access_fn read_persistent); + #endif diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h index 6b884d2b0b6..e8567bb99df 100644 --- a/arch/arm/include/asm/mc146818rtc.h +++ b/arch/arm/include/asm/mc146818rtc.h @@ -5,7 +5,9 @@ #define _ASM_MC146818RTC_H #include <linux/io.h> -#include <mach/irqs.h> +#include <linux/kernel.h> + +#define RTC_IRQ BUILD_BUG_ON(1) #ifndef RTC_PORT #define RTC_PORT(x) (0x70 + (x)) diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h index b8da2e415e4..00ca5f92648 100644 --- a/arch/arm/include/asm/memblock.h +++ b/arch/arm/include/asm/memblock.h @@ -6,4 +6,6 @@ struct machine_desc; extern void arm_memblock_init(struct meminfo *, struct machine_desc *); +phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align); + #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index a8997d71084..fcb575747e5 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -116,6 +116,8 @@ #define MODULES_END (END_MEM) #define MODULES_VADDR (PHYS_OFFSET) +#define XIP_VIRT_ADDR(physaddr) (physaddr) + #endif /* !CONFIG_MMU */ /* diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 71605d9f8e4..0306bc642c0 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -18,6 +18,7 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/proc-fns.h> +#include <asm-generic/mm_hooks.h> void __check_kvm_seq(struct mm_struct *mm); @@ -42,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm); #define ASID_FIRST_VERSION (1 << ASID_BITS) extern unsigned int cpu_last_asid; -#ifdef CONFIG_SMP -DECLARE_PER_CPU(struct mm_struct *, current_mm); -#endif void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); +void cpu_set_reserved_ttbr0(void); -static 
inline void check_context(struct mm_struct *mm) +static inline void switch_new_context(struct mm_struct *mm) { - /* - * This code is executed with interrupts enabled. Therefore, - * mm->context.id cannot be updated to the latest ASID version - * on a different CPU (and condition below not triggered) - * without first getting an IPI to reset the context. The - * alternative is to take a read_lock on mm->context.id_lock - * (after changing its type to rwlock_t). - */ - if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) - __new_context(mm); + unsigned long flags; + __new_context(mm); + + local_irq_save(flags); + cpu_switch_mm(mm->pgd, mm); + local_irq_restore(flags); +} + +static inline void check_and_switch_context(struct mm_struct *mm, + struct task_struct *tsk) +{ if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) __check_kvm_seq(mm); + + /* + * Required during context switch to avoid speculative page table + * walking with the wrong TTBR. + */ + cpu_set_reserved_ttbr0(); + + if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) + /* + * The ASID is from the current generation, just switch to the + * new pgd. This condition is only true for calls from + * context_switch() and interrupts are already disabled. + */ + cpu_switch_mm(mm->pgd, mm); + else if (irqs_disabled()) + /* + * Defer the new ASID allocation until after the context + * switch critical region since __new_context() cannot be + * called with interrupts disabled (it sends IPIs). + */ + set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); + else + /* + * That is a direct call to switch_mm() or activate_mm() with + * interrupts enabled and a new context. + */ + switch_new_context(mm); } #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) -#else - -static inline void check_context(struct mm_struct *mm) +#define finish_arch_post_lock_switch \ + finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) { + if (test_and_clear_thread_flag(TIF_SWITCH_MM)) + switch_new_context(current->mm); +} + +#else /* !CONFIG_CPU_HAS_ASID */ + #ifdef CONFIG_MMU + +static inline void check_and_switch_context(struct mm_struct *mm, + struct task_struct *tsk) +{ if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) __check_kvm_seq(mm); -#endif + + if (irqs_disabled()) + /* + * cpu_switch_mm() needs to flush the VIVT caches. To avoid + * high interrupt latencies, defer the call and continue + * running with the old mm. Since we only support UP systems + * on non-ASID CPUs, the old mm will remain valid until the + * finish_arch_post_lock_switch() call. 
+ */ + set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); + else + cpu_switch_mm(mm->pgd, mm); +} + +#define finish_arch_post_lock_switch \ + finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) +{ + if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { + struct mm_struct *mm = current->mm; + cpu_switch_mm(mm->pgd, mm); + } } +#endif /* CONFIG_MMU */ + #define init_new_context(tsk,mm) 0 -#endif +#endif /* CONFIG_CPU_HAS_ASID */ #define destroy_context(mm) do { } while(0) @@ -118,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, __flush_icache_all(); #endif if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { -#ifdef CONFIG_SMP - struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); - *crt_mm = next; -#endif - check_context(next); - cpu_switch_mm(next->pgd, next); + check_and_switch_context(next, tsk); if (cache_is_vivt()) cpumask_clear_cpu(cpu, mm_cpumask(prev)); } @@ -133,32 +188,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, NULL) -/* - * We are inserting a "fake" vma for the user-accessible vector page so - * gdb and friends can get to it through ptrace and /proc/<pid>/mem. - * But we also want to remove it before the generic code gets to see it - * during process exit or the unmapping of it would cause total havoc. - * (the macro is used as remove_vma() is static to mm/mmap.c) - */ -#define arch_exit_mmap(mm) \ -do { \ - struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ - if (high_vma) { \ - BUG_ON(high_vma->vm_next); /* it should be last */ \ - if (high_vma->vm_prev) \ - high_vma->vm_prev->vm_next = NULL; \ - else \ - mm->mmap = NULL; \ - rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ - mm->mmap_cache = NULL; \ - mm->map_count--; \ - remove_vma(high_vma); \ - } \ -} while (0) - -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) -{ -} - #endif diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h new file mode 100644 index 00000000000..19c48deda70 --- /dev/null +++ b/arch/arm/include/asm/opcodes.h @@ -0,0 +1,79 @@ +/* + * arch/arm/include/asm/opcodes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARM_OPCODES_H +#define __ASM_ARM_OPCODES_H + +#ifndef __ASSEMBLY__ +extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); +#endif + +#define ARM_OPCODE_CONDTEST_FAIL 0 +#define ARM_OPCODE_CONDTEST_PASS 1 +#define ARM_OPCODE_CONDTEST_UNCOND 2 + + +/* + * Opcode byteswap helpers + * + * These macros help with converting instructions between a canonical integer + * format and in-memory representation, in an endianness-agnostic manner. + * + * __mem_to_opcode_*() convert from in-memory representation to canonical form. + * __opcode_to_mem_*() convert from canonical form to in-memory representation. + * + * + * Canonical instruction representation: + * + * ARM: 0xKKLLMMNN + * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8 + * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8 + * + * There is no way to distinguish an ARM instruction in canonical representation + * from a Thumb instruction (just as these cannot be distinguished in memory). + * Where this distinction is important, it needs to be tracked separately. 
+ * + * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not + * represent any valid Thumb-2 instruction. For this range, + * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. + */ + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <linux/swab.h> + +#ifdef CONFIG_CPU_ENDIAN_BE8 +#define __opcode_to_mem_arm(x) swab32(x) +#define __opcode_to_mem_thumb16(x) swab16(x) +#define __opcode_to_mem_thumb32(x) swahb32(x) +#else +#define __opcode_to_mem_arm(x) ((u32)(x)) +#define __opcode_to_mem_thumb16(x) ((u16)(x)) +#define __opcode_to_mem_thumb32(x) swahw32(x) +#endif + +#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) +#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) +#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) + +/* Operations specific to Thumb opcodes */ + +/* Instruction size checks: */ +#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL) +#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL) + +/* Operations to construct or split 32-bit Thumb instructions: */ +#define __opcode_thumb32_first(x) ((u16)((x) >> 16)) +#define __opcode_thumb32_second(x) ((u16)(x)) +#define __opcode_thumb32_compose(first, second) \ + (((u32)(u16)(first) << 16) | (u32)(u16)(second)) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_OPCODES_H */ diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index ca94653f1ec..ecf901902e4 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -34,7 +34,6 @@ * processor(s) we're building for. * * We have the following to choose from: - * v3 - ARMv3 * v4wt - ARMv4 with writethrough cache, without minicache * v4wb - ARMv4 with writeback cache, without minicache * v4_mc - ARMv4 with minicache @@ -44,14 +43,6 @@ #undef _USER #undef MULTI_USER -#ifdef CONFIG_CPU_COPY_V3 -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER v3 -# endif -#endif - #ifdef CONFIG_CPU_COPY_V4WT # ifdef _USER # define MULTI_USER 1 @@ -151,7 +142,13 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +#define __HAVE_ARCH_GATE_AREA 1 + +#ifdef CONFIG_ARM_LPAE +#include <asm/pgtable-3level-types.h> +#else #include <asm/pgtable-2level-types.h> +#endif #endif /* CONFIG_MMU */ diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 2b1f245db0c..a98a2e112fa 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -31,18 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) } #endif /* CONFIG_PCI_DOMAINS */ -#ifdef CONFIG_PCI_HOST_ITE8152 -/* ITE bridge requires setting latency timer to avoid early bus access - termination by PIC bus mater devices -*/ -extern void pcibios_set_master(struct pci_dev *dev); -#else -static inline void pcibios_set_master(struct pci_dev *dev) -{ - /* No special bus mastering setup handling */ -} -#endif - static inline void pcibios_penalize_isa_irq(int irq, int active) { /* We don't do dynamic PCI IRQ allocation */ @@ -69,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); -extern void -pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, - struct resource *res); - -extern void -pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, - struct pci_bus_region *region); - /* * Dummy 
implementation; always return 0. */ diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 0f8e3827a89..00cbe10a50e 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,10 +12,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* ARM performance counters start from 1 (in the cp15 accesses) so use the - * same indexes here for consistency. */ -#define PERF_EVENT_INDEX_OFFSET 1 - /* ARM perf PMU IDs for use by internal perf clients. */ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_XSCALE1 = 0, @@ -26,13 +22,11 @@ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_CA9, ARM_PERF_PMU_ID_CA5, ARM_PERF_PMU_ID_CA15, + ARM_PERF_PMU_ID_CA7, ARM_NUM_PMU_IDS, }; extern enum arm_perf_pmu_ids armpmu_get_pmu_id(void); -extern int -armpmu_get_max_events(void); - #endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 3e08fd3fbb6..943504f53f5 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -25,12 +25,34 @@ #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) +#ifdef CONFIG_ARM_LPAE + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); + free_page((unsigned long)pmd); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); +} + +#else /* !CONFIG_ARM_LPAE */ + /* * Since we have only two-level page tables, these are trivial */ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(mm, pmd) do { } while (0) -#define pgd_populate(mm,pmd,pte) BUG() +#define pud_populate(mm,pmd,pte) BUG() + +#endif /* CONFIG_ARM_LPAE */ extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); @@ -109,7 +131,9 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, { pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot; pmdp[0] = __pmd(pmdval); +#ifndef CONFIG_ARM_LPAE pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); +#endif flush_pmd_entry(pmdp); } diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 470457e1cfc..2317a71c8f8 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -140,4 +140,45 @@ #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) +#ifndef __ASSEMBLY__ + +/* + * The "pud_xxx()" functions here are trivial when the pmd is folded into + * the pud: the pud entry is never bad, always exists, and can't be set or + * cleared. 
+ */ +#define pud_none(pud) (0) +#define pud_bad(pud) (0) +#define pud_present(pud) (1) +#define pud_clear(pudp) do { } while (0) +#define set_pud(pud,pudp) do { } while (0) + +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) +{ + return (pmd_t *)pud; +} + +#define pmd_bad(pmd) (pmd_val(pmd) & 2) + +#define copy_pmd(pmdpd,pmdps) \ + do { \ + pmdpd[0] = pmdps[0]; \ + pmdpd[1] = pmdps[1]; \ + flush_pmd_entry(pmdpd); \ + } while (0) + +#define pmd_clear(pmdp) \ + do { \ + pmdp[0] = __pmd(0); \ + pmdp[1] = __pmd(0); \ + clean_pmd_entry(pmdp); \ + } while (0) + +/* we don't need complex calculations here as the pmd is folded into the pgd */ +#define pmd_addr_end(addr,end) (end) + +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h new file mode 100644 index 00000000000..d7952824c5c --- /dev/null +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -0,0 +1,77 @@ +/* + * arch/arm/include/asm/pgtable-3level-hwdef.h + * + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H +#define _ASM_PGTABLE_3LEVEL_HWDEF_H + +/* + * Hardware page table definitions. + * + * + Level 1/2 descriptor + * - common + */ +#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) +#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) +#define PMD_BIT4 (_AT(pmdval_t, 0)) +#define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) + +/* + * - section + */ +#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) +#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) +#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) +#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) +#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) +#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) +#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) +#define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) +#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) + +/* + * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). 
+ */ +#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0) << 2) /* strongly ordered */ +#define PMD_SECT_BUFFERED (_AT(pmdval_t, 1) << 2) /* normal non-cacheable */ +#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */ +#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */ +#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */ + +/* + * + Level 3 descriptor (PTE) + */ +#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) +#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) +#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) +#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ +#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ +#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ +#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ +#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ +#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ + +/* + * 40-bit physical address supported. + */ +#define PHYS_MASK_SHIFT (40) +#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) + +#endif diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h new file mode 100644 index 00000000000..921aa30259c --- /dev/null +++ b/arch/arm/include/asm/pgtable-3level-types.h @@ -0,0 +1,70 @@ +/* + * arch/arm/include/asm/pgtable-3level-types.h + * + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H +#define _ASM_PGTABLE_3LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u64 pteval_t; +typedef u64 pmdval_t; +typedef u64 pgdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS + +/* + * These are used to make use of C type-checking.. 
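What the strict variant buys (when STRICT_MM_TYPECHECKS is defined) is that a page-table value can no longer be passed where a plain integer, or a different table level, is expected. A stand-alone sketch of the idea, not kernel code:

#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;   /* strict wrapper, as defined below */

#define __pte(x)   ((pte_t) { (x) })
#define pte_val(x) ((x).pte)

static pteval_t read_entry(pte_t pte)
{
        return pte_val(pte);
}

int main(void)
{
        pteval_t v = read_entry(__pte(0x1234));   /* fine: explicitly wrapped */
        /* read_entry(0x1234); */                 /* rejected by the compiler with the
                                                     struct wrapper, silently accepted
                                                     with the bare typedefs */
        return v == 0x1234 ? 0 : 1;
}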
+ */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else /* !STRICT_MM_TYPECHECKS */ + +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pgdval_t pgd_t; +typedef pteval_t pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#endif /* _ASM_PGTABLE_3LEVEL_TYPES_H */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h new file mode 100644 index 00000000000..b24903549d1 --- /dev/null +++ b/arch/arm/include/asm/pgtable-3level.h @@ -0,0 +1,153 @@ +/* + * arch/arm/include/asm/pgtable-3level.h + * + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_3LEVEL_H +#define _ASM_PGTABLE_3LEVEL_H + +/* + * With LPAE, there are 3 levels of page tables. Each level has 512 entries of + * 8 bytes each, occupying a 4K page. The first level table covers a range of + * 512GB, each entry representing 1GB. Since we are limited to 4GB input + * address range, only 4 entries in the PGD are used. + * + * There are enough spare bits in a page table entry for the kernel specific + * state. + */ +#define PTRS_PER_PTE 512 +#define PTRS_PER_PMD 512 +#define PTRS_PER_PGD 4 + +#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_OFF (0) +#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) + +/* + * PGDIR_SHIFT determines the size a top-level page table entry can map. + */ +#define PGDIR_SHIFT 30 + +/* + * PMD_SHIFT determines the size a middle-level page table entry can map. + */ +#define PMD_SHIFT 21 + +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * section address mask and size definitions. + */ +#define SECTION_SHIFT 21 +#define SECTION_SIZE (1UL << SECTION_SHIFT) +#define SECTION_MASK (~(SECTION_SIZE-1)) + +#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) + +/* + * "Linux" PTE definitions for LPAE. + * + * These bits overlap with the hardware bits but the naming is preserved for + * consistency with the classic page table format. 
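For a feel of how these software bits are consumed, protection values elsewhere are built by OR-ing them together. The composition below is illustrative only (the real PAGE_*/pgprot definitions live in pgtable.h and pull in further bits) and copies the L_PTE_* values listed just after this point:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

/* Values copied from the L_PTE_* definitions below. */
#define L_PTE_PRESENT      ((pteval_t)3 << 0)
#define L_PTE_MT_WRITEBACK ((pteval_t)3 << 2)
#define L_PTE_USER         ((pteval_t)1 << 6)
#define L_PTE_RDONLY       ((pteval_t)1 << 7)
#define L_PTE_YOUNG        ((pteval_t)1 << 10)
#define L_PTE_XN           ((pteval_t)1 << 54)

int main(void)
{
        /* A hypothetical read-only, non-executable, write-back cached user page. */
        pteval_t prot = L_PTE_PRESENT | L_PTE_MT_WRITEBACK | L_PTE_USER |
                        L_PTE_RDONLY | L_PTE_YOUNG | L_PTE_XN;

        /* XN and DIRTY sit above bit 31, which is why assembly code needs the
         * separate L_PTE_*_HIGH forms. */
        printf("low word  0x%08x\n", (unsigned)prot);
        printf("high word 0x%08x\n", (unsigned)(prot >> 32));
        return 0;
}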
+ */ +#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */ +#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ +#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ +#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ +#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ +#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */ +#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ + +/* + * To be used in assembly code with the upper page attributes. + */ +#define L_PTE_XN_HIGH (1 << (54 - 32)) +#define L_PTE_DIRTY_HIGH (1 << (55 - 32)) + +/* + * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). + */ +#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0) << 2) /* strongly ordered */ +#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 1) << 2) /* normal non-cacheable */ +#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 2) << 2) /* normal inner write-through */ +#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 3) << 2) /* normal inner write-back */ +#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 7) << 2) /* normal inner write-alloc */ +#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 4) << 2) /* device */ +#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 4) << 2) /* device */ +#define L_PTE_MT_DEV_WC (_AT(pteval_t, 1) << 2) /* normal non-cacheable */ +#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 3) << 2) /* normal inner write-back */ +#define L_PTE_MT_MASK (_AT(pteval_t, 7) << 2) + +/* + * Software PGD flags. + */ +#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ + +#ifndef __ASSEMBLY__ + +#define pud_none(pud) (!pud_val(pud)) +#define pud_bad(pud) (!(pud_val(pud) & 2)) +#define pud_present(pud) (pud_val(pud)) + +#define pud_clear(pudp) \ + do { \ + *pudp = __pud(0); \ + clean_pmd_entry(pudp); \ + } while (0) + +#define set_pud(pudp, pud) \ + do { \ + *pudp = pud; \ + flush_pmd_entry(pudp); \ + } while (0) + +static inline pmd_t *pud_page_vaddr(pud_t pud) +{ + return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); +} + +/* Find an entry in the second-level page table.. */ +#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); +} + +#define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) + +#define copy_pmd(pmdpd,pmdps) \ + do { \ + *pmdpd = *pmdps; \ + flush_pmd_entry(pmdpd); \ + } while (0) + +#define pmd_clear(pmdp) \ + do { \ + *pmdp = __pmd(0); \ + clean_pmd_entry(pmdp); \ + } while (0) + +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_3LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h index 183111164ce..8426229ba29 100644 --- a/arch/arm/include/asm/pgtable-hwdef.h +++ b/arch/arm/include/asm/pgtable-hwdef.h @@ -10,6 +10,10 @@ #ifndef _ASMARM_PGTABLE_HWDEF_H #define _ASMARM_PGTABLE_HWDEF_H +#ifdef CONFIG_ARM_LPAE +#include <asm/pgtable-3level-hwdef.h> +#else #include <asm/pgtable-2level-hwdef.h> +#endif #endif diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index ffc0e85775b..7ec60d6075b 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -79,7 +79,6 @@ extern unsigned int kobjsize(const void *objp); * No page table caches to initialise. 
*/ #define pgtable_cache_init() do { } while (0) -#define io_remap_page_range remap_page_range #define io_remap_pfn_range remap_pfn_range diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9451dce3a55..f66626d71e7 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -11,20 +11,24 @@ #define _ASMARM_PGTABLE_H #include <linux/const.h> -#include <asm-generic/4level-fixup.h> #include <asm/proc-fns.h> #ifndef CONFIG_MMU +#include <asm-generic/4level-fixup.h> #include "pgtable-nommu.h" #else +#include <asm-generic/pgtable-nopud.h> #include <asm/memory.h> -#include <mach/vmalloc.h> #include <asm/pgtable-hwdef.h> +#ifdef CONFIG_ARM_LPAE +#include <asm/pgtable-3level.h> +#else #include <asm/pgtable-2level.h> +#endif /* * Just any arbitrary offset to the start of the vmalloc VM area: the @@ -33,15 +37,10 @@ * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) - * - * Note that platforms may override VMALLOC_START, but they must provide - * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space, - * which may not overlap IO space. */ -#ifndef VMALLOC_START #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#endif +#define VMALLOC_END 0xff000000UL #define LIBRARY_TEXT_START 0x0c000000 @@ -163,39 +162,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pgd is never bad, and a pmd always exists (as it's folded - * into the pgd entry) - */ -#define pgd_none(pgd) (0) -#define pgd_bad(pgd) (0) -#define pgd_present(pgd) (1) -#define pgd_clear(pgdp) do { } while (0) -#define set_pgd(pgd,pgdp) do { } while (0) -#define set_pud(pud,pudp) do { } while (0) - - -/* Find an entry in the second-level page table.. */ -#define pmd_offset(dir, addr) ((pmd_t *)(dir)) - #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd)) -#define pmd_bad(pmd) (pmd_val(pmd) & 2) - -#define copy_pmd(pmdpd,pmdps) \ - do { \ - pmdpd[0] = pmdps[0]; \ - pmdpd[1] = pmdps[1]; \ - flush_pmd_entry(pmdpd); \ - } while (0) - -#define pmd_clear(pmdp) \ - do { \ - pmdp[0] = __pmd(0); \ - pmdp[1] = __pmd(0); \ - clean_pmd_entry(pmdp); \ - } while (0) static inline pte_t *pmd_page_vaddr(pmd_t pmd) { @@ -204,10 +172,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) -/* we don't need complex calculations here as the pmd is folded into the pgd */ -#define pmd_addr_end(addr,end) (end) - - #ifndef CONFIG_HIGHPTE #define __pte_map(pmd) pmd_page_vaddr(*(pmd)) #define __pte_unmap(pte) do { } while (0) @@ -229,7 +193,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_page(pte) pfn_to_page(pte_pfn(pte)) #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) -#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) #if __LINUX_ARM_ARCH__ < 6 @@ -336,6 +299,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * We provide our own arch_get_unmapped_area to cope with VIPT caches. 
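The VIPT problem referred to here is cache-colour aliasing: two virtual mappings of the same physical page must index the cache identically, so shared mappings get aligned to the colour granularity. A much-simplified sketch of that alignment step follows; it assumes an SHMLBA of four pages, as on ARM, and leaves out the parts of the real arch_get_unmapped_area that match the colour implied by the file offset:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define SHMLBA     (4 * PAGE_SIZE)      /* cache-colour granularity (assumed) */

/* Round a candidate mmap address up to the next colour boundary. */
static unsigned long colour_align(unsigned long addr)
{
        return (addr + SHMLBA - 1) & ~(SHMLBA - 1);
}

int main(void)
{
        assert(colour_align(0x40001000UL) == 0x40004000UL);
        return 0;
}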
*/ #define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN /* * remap a physical page `pfn' of size `size' with page protection `prot' @@ -346,9 +310,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define pgtable_cache_init() do { } while (0) -void identity_mapping_add(pgd_t *, unsigned long, unsigned long); -void identity_mapping_del(pgd_t *, unsigned long, unsigned long); - #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 0bda22c094a..90114faa9f3 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -27,13 +27,22 @@ enum arm_pmu_type { /* * struct arm_pmu_platdata - ARM PMU platform data * - * @handle_irq: an optional handler which will be called from the interrupt and - * passed the address of the low level handler, and can be used to implement - * any platform specific handling before or after calling it. + * @handle_irq: an optional handler which will be called from the + * interrupt and passed the address of the low level handler, + * and can be used to implement any platform specific handling + * before or after calling it. + * @enable_irq: an optional handler which will be called after + * request_irq and be used to handle some platform specific + * irq enablement + * @disable_irq: an optional handler which will be called before + * free_irq and be used to handle some platform specific + * irq disablement */ struct arm_pmu_platdata { irqreturn_t (*handle_irq)(int irq, void *dev, irq_handler_t pmu_handler); + void (*enable_irq)(int irq); + void (*disable_irq)(int irq); }; #ifdef CONFIG_CPU_HAS_PMU @@ -125,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); u64 armpmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, - int idx, int overflow); + int idx); int armpmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h index 2446d23bfdb..d2de9cbbcd9 100644 --- a/arch/arm/include/asm/posix_types.h +++ b/arch/arm/include/asm/posix_types.h @@ -19,59 +19,19 @@ * assume GCC is being used. 
*/ -typedef unsigned long __kernel_ino_t; typedef unsigned short __kernel_mode_t; -typedef unsigned short __kernel_nlink_t; -typedef long __kernel_off_t; -typedef int __kernel_pid_t; +#define __kernel_mode_t __kernel_mode_t + typedef unsigned short __kernel_ipc_pid_t; +#define __kernel_ipc_pid_t __kernel_ipc_pid_t + typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; -typedef unsigned int __kernel_size_t; -typedef int __kernel_ssize_t; -typedef int __kernel_ptrdiff_t; -typedef long __kernel_time_t; -typedef long __kernel_suseconds_t; -typedef long __kernel_clock_t; -typedef int __kernel_timer_t; -typedef int __kernel_clockid_t; -typedef int __kernel_daddr_t; -typedef char * __kernel_caddr_t; -typedef unsigned short __kernel_uid16_t; -typedef unsigned short __kernel_gid16_t; -typedef unsigned int __kernel_uid32_t; -typedef unsigned int __kernel_gid32_t; +#define __kernel_uid_t __kernel_uid_t -typedef unsigned short __kernel_old_uid_t; -typedef unsigned short __kernel_old_gid_t; typedef unsigned short __kernel_old_dev_t; +#define __kernel_old_dev_t __kernel_old_dev_t -#ifdef __GNUC__ -typedef long long __kernel_loff_t; -#endif - -typedef struct { - int val[2]; -} __kernel_fsid_t; - -#if defined(__KERNEL__) - -#undef __FD_SET -#define __FD_SET(fd, fdsetp) \ - (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1<<((fd) & 31))) - -#undef __FD_CLR -#define __FD_CLR(fd, fdsetp) \ - (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] &= ~(1<<((fd) & 31))) - -#undef __FD_ISSET -#define __FD_ISSET(fd, fdsetp) \ - ((((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] & (1<<((fd) & 31))) != 0) - -#undef __FD_ZERO -#define __FD_ZERO(fdsetp) \ - (memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp)))) - -#endif +#include <asm-generic/posix_types.h> #endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 9e92cb205e6..f3628fb3d2b 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -65,7 +65,11 @@ extern struct processor { * Set a possibly extended PTE. Non-extended PTEs should * ignore 'ext'. 
*/ +#ifdef CONFIG_ARM_LPAE + void (*set_pte_ext)(pte_t *ptep, pte_t pte); +#else void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); +#endif /* Suspend/resume */ unsigned int suspend_size; @@ -79,7 +83,11 @@ extern void cpu_proc_fin(void); extern int cpu_do_idle(void); extern void cpu_dcache_clean_area(void *, int); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); +#ifdef CONFIG_ARM_LPAE +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); +#else extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); +#endif extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); /* These three are private to arch/arm/kernel/suspend.c */ @@ -107,6 +115,18 @@ extern void cpu_resume(void); #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) +#ifdef CONFIG_ARM_LPAE +#define cpu_get_pgd() \ + ({ \ + unsigned long pg, pg2; \ + __asm__("mrrc p15, 0, %0, %1, c2" \ + : "=r" (pg), "=r" (pg2) \ + : \ + : "cc"); \ + pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ + (pgd_t *)phys_to_virt(pg); \ + }) +#else #define cpu_get_pgd() \ ({ \ unsigned long pg; \ @@ -115,6 +135,7 @@ extern void cpu_resume(void); pg &= ~0x3fff; \ (pgd_t *)phys_to_virt(pg); \ }) +#endif #endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index b2d9df5667a..99afa749826 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -55,7 +55,6 @@ struct thread_struct { #define start_thread(regs,pc,sp) \ ({ \ unsigned long *stack = (unsigned long *)sp; \ - set_fs(USER_DS); \ memset(regs->uregs, 0, sizeof(regs->uregs)); \ if (current->personality & ADDR_LIMIT_32BIT) \ regs->ARM_cpsr = USR_MODE; \ @@ -78,9 +77,6 @@ struct task_struct; /* Free all resources held by a thread. 
*/ extern void release_thread(struct task_struct *); -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) - unsigned long get_wchan(struct task_struct *p); #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) @@ -123,6 +119,8 @@ static inline void prefetch(const void *ptr) #endif +#define HAVE_ARCH_PICK_MMAP_LAYOUT + #endif #endif /* __ASM_ARM_PROCESSOR_H */ diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index 6f65ca86a5e..aeae9c609df 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -13,9 +13,6 @@ #ifdef CONFIG_OF -#include <asm/setup.h> -#include <asm/irq.h> - extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); extern void arm_dt_memblock_reserve(void); diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 96187ff58c2..355ece523f4 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -189,6 +189,11 @@ static inline int valid_user_regs(struct pt_regs *regs) return 0; } +static inline long regs_return_value(struct pt_regs *regs) +{ + return regs->ARM_r0; +} + #define instruction_pointer(regs) (regs)->ARM_pc #ifdef CONFIG_SMP @@ -244,6 +249,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) return regs->ARM_sp; } +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return regs->ARM_sp; +} + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index c8e6ddf3e86..e3f75726343 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -8,113 +8,7 @@ #ifndef ASM_SCHED_CLOCK #define ASM_SCHED_CLOCK -#include <linux/kernel.h> -#include <linux/types.h> - -struct clock_data { - u64 epoch_ns; - u32 epoch_cyc; - u32 epoch_cyc_copy; - u32 mult; - u32 shift; -}; - -#define DEFINE_CLOCK_DATA(name) struct clock_data name - -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) -{ - return (cyc * mult) >> shift; -} - -/* - * Atomically update the sched_clock epoch. Your update callback will - * be called from a timer before the counter wraps - read the current - * counter value, and call this function to safely move the epochs - * forward. Only use this from the update callback. - */ -static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask) -{ - unsigned long flags; - u64 ns = cd->epoch_ns + - cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift); - - /* - * Write epoch_cyc and epoch_ns in a way that the update is - * detectable in cyc_to_fixed_sched_clock(). - */ - raw_local_irq_save(flags); - cd->epoch_cyc = cyc; - smp_wmb(); - cd->epoch_ns = ns; - smp_wmb(); - cd->epoch_cyc_copy = cyc; - raw_local_irq_restore(flags); -} - -/* - * If your clock rate is known at compile time, using this will allow - * you to optimize the mult/shift loads away. This is paired with - * init_fixed_sched_clock() to ensure that your mult/shift are correct. - */ -static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd, - u32 cyc, u32 mask, u32 mult, u32 shift) -{ - u64 epoch_ns; - u32 epoch_cyc; - - /* - * Load the epoch_cyc and epoch_ns atomically. We do this by - * ensuring that we always write epoch_cyc, epoch_ns and - * epoch_cyc_copy in strict order, and read them in strict order. - * If epoch_cyc and epoch_cyc_copy are not equal, then we're in - * the middle of an update, and we should repeat the load. 
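All of this open-coded epoch machinery goes away in favour of a single registration call; the setup_sched_clock() declaration left at the bottom of this header is used by platform code roughly as follows. A hedged sketch: the register address, clock rate and names are invented for illustration.

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

#define EXAMPLE_TIMER_PHYS 0x10000000   /* hypothetical free-running counter */

static void __iomem *example_timer_base;

static u32 example_read_sched_clock(void)
{
        return readl_relaxed(example_timer_base);  /* 32-bit up-counter */
}

/* Normally called from the machine's timer initialisation. */
static void __init example_timer_init(void)
{
        example_timer_base = ioremap(EXAMPLE_TIMER_PHYS, 0x1000);
        /* 32 significant counter bits, ticking at a made-up 24 MHz */
        setup_sched_clock(example_read_sched_clock, 32, 24000000);
}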
- */ - do { - epoch_cyc = cd->epoch_cyc; - smp_rmb(); - epoch_ns = cd->epoch_ns; - smp_rmb(); - } while (epoch_cyc != cd->epoch_cyc_copy); - - return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift); -} - -/* - * Otherwise, you need to use this, which will obtain the mult/shift - * from the clock_data structure. Use init_sched_clock() with this. - */ -static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd, - u32 cyc, u32 mask) -{ - return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift); -} - -/* - * Initialize the clock data - calculate the appropriate multiplier - * and shift. Also setup a timer to ensure that the epoch is refreshed - * at the appropriate time interval, which will call your update - * handler. - */ -void init_sched_clock(struct clock_data *, void (*)(void), - unsigned int, unsigned long); - -/* - * Use this initialization function rather than init_sched_clock() if - * you're using cyc_to_fixed_sched_clock, which will warn if your - * constants are incorrect. - */ -static inline void init_fixed_sched_clock(struct clock_data *cd, - void (*update)(void), unsigned int bits, unsigned long rate, - u32 mult, u32 shift) -{ - init_sched_clock(cd, update, bits, rate); - if (cd->mult != mult || cd->shift != shift) { - pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n" - "sched_clock: fix multiply/shift to avoid scheduler hiccups\n", - mult, shift, cd->mult, cd->shift); - } -} - extern void sched_clock_postinit(void); +extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); #endif diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 915696dd9c7..23ebc0c82a3 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -192,11 +192,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn } /* * Memory map description */ -#ifdef CONFIG_ARCH_EP93XX -# define NR_BANKS 16 -#else -# define NR_BANKS 8 -#endif +#define NR_BANKS CONFIG_ARM_NR_BANKS struct membank { phys_addr_t start; diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 1e5717afc4a..ae29293270a 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -71,12 +71,6 @@ extern void platform_secondary_init(unsigned int cpu); extern void platform_smp_prepare_cpus(unsigned int); /* - * Logical CPU mapping. - */ -extern int __cpu_logical_map[NR_CPUS]; -#define cpu_logical_map(cpu) __cpu_logical_map[cpu] - -/* * Initial data for bringing up a secondary CPU. */ struct secondary_data { diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index f24c1b9e211..558d6c80aca 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -43,4 +43,10 @@ static inline int cache_ops_need_broadcast(void) } #endif +/* + * Logical CPU mapping. 
+ */ +extern int __cpu_logical_map[]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + #endif diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h index ef9ffba97ad..0f01f4677bd 100644 --- a/arch/arm/include/asm/smp_twd.h +++ b/arch/arm/include/asm/smp_twd.h @@ -18,11 +18,28 @@ #define TWD_TIMER_CONTROL_PERIODIC (1 << 1) #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) -struct clock_event_device; +#include <linux/ioport.h> -extern void __iomem *twd_base; +struct twd_local_timer { + struct resource res[2]; +}; -void twd_timer_setup(struct clock_event_device *); -void twd_timer_stop(struct clock_event_device *); +#define DEFINE_TWD_LOCAL_TIMER(name,base,irq) \ +struct twd_local_timer name __initdata = { \ + .res = { \ + DEFINE_RES_MEM(base, 0x10), \ + DEFINE_RES_IRQ(irq), \ + }, \ +}; + +int twd_local_timer_register(struct twd_local_timer *); + +#ifdef CONFIG_HAVE_ARM_TWD +void twd_local_timer_of_register(void); +#else +static inline void twd_local_timer_of_register(void) +{ +} +#endif #endif diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index 90ffd04b8e7..6433cadb6ed 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h @@ -62,4 +62,11 @@ #define SO_RXQ_OVFL 40 +#define SO_WIFI_STATUS 41 +#define SCM_WIFI_STATUS SO_WIFI_STATUS +#define SO_PEEK_OFF 42 + +/* Instruct lower device to use last 4-bytes of skb data as FCS */ +#define SO_NOFCS 43 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h index 9997ad20eff..b859d82e30c 100644 --- a/arch/arm/include/asm/swab.h +++ b/arch/arm/include/asm/swab.h @@ -22,14 +22,16 @@ # define __SWAB_64_THRU_32__ #endif -#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6 +#if defined(__KERNEL__) +#if __LINUX_ARM_ARCH__ >= 6 -static inline __attribute_const__ __u16 __arch_swab16(__u16 x) +static inline __attribute_const__ __u32 __arch_swahb32(__u32 x) { __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x)); return x; } -#define __arch_swab16 __arch_swab16 +#define __arch_swahb32 __arch_swahb32 +#define __arch_swab16(x) ((__u16)__arch_swahb32(x)) static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { @@ -38,8 +40,10 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) } #define __arch_swab32 __arch_swab32 -#else +#endif +#endif +#if !defined(__KERNEL__) || __LINUX_ARM_ARCH__ < 6 static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __u32 t; diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h new file mode 100644 index 00000000000..fa09e6b49bf --- /dev/null +++ b/arch/arm/include/asm/switch_to.h @@ -0,0 +1,18 @@ +#ifndef __ASM_ARM_SWITCH_TO_H +#define __ASM_ARM_SWITCH_TO_H + +#include <linux/thread_info.h> + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. schedule() itself + * contains the memory barrier to tell GCC not to cache `current'. 
+ */ +extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); + +#define switch_to(prev,next,last) \ +do { \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ +} while (0) + +#endif /* __ASM_ARM_SWITCH_TO_H */ diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h new file mode 100644 index 00000000000..c334a23ddf7 --- /dev/null +++ b/arch/arm/include/asm/syscall.h @@ -0,0 +1,93 @@ +/* + * Access to user system call parameters and results + * + * See asm-generic/syscall.h for descriptions of what we must do here. + */ + +#ifndef _ASM_ARM_SYSCALL_H +#define _ASM_ARM_SYSCALL_H + +#include <linux/err.h> + +extern const unsigned long sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return task_thread_info(task)->syscall; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->ARM_r0 = regs->ARM_ORIG_r0; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->ARM_r0; + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->ARM_r0; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->ARM_r0 = (long) error ? error : val; +} + +#define SYSCALL_MAX_ARGS 7 + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + if (i + n > SYSCALL_MAX_ARGS) { + unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; + unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; + pr_warning("%s called with max args %d, handling only %d\n", + __func__, i + n, SYSCALL_MAX_ARGS); + memset(args_bad, 0, n_bad * sizeof(args[0])); + n = SYSCALL_MAX_ARGS - i; + } + + if (i == 0) { + args[0] = regs->ARM_ORIG_r0; + args++; + i++; + n--; + } + + memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0])); +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + if (i + n > SYSCALL_MAX_ARGS) { + pr_warning("%s called with max args %d, handling only %d\n", + __func__, i + n, SYSCALL_MAX_ARGS); + n = SYSCALL_MAX_ARGS - i; + } + + if (i == 0) { + regs->ARM_ORIG_r0 = args[0]; + args++; + i++; + n--; + } + + memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0])); } + +#endif /* _ASM_ARM_SYSCALL_H */ diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 984014b9264..74542c52f9b 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -1,535 +1,8 @@ -#ifndef __ASM_ARM_SYSTEM_H -#define __ASM_ARM_SYSTEM_H - -#ifdef __KERNEL__ - -#define CPU_ARCH_UNKNOWN 0 -#define CPU_ARCH_ARMv3 1 -#define CPU_ARCH_ARMv4 2 -#define CPU_ARCH_ARMv4T 3 -#define CPU_ARCH_ARMv5 4 -#define CPU_ARCH_ARMv5T 5 -#define CPU_ARCH_ARMv5TE 6 -#define CPU_ARCH_ARMv5TEJ 7 -#define CPU_ARCH_ARMv6 8 -#define CPU_ARCH_ARMv7 9 - -/* - * CR1 bits (CP#15 CR1) - */ -#define CR_M (1 << 0) /* MMU enable */ -#define CR_A (1 << 1) /* Alignment abort enable */ -#define CR_C (1 << 2) /* Dcache enable */ -#define CR_W (1 << 3) /* Write buffer enable */ -#define CR_P (1 << 4) /* 32-bit exception handler */ -#define CR_D (1 << 5) /* 32-bit data address range */ -#define CR_L (1 << 6) /* Implementation
defined */ -#define CR_B (1 << 7) /* Big endian */ -#define CR_S (1 << 8) /* System MMU protection */ -#define CR_R (1 << 9) /* ROM MMU protection */ -#define CR_F (1 << 10) /* Implementation defined */ -#define CR_Z (1 << 11) /* Implementation defined */ -#define CR_I (1 << 12) /* Icache enable */ -#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ -#define CR_RR (1 << 14) /* Round Robin cache replacement */ -#define CR_L4 (1 << 15) /* LDR pc can set T bit */ -#define CR_DT (1 << 16) -#define CR_IT (1 << 18) -#define CR_ST (1 << 19) -#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ -#define CR_U (1 << 22) /* Unaligned access operation */ -#define CR_XP (1 << 23) /* Extended page tables */ -#define CR_VE (1 << 24) /* Vectored interrupts */ -#define CR_EE (1 << 25) /* Exception (Big) Endian */ -#define CR_TRE (1 << 28) /* TEX remap enable */ -#define CR_AFE (1 << 29) /* Access flag enable */ -#define CR_TE (1 << 30) /* Thumb exception enable */ - -/* - * This is used to ensure the compiler did actually allocate the register we - * asked it for some inline assembly sequences. Apparently we can't trust - * the compiler from one version to another so a bit of paranoia won't hurt. - * This string is meant to be concatenated with the inline asm string and - * will cause compilation to stop on mismatch. - * (for details, see gcc PR 15089) - */ -#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" - -#ifndef __ASSEMBLY__ - -#include <linux/compiler.h> -#include <linux/linkage.h> -#include <linux/irqflags.h> - -#include <asm/outercache.h> - -struct thread_info; -struct task_struct; - -/* information about the system we're running on */ -extern unsigned int system_rev; -extern unsigned int system_serial_low; -extern unsigned int system_serial_high; -extern unsigned int mem_fclk_21285; - -struct pt_regs; - -void die(const char *msg, struct pt_regs *regs, int err); - -struct siginfo; -void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, - unsigned long err, unsigned long trap); - -void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, - struct pt_regs *), - int sig, int code, const char *name); - -void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, - struct pt_regs *), - int sig, int code, const char *name); - -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -struct mm_struct; -extern void show_pte(struct mm_struct *mm, unsigned long addr); -extern void __show_regs(struct pt_regs *); - -extern int __pure cpu_architecture(void); -extern void cpu_init(void); - -void arm_machine_restart(char mode, const char *cmd); -extern void (*arm_pm_restart)(char str, const char *cmd); - -#define UDBG_UNDEFINED (1 << 0) -#define UDBG_SYSCALL (1 << 1) -#define UDBG_BADABORT (1 << 2) -#define UDBG_SEGV (1 << 3) -#define UDBG_BUS (1 << 4) - -extern unsigned int user_debug; - -#if __LINUX_ARM_ARCH__ >= 4 -#define vectors_high() (cr_alignment & CR_V) -#else -#define vectors_high() (0) -#endif - -#if __LINUX_ARM_ARCH__ >= 7 || \ - (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define sev() __asm__ __volatile__ ("sev" : : : "memory") -#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") -#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") -#endif - -#if __LINUX_ARM_ARCH__ >= 7 -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") 
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") -#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ - : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ - : : "r" (0) : "memory") -#elif defined(CONFIG_CPU_FA526) -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ - : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") -#else -#define isb() __asm__ __volatile__ ("" : : : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") -#endif - -#ifdef CONFIG_ARCH_HAS_BARRIERS -#include <mach/barriers.h> -#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb() do { dsb(); outer_sync(); } while (0) -#define rmb() dsb() -#define wmb() mb() -#else -#include <asm/memory.h> -#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#endif - -#ifndef CONFIG_SMP -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() -#endif - -#define read_barrier_depends() do { } while(0) -#define smp_read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) -#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); - -extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ -extern unsigned long cr_alignment; /* defined in entry-armv.S */ - -static inline unsigned int get_cr(void) -{ - unsigned int val; - asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); - return val; -} - -static inline void set_cr(unsigned int val) -{ - asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" - : : "r" (val) : "cc"); - isb(); -} - -#ifndef CONFIG_SMP -extern void adjust_cr(unsigned long mask, unsigned long set); -#endif - -#define CPACC_FULL(n) (3 << (n * 2)) -#define CPACC_SVC(n) (1 << (n * 2)) -#define CPACC_DISABLE(n) (0 << (n * 2)) - -static inline unsigned int get_copro_access(void) -{ - unsigned int val; - asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" - : "=r" (val) : : "cc"); - return val; -} - -static inline void set_copro_access(unsigned int val) -{ - asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" - : : "r" (val) : "cc"); - isb(); -} - -/* - * switch_mm() may do a full cache flush over the context switch, - * so enable interrupts over the context switch to avoid high - * latency. - */ -#define __ARCH_WANT_INTERRUPTS_ON_CTXSW - -/* - * switch_to(prev, next) should switch from task `prev' to `next' - * `prev' will never be the same as `next'. schedule() itself - * contains the memory barrier to tell GCC not to cache `current'. 
- */ -extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); - -#define switch_to(prev,next,last) \ -do { \ - last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ -} while (0) - -#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) -/* - * On the StrongARM, "swp" is terminally broken since it bypasses the - * cache totally. This means that the cache becomes inconsistent, and, - * since we use normal loads/stores as well, this is really bad. - * Typically, this causes oopsen in filp_close, but could have other, - * more disastrous effects. There are two work-arounds: - * 1. Disable interrupts and emulate the atomic swap - * 2. Clean the cache, perform atomic swap, flush the cache - * - * We choose (1) since its the "easiest" to achieve here and is not - * dependent on the processor type. - * - * NOTE that this solution won't work on an SMP system, so explcitly - * forbid it here. - */ -#define swp_is_buggy -#endif - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - extern void __bad_xchg(volatile void *, int); - unsigned long ret; -#ifdef swp_is_buggy - unsigned long flags; -#endif -#if __LINUX_ARM_ARCH__ >= 6 - unsigned int tmp; -#endif - - smp_mb(); - - switch (size) { -#if __LINUX_ARM_ARCH__ >= 6 - case 1: - asm volatile("@ __xchg1\n" - "1: ldrexb %0, [%3]\n" - " strexb %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - "1: ldrex %0, [%3]\n" - " strex %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; -#elif defined(swp_is_buggy) -#ifdef CONFIG_SMP -#error SMP is not supported on this platform -#endif - case 1: - raw_local_irq_save(flags); - ret = *(volatile unsigned char *)ptr; - *(volatile unsigned char *)ptr = x; - raw_local_irq_restore(flags); - break; - - case 4: - raw_local_irq_save(flags); - ret = *(volatile unsigned long *)ptr; - *(volatile unsigned long *)ptr = x; - raw_local_irq_restore(flags); - break; -#else - case 1: - asm volatile("@ __xchg1\n" - " swpb %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - " swp %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; -#endif - default: - __bad_xchg(ptr, size), ret = 0; - break; - } - smp_mb(); - - return ret; -} - -extern void disable_hlt(void); -extern void enable_hlt(void); - -void cpu_idle_wait(void); - -#include <asm-generic/cmpxchg-local.h> - -#if __LINUX_ARM_ARCH__ < 6 -/* min ARCH < ARMv6 */ - -#ifdef CONFIG_SMP -#error "SMP is not supported on this platform" -#endif - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#ifndef CONFIG_SMP -#include <asm-generic/cmpxchg.h> -#endif - -#else /* min ARCH >= ARMv6 */ - -extern void __bad_cmpxchg(volatile void *ptr, int size); - -/* - * cmpxchg only support 32-bits operands on ARMv6. 
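As a reminder of what the interface being carved out of this header provides: cmpxchg(ptr, old, new) atomically stores new only if *ptr still equals old, and returns the value it actually found. The classic lock-free retry loop, as a generic usage sketch rather than anything taken from this patch:

#include <linux/atomic.h>       /* brings in the ARM cmpxchg() definitions */
#include <linux/compiler.h>

static unsigned int counter;

/* Add `val` to `counter` without taking a lock. */
static void counter_add(unsigned int val)
{
        unsigned int old, seen;

        do {
                old  = ACCESS_ONCE(counter);
                seen = cmpxchg(&counter, old, old + val);
        } while (seen != old);  /* lost a race with another updater: retry */
}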
- */ - -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long oldval, res; - - switch (size) { -#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ - case 1: - do { - asm volatile("@ __cmpxchg1\n" - " ldrexb %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexbeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; - case 2: - do { - asm volatile("@ __cmpxchg1\n" - " ldrexh %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexheq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; -#endif - case 4: - do { - asm volatile("@ __cmpxchg4\n" - " ldrex %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; - default: - __bad_cmpxchg(ptr, size); - oldval = 0; - } - - return oldval; -} - -static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - smp_mb(); - ret = __cmpxchg(ptr, old, new, size); - smp_mb(); - - return ret; -} - -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - switch (size) { -#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ - case 1: - case 2: - ret = __cmpxchg_local_generic(ptr, old, new, size); - break; -#endif - default: - ret = __cmpxchg(ptr, old, new, size); - } - - return ret; -} - -#define cmpxchg_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ - -/* - * Note : ARMv7-M (currently unsupported by Linux) does not support - * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should - * not be allowed to use __cmpxchg64. 
- */ -static inline unsigned long long __cmpxchg64(volatile void *ptr, - unsigned long long old, - unsigned long long new) -{ - register unsigned long long oldval asm("r0"); - register unsigned long long __old asm("r2") = old; - register unsigned long long __new asm("r4") = new; - unsigned long res; - - do { - asm volatile( - " @ __cmpxchg8\n" - " ldrexd %1, %H1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " teqeq %H1, %H3\n" - " strexdeq %0, %4, %H4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (__old), "r" (__new) - : "memory", "cc"); - } while (res); - - return oldval; -} - -static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, - unsigned long long old, - unsigned long long new) -{ - unsigned long long ret; - - smp_mb(); - ret = __cmpxchg64(ptr, old, new); - smp_mb(); - - return ret; -} - -#define cmpxchg64(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) - -#define cmpxchg64_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) - -#else /* min ARCH = ARMv6 */ - -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#endif - -#endif /* __LINUX_ARM_ARCH__ >= 6 */ - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif /* __KERNEL__ */ - -#endif +/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */ +#include <asm/barrier.h> +#include <asm/compiler.h> +#include <asm/cmpxchg.h> +#include <asm/exec.h> +#include <asm/switch_to.h> +#include <asm/system_info.h> +#include <asm/system_misc.h> diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h new file mode 100644 index 00000000000..dfd386d0c02 --- /dev/null +++ b/arch/arm/include/asm/system_info.h @@ -0,0 +1,27 @@ +#ifndef __ASM_ARM_SYSTEM_INFO_H +#define __ASM_ARM_SYSTEM_INFO_H + +#define CPU_ARCH_UNKNOWN 0 +#define CPU_ARCH_ARMv3 1 +#define CPU_ARCH_ARMv4 2 +#define CPU_ARCH_ARMv4T 3 +#define CPU_ARCH_ARMv5 4 +#define CPU_ARCH_ARMv5T 5 +#define CPU_ARCH_ARMv5TE 6 +#define CPU_ARCH_ARMv5TEJ 7 +#define CPU_ARCH_ARMv6 8 +#define CPU_ARCH_ARMv7 9 + +#ifndef __ASSEMBLY__ + +/* information about the system we're running on */ +extern unsigned int system_rev; +extern unsigned int system_serial_low; +extern unsigned int system_serial_high; +extern unsigned int mem_fclk_21285; + +extern int __pure cpu_architecture(void); + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_ARM_SYSTEM_INFO_H */ diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h new file mode 100644 index 00000000000..5a85f148b60 --- /dev/null +++ b/arch/arm/include/asm/system_misc.h @@ -0,0 +1,29 @@ +#ifndef __ASM_ARM_SYSTEM_MISC_H +#define __ASM_ARM_SYSTEM_MISC_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/irqflags.h> + +extern void cpu_init(void); + +void soft_restart(unsigned long); +extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_idle)(void); + +#define UDBG_UNDEFINED (1 << 0) +#define UDBG_SYSCALL (1 << 1) +#define UDBG_BADABORT (1 << 2) +#define UDBG_SEGV (1 << 3) +#define UDBG_BUS (1 << 4) + +extern unsigned int user_debug; + +extern void disable_hlt(void); +extern void enable_hlt(void); + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_ARM_SYSTEM_MISC_H */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 7b5cc8dae06..af7b0bda335 100644 --- a/arch/arm/include/asm/thread_info.h +++ 
b/arch/arm/include/asm/thread_info.h @@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *); extern void vfp_sync_hwstate(struct thread_info *); extern void vfp_flush_hwstate(struct thread_info *); +struct user_vfp; +struct user_vfp_exc; + +extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, + struct user_vfp_exc __user *); +extern int vfp_restore_user_hwstate(struct user_vfp __user *, + struct user_vfp_exc __user *); #endif /* @@ -129,6 +136,7 @@ extern void vfp_flush_hwstate(struct thread_info *); /* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active + * TIF_SYSCAL_AUDIT - syscall auditing active * TIF_SIGPENDING - signal pending * TIF_NEED_RESCHED - rescheduling necessary * TIF_NOTIFY_RESUME - callback before returning to user @@ -139,27 +147,30 @@ extern void vfp_flush_hwstate(struct thread_info *); #define TIF_NEED_RESCHED 1 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_SYSCALL_TRACE 8 +#define TIF_SYSCALL_AUDIT 9 #define TIF_POLLING_NRFLAG 16 #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ -#define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 #define TIF_SECCOMP 21 +#define TIF_SWITCH_MM 22 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) -#define _TIF_FREEZE (1 << TIF_FREEZE) -#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +/* Checks for any syscall work in entry-common.S */ +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) + /* * Change these and you break ASM code in entry-common.S */ -#define _TIF_WORK_MASK 0x000000ff +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 265f908c4a6..314d4664eae 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -198,12 +198,30 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { pgtable_page_dtor(pte); - tlb_add_flush(tlb, addr); + + /* + * With the classic ARM MMU, a pte page has two corresponding pmd + * entries, each covering 1MB. 
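A concrete example of the addresses the following lines record (classic 2-level format with 4K pages and PMD_SHIFT of 21, so one Linux pte page spans a 2MB pmd made of two 1MB hardware sections); the address is made up for illustration:

#include <assert.h>

int main(void)
{
        unsigned long pmd_mask  = ~((1UL << 21) - 1);   /* PMD_MASK  */
        unsigned long sz_1m     = 1UL << 20;            /* SZ_1M     */
        unsigned long page_size = 1UL << 12;            /* PAGE_SIZE */

        unsigned long addr = 0x7f5a3000UL & pmd_mask;   /* -> 0x7f400000 */

        /* One flush address lands in each 1MB half, so the eventual TLB range
         * flush covers both hardware sections backing the freed pte page. */
        assert(addr + sz_1m - page_size == 0x7f4ff000UL);
        assert(addr + sz_1m             == 0x7f500000UL);
        return 0;
}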
+ */ + addr &= PMD_MASK; + tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); + tlb_add_flush(tlb, addr + SZ_1M); + tlb_remove_page(tlb, pte); } +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, + unsigned long addr) +{ +#ifdef CONFIG_ARM_LPAE + tlb_add_flush(tlb, addr); + tlb_remove_page(tlb, virt_to_page(pmdp)); +#endif +} + #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) -#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) +#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) #define tlb_migrate_finish(mm) do { } while (0) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 02b2f820398..6e924d3a77e 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -65,21 +65,6 @@ #define MULTI_TLB 1 #endif -#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) - -#ifdef CONFIG_CPU_TLB_V3 -# define v3_possible_flags v3_tlb_flags -# define v3_always_flags v3_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v3 -# endif -#else -# define v3_possible_flags 0 -# define v3_always_flags (-1UL) -#endif - #define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE) #ifdef CONFIG_CPU_TLB_V4WT @@ -298,8 +283,7 @@ extern struct cpu_tlb_fns cpu_tlb; * implemented the "%?" method, but this has been discontinued due to too * many people getting it wrong. */ -#define possible_tlb_flags (v3_possible_flags | \ - v4_possible_flags | \ +#define possible_tlb_flags (v4_possible_flags | \ v4wbi_possible_flags | \ fr_possible_flags | \ v4wb_possible_flags | \ @@ -307,8 +291,7 @@ extern struct cpu_tlb_fns cpu_tlb; v6wbi_possible_flags | \ v7wbi_possible_flags) -#define always_tlb_flags (v3_always_flags & \ - v4_always_flags & \ +#define always_tlb_flags (v4_always_flags & \ v4wbi_always_flags & \ fr_always_flags & \ v4wb_always_flags & \ @@ -318,6 +301,21 @@ extern struct cpu_tlb_fns cpu_tlb; #define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) +#define __tlb_op(f, insnarg, arg) \ + do { \ + if (always_tlb_flags & (f)) \ + asm("mcr " insnarg \ + : : "r" (arg) : "cc"); \ + else if (possible_tlb_flags & (f)) \ + asm("tst %1, %2\n\t" \ + "mcrne " insnarg \ + : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \ + : "cc"); \ + } while (0) + +#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) +#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) + static inline void local_flush_tlb_all(void) { const int zero = 0; @@ -326,16 +324,11 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - if (tlb_flag(TLB_V3_FULL)) - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) - asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) - asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL)) - asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V7_UIS_FULL)) - asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); + tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); + tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); + tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); + tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); + tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); if (tlb_flag(TLB_BARRIER)) { dsb(); @@ -352,29 +345,23 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) 
if (tlb_flag(TLB_WB)) dsb(); - if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - if (tlb_flag(TLB_V3_FULL)) - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V4_U_FULL)) - asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V4_D_FULL)) - asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V4_I_FULL)) - asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); + if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { + tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); + tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); + tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); + tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); + } + put_cpu(); } - put_cpu(); - - if (tlb_flag(TLB_V6_U_ASID)) - asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); - if (tlb_flag(TLB_V6_D_ASID)) - asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc"); - if (tlb_flag(TLB_V6_I_ASID)) - asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc"); - if (tlb_flag(TLB_V7_UIS_ASID)) + + tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); + tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); + tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); #ifdef CONFIG_ARM_ERRATA_720789 - asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero); #else - asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid); #endif if (tlb_flag(TLB_BARRIER)) @@ -392,30 +379,23 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - if (tlb_flag(TLB_V3_PAGE)) - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); - if (tlb_flag(TLB_V4_U_PAGE)) - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); - if (tlb_flag(TLB_V4_D_PAGE)) - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); - if (tlb_flag(TLB_V4_I_PAGE)) - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); + if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); + tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); + tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); + tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); } - if (tlb_flag(TLB_V6_U_PAGE)) - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); - if (tlb_flag(TLB_V6_D_PAGE)) - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); - if (tlb_flag(TLB_V6_I_PAGE)) - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); - if (tlb_flag(TLB_V7_UIS_PAGE)) + tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); + tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); + tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); #ifdef CONFIG_ARM_ERRATA_720789 - asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); #else - asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr); #endif if (tlb_flag(TLB_BARRIER)) @@ -432,25 +412,17 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - if (tlb_flag(TLB_V3_PAGE)) - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_V4_U_PAGE)) - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); - if 
(tlb_flag(TLB_V4_D_PAGE)) - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_V4_I_PAGE)) - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); + tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); + tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); + tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); + tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_V6_U_PAGE)) - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_V6_D_PAGE)) - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_V6_I_PAGE)) - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_V7_UIS_PAGE)) - asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); + tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); + tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); + tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); if (tlb_flag(TLB_BARRIER)) { dsb(); @@ -475,13 +447,8 @@ static inline void flush_pmd_entry(void *pmd) { const unsigned int __tlb_flag = __cpu_tlb_flags; - if (tlb_flag(TLB_DCLEAN)) - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" - : : "r" (pmd) : "cc"); - - if (tlb_flag(TLB_L2CLEAN_FR)) - asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd" - : : "r" (pmd) : "cc"); + tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); + tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); if (tlb_flag(TLB_WB)) dsb(); @@ -491,15 +458,11 @@ static inline void clean_pmd_entry(void *pmd) { const unsigned int __tlb_flag = __cpu_tlb_flags; - if (tlb_flag(TLB_DCLEAN)) - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" - : : "r" (pmd) : "cc"); - - if (tlb_flag(TLB_L2CLEAN_FR)) - asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd" - : : "r" (pmd) : "cc"); + tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); + tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); } +#undef tlb_op #undef tlb_flag #undef always_tlb_flags #undef possible_tlb_flags diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 60843eb0f61..73409e6c025 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -7,6 +7,8 @@ .macro set_tls_v6k, tp, tmp1, tmp2 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + mov \tmp1, #0 + mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register .endm .macro set_tls_v6, tp, tmp1, tmp2 @@ -15,6 +17,8 @@ mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + movne \tmp1, #0 + mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 .endm diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 5b29a667362..f555bb3664d 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -46,7 +46,7 @@ static inline int in_exception_text(unsigned long ptr) return in ? 
: __in_irqentry_text(ptr); } -extern void __init early_trap_init(void); +extern void __init early_trap_init(void *); extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h index 48192ac3a23..28beab917ff 100644 --- a/arch/arm/include/asm/types.h +++ b/arch/arm/include/asm/types.h @@ -3,12 +3,6 @@ #include <asm-generic/int-ll64.h> -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -#endif /* __ASSEMBLY__ */ - /* * These aren't exported outside the kernel to avoid name space clashes */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index b293616a1a1..71f6536d17a 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -16,8 +16,8 @@ #include <asm/errno.h> #include <asm/memory.h> #include <asm/domain.h> -#include <asm/system.h> #include <asm/unified.h> +#include <asm/compiler.h> #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -227,7 +227,7 @@ do { \ #define __get_user_asm_byte(x,addr,err) \ __asm__ __volatile__( \ - "1: " T(ldrb) " %1,[%2],#0\n" \ + "1: " TUSER(ldrb) " %1,[%2],#0\n" \ "2:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -263,7 +263,7 @@ do { \ #define __get_user_asm_word(x,addr,err) \ __asm__ __volatile__( \ - "1: " T(ldr) " %1,[%2],#0\n" \ + "1: " TUSER(ldr) " %1,[%2],#0\n" \ "2:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -308,7 +308,7 @@ do { \ #define __put_user_asm_byte(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: " T(strb) " %1,[%2],#0\n" \ + "1: " TUSER(strb) " %1,[%2],#0\n" \ "2:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -341,7 +341,7 @@ do { \ #define __put_user_asm_word(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: " T(str) " %1,[%2],#0\n" \ + "1: " TUSER(str) " %1,[%2],#0\n" \ "2:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -366,10 +366,10 @@ do { \ #define __put_user_asm_dword(x,__pu_addr,err) \ __asm__ __volatile__( \ - ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \ - ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \ - THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \ - THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \ + ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ + ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ + THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \ + THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h index bc631161e9c..f5989f46b4d 100644 --- a/arch/arm/include/asm/unified.h +++ b/arch/arm/include/asm/unified.h @@ -37,8 +37,8 @@ #define THUMB(x...) x #ifdef __ASSEMBLY__ #define W(instr) instr.w -#endif #define BSYM(sym) sym + 1 +#endif #else /* !CONFIG_THUMB2_KERNEL */ @@ -49,8 +49,8 @@ #define THUMB(x...) #ifdef __ASSEMBLY__ #define W(instr) instr -#endif #define BSYM(sym) sym +#endif #endif /* CONFIG_THUMB2_KERNEL */ diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 4a112378380..512cd147345 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -427,7 +427,8 @@ /* * The following syscalls are obsolete and no longer available for EABI. 
*/ -#if defined(__ARM_EABI__) && !defined(__KERNEL__) +#if !defined(__KERNEL__) +#if defined(__ARM_EABI__) #undef __NR_time #undef __NR_umount #undef __NR_stime @@ -441,6 +442,7 @@ #undef __NR_syscall #undef __NR_ipc #endif +#endif #ifdef __KERNEL__
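
The tlbflush.h hunks above fold the repeated "if (tlb_flag(X)) asm("mcr ...")" sequences into the new tlb_op()/tlb_l2_op() helpers, which emit the coprocessor write unconditionally when the flag is set for every TLB type built into the kernel, behind a run-time test (tst; mcrne) when the flag is only possibly set, and not at all otherwise. The following is a minimal, host-compilable sketch of that dispatch logic only, not the kernel macro: do_mcr() and the example flag and mask values are hypothetical stand-ins chosen for illustration.

/*
 * Host-compilable model of the tlb_op() dispatch added in tlbflush.h.
 * NOT kernel code: do_mcr() and the flag/mask values are made-up
 * stand-ins so the always/possible logic can be compiled and run anywhere.
 */
#include <stdio.h>

#define TLB_V4_U_FULL	(1u << 0)
#define TLB_V6_U_FULL	(1u << 1)
#define TLB_V7_UIS_FULL	(1u << 2)

/* Flags common to every TLB type configured into this (pretend) kernel... */
#define always_tlb_flags	(TLB_V4_U_FULL)
/* ...versus flags that only some of the configured TLB types need. */
#define possible_tlb_flags	(TLB_V4_U_FULL | TLB_V6_U_FULL | TLB_V7_UIS_FULL)

/* Stand-in for the "mcr p15, 0, %0, <regs>" coprocessor write. */
static void do_mcr(const char *regs, unsigned long arg)
{
	printf("mcr p15, 0, %%0, %s  (arg=%#lx)\n", regs, arg);
}

/*
 * Like the real macro, this expects a local '__tlb_flag' (the flags the
 * running CPU actually reports) to be in scope at the call site.
 *   always-set flags    -> operation emitted unconditionally
 *   possible-only flags -> emitted behind a run-time test (tst; mcrne in
 *                          the real assembly, a plain 'if' here)
 *   flags never built in -> dropped entirely at compile time
 */
#define tlb_op(f, regs, arg)						\
	do {								\
		if (always_tlb_flags & (f))				\
			do_mcr(regs, (unsigned long)(arg));		\
		else if ((possible_tlb_flags & (f)) &&			\
			 (__tlb_flag & (f)))				\
			do_mcr(regs, (unsigned long)(arg));		\
	} while (0)

int main(void)
{
	/* Pretend the CPU probed at boot reported a v7 unified TLB. */
	const unsigned int __tlb_flag = TLB_V7_UIS_FULL;

	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", 0);	/* unconditional */
	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", 0);		/* run-time gated */
	return 0;
}

Because always_tlb_flags and possible_tlb_flags are compile-time constants, the compiler can drop the branches for TLB types that are not configured; in the real helper the possible-only case costs a single tst plus a conditionally executed mcr rather than a branch.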