From 313819d4c25cdd46b42e85d70870b713bb9189ab Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Tue, 12 Jun 2012 12:04:29 +0200 Subject: ARM: remove linkup-l1110.h No file includes this header. Nothing seems interested in the things this header provides. It appears that it has never been included since at least v2.6.12-rc2. It can safely be removed. Signed-off-by: Paul Bolle Acked-by: Dmitry Artamonow Signed-off-by: Russell King --- arch/arm/include/asm/hardware/linkup-l1110.h | 48 ---------------------------- 1 file changed, 48 deletions(-) delete mode 100644 arch/arm/include/asm/hardware/linkup-l1110.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/linkup-l1110.h b/arch/arm/include/asm/hardware/linkup-l1110.h deleted file mode 100644 index 7ec91168a57..00000000000 --- a/arch/arm/include/asm/hardware/linkup-l1110.h +++ /dev/null @@ -1,48 +0,0 @@ -/* -* -* Definitions for H3600 Handheld Computer -* -* Copyright 2001 Compaq Computer Corporation. -* -* Use consistent with the GNU GPL is permitted, -* provided that this copyright notice is -* preserved in its entirety in all copies and derived works. -* -* COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, -* AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS -* FITNESS FOR ANY PARTICULAR PURPOSE. -* -* Author: Jamey Hicks. -* -*/ - -/* LinkUp Systems PCCard/CompactFlash Interface for SA-1100 */ - -/* PC Card Status Register */ -#define LINKUP_PRS_S1 (1 << 0) /* voltage control bits S1-S4 */ -#define LINKUP_PRS_S2 (1 << 1) -#define LINKUP_PRS_S3 (1 << 2) -#define LINKUP_PRS_S4 (1 << 3) -#define LINKUP_PRS_BVD1 (1 << 4) -#define LINKUP_PRS_BVD2 (1 << 5) -#define LINKUP_PRS_VS1 (1 << 6) -#define LINKUP_PRS_VS2 (1 << 7) -#define LINKUP_PRS_RDY (1 << 8) -#define LINKUP_PRS_CD1 (1 << 9) -#define LINKUP_PRS_CD2 (1 << 10) - -/* PC Card Command Register */ -#define LINKUP_PRC_S1 (1 << 0) -#define LINKUP_PRC_S2 (1 << 1) -#define LINKUP_PRC_S3 (1 << 2) -#define LINKUP_PRC_S4 (1 << 3) -#define LINKUP_PRC_RESET (1 << 4) -#define LINKUP_PRC_APOE (1 << 5) /* Auto Power Off Enable: clears S1-S4 when either nCD goes high */ -#define LINKUP_PRC_CFE (1 << 6) /* CompactFlash mode Enable: addresses A[10:0] only, A[25:11] high */ -#define LINKUP_PRC_SOE (1 << 7) /* signal output driver enable */ -#define LINKUP_PRC_SSP (1 << 8) /* sock select polarity: 0 for socket 0, 1 for socket 1 */ -#define LINKUP_PRC_MBZ (1 << 15) /* must be zero */ - -struct linkup_l1110 { - volatile short prc; -}; -- cgit v1.2.3 From 158e8bfe802f730f9ea7cde32eee8b43285bdd4a Mon Sep 17 00:00:00 2001 From: Alessandro Rubini Date: Sun, 24 Jun 2012 12:46:26 +0100 Subject: ARM: 7432/1: use the new linux/sizes.h Signed-off-by: Alessandro Rubini Acked-by: Giancarlo Asnaghi Acked-by: Linus Walleij Cc: Alan Cox Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index fcb575747e5..e965f1b560f 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -16,7 +16,7 @@ #include #include #include -#include +#include #ifdef CONFIG_NEED_MACH_MEMORY_H #include -- cgit v1.2.3 From bc89663aa5c7ca620f58c34ab531ca409119becc Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 28 Jun 2012 14:42:08 +0800 Subject: ARM: fiq: change FIQ_START to a variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The commit a2be01b (ARM: 
only include mach/irqs.h for !SPARSE_IRQ) makes mach/irqs.h only be included for !SPARSE_IRQ build. There are a nubmer of platforms have FIQ_START defined in mach/irqs.h for FIQ support. arch/arm/mach-rpc/include/mach/irqs.h:#define FIQ_START 64 arch/arm/mach-s3c24xx/include/mach/irqs.h:#define FIQ_START IRQ_EINT0 arch/arm/plat-mxc/include/mach/irqs.h:#define FIQ_START 0 If SPARSE_IRQ is enabled for any of these platforms, the following compile error will be seen. arch/arm/kernel/fiq.c: In function ‘enable_fiq’: arch/arm/kernel/fiq.c:127:19: error: ‘FIQ_START’ undeclared (first use in this function) arch/arm/kernel/fiq.c:127:19: note: each undeclared identifier is reported only once for each function it appears in arch/arm/kernel/fiq.c: In function ‘disable_fiq’: arch/arm/kernel/fiq.c:132:20: error: ‘FIQ_START’ undeclared (first use in this function) The patch changes fiq code to have init_FIQ take FIQ_START from platforms as a parameter and assign it to variable fiq_start which is to replace FIQ_START uses in enable_fiq/disable_fiq. Signed-off-by: Shawn Guo Cc: Kukjin Kim Cc: Sascha Hauer Cc: Rob Herring Acked-by: Russell King --- arch/arm/include/asm/mach/irq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index febe495d0c6..15cb035309f 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -17,7 +17,7 @@ struct seq_file; /* * This is internal. Do not use it. */ -extern void init_FIQ(void); +extern void init_FIQ(int); extern int show_fiq_list(struct seq_file *, int); #ifdef CONFIG_MULTI_IRQ_HANDLER -- cgit v1.2.3 From a5203c4ce6750730b1d95a8bc1e8214765450f7e Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Sun, 8 Jul 2012 22:51:44 +0100 Subject: ARM: 7460/1: remove asm/locks.h Commit 64ac24e738823161693bf791f87adc802cf529ff ("Generic semaphore implementation") removed the last include of this header. Apparently it was just an oversight to keep this header. It can safely be removed now. Acked-by: Will Deacon Signed-off-by: Paul Bolle Signed-off-by: Russell King --- arch/arm/include/asm/locks.h | 274 ------------------------------------------- 1 file changed, 274 deletions(-) delete mode 100644 arch/arm/include/asm/locks.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h deleted file mode 100644 index ef4c897772d..00000000000 --- a/arch/arm/include/asm/locks.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - * arch/arm/include/asm/locks.h - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Interrupt safe locking assembler. 
- */ -#ifndef __ASM_PROC_LOCKS_H -#define __ASM_PROC_LOCKS_H - -#if __LINUX_ARM_ARCH__ >= 6 - -#define __down_op(ptr,fail) \ - ({ \ - __asm__ __volatile__( \ - "@ down_op\n" \ -"1: ldrex lr, [%0]\n" \ -" sub lr, lr, %1\n" \ -" strex ip, lr, [%0]\n" \ -" teq ip, #0\n" \ -" bne 1b\n" \ -" teq lr, #0\n" \ -" movmi ip, %0\n" \ -" blmi " #fail \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - }) - -#define __down_op_ret(ptr,fail) \ - ({ \ - unsigned int ret; \ - __asm__ __volatile__( \ - "@ down_op_ret\n" \ -"1: ldrex lr, [%1]\n" \ -" sub lr, lr, %2\n" \ -" strex ip, lr, [%1]\n" \ -" teq ip, #0\n" \ -" bne 1b\n" \ -" teq lr, #0\n" \ -" movmi ip, %1\n" \ -" movpl ip, #0\n" \ -" blmi " #fail "\n" \ -" mov %0, ip" \ - : "=&r" (ret) \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - ret; \ - }) - -#define __up_op(ptr,wake) \ - ({ \ - smp_mb(); \ - __asm__ __volatile__( \ - "@ up_op\n" \ -"1: ldrex lr, [%0]\n" \ -" add lr, lr, %1\n" \ -" strex ip, lr, [%0]\n" \ -" teq ip, #0\n" \ -" bne 1b\n" \ -" cmp lr, #0\n" \ -" movle ip, %0\n" \ -" blle " #wake \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - }) - -/* - * The value 0x01000000 supports up to 128 processors and - * lots of processes. BIAS must be chosen such that sub'ing - * BIAS once per CPU will result in the long remaining - * negative. - */ -#define RW_LOCK_BIAS 0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -#define __down_op_write(ptr,fail) \ - ({ \ - __asm__ __volatile__( \ - "@ down_op_write\n" \ -"1: ldrex lr, [%0]\n" \ -" sub lr, lr, %1\n" \ -" strex ip, lr, [%0]\n" \ -" teq ip, #0\n" \ -" bne 1b\n" \ -" teq lr, #0\n" \ -" movne ip, %0\n" \ -" blne " #fail \ - : \ - : "r" (ptr), "I" (RW_LOCK_BIAS) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - }) - -#define __up_op_write(ptr,wake) \ - ({ \ - smp_mb(); \ - __asm__ __volatile__( \ - "@ up_op_write\n" \ -"1: ldrex lr, [%0]\n" \ -" adds lr, lr, %1\n" \ -" strex ip, lr, [%0]\n" \ -" teq ip, #0\n" \ -" bne 1b\n" \ -" movcs ip, %0\n" \ -" blcs " #wake \ - : \ - : "r" (ptr), "I" (RW_LOCK_BIAS) \ - : "ip", "lr", "cc"); \ - }) - -#define __down_op_read(ptr,fail) \ - __down_op(ptr, fail) - -#define __up_op_read(ptr,wake) \ - ({ \ - smp_mb(); \ - __asm__ __volatile__( \ - "@ up_op_read\n" \ -"1: ldrex lr, [%0]\n" \ -" add lr, lr, %1\n" \ -" strex ip, lr, [%0]\n" \ -" teq ip, #0\n" \ -" bne 1b\n" \ -" teq lr, #0\n" \ -" moveq ip, %0\n" \ -" bleq " #wake \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - }) - -#else - -#define __down_op(ptr,fail) \ - ({ \ - __asm__ __volatile__( \ - "@ down_op\n" \ -" mrs ip, cpsr\n" \ -" orr lr, ip, #128\n" \ -" msr cpsr_c, lr\n" \ -" ldr lr, [%0]\n" \ -" subs lr, lr, %1\n" \ -" str lr, [%0]\n" \ -" msr cpsr_c, ip\n" \ -" movmi ip, %0\n" \ -" blmi " #fail \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - }) - -#define __down_op_ret(ptr,fail) \ - ({ \ - unsigned int ret; \ - __asm__ __volatile__( \ - "@ down_op_ret\n" \ -" mrs ip, cpsr\n" \ -" orr lr, ip, #128\n" \ -" msr cpsr_c, lr\n" \ -" ldr lr, [%1]\n" \ -" subs lr, lr, %2\n" \ -" str lr, [%1]\n" \ -" msr cpsr_c, ip\n" \ -" movmi ip, %1\n" \ -" movpl ip, #0\n" \ -" blmi " #fail "\n" \ -" mov %0, ip" \ - : "=&r" (ret) \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - ret; \ - }) - -#define __up_op(ptr,wake) \ - ({ \ - smp_mb(); \ - __asm__ __volatile__( \ - "@ up_op\n" \ -" mrs ip, cpsr\n" \ -" orr lr, ip, #128\n" \ -" msr cpsr_c, lr\n" \ -" ldr lr, [%0]\n" \ -" adds lr, lr, %1\n" \ -" str lr, [%0]\n" \ -" msr 
cpsr_c, ip\n" \ -" movle ip, %0\n" \ -" blle " #wake \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - }) - -/* - * The value 0x01000000 supports up to 128 processors and - * lots of processes. BIAS must be chosen such that sub'ing - * BIAS once per CPU will result in the long remaining - * negative. - */ -#define RW_LOCK_BIAS 0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -#define __down_op_write(ptr,fail) \ - ({ \ - __asm__ __volatile__( \ - "@ down_op_write\n" \ -" mrs ip, cpsr\n" \ -" orr lr, ip, #128\n" \ -" msr cpsr_c, lr\n" \ -" ldr lr, [%0]\n" \ -" subs lr, lr, %1\n" \ -" str lr, [%0]\n" \ -" msr cpsr_c, ip\n" \ -" movne ip, %0\n" \ -" blne " #fail \ - : \ - : "r" (ptr), "I" (RW_LOCK_BIAS) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - }) - -#define __up_op_write(ptr,wake) \ - ({ \ - __asm__ __volatile__( \ - "@ up_op_write\n" \ -" mrs ip, cpsr\n" \ -" orr lr, ip, #128\n" \ -" msr cpsr_c, lr\n" \ -" ldr lr, [%0]\n" \ -" adds lr, lr, %1\n" \ -" str lr, [%0]\n" \ -" msr cpsr_c, ip\n" \ -" movcs ip, %0\n" \ -" blcs " #wake \ - : \ - : "r" (ptr), "I" (RW_LOCK_BIAS) \ - : "ip", "lr", "cc"); \ - smp_mb(); \ - }) - -#define __down_op_read(ptr,fail) \ - __down_op(ptr, fail) - -#define __up_op_read(ptr,wake) \ - ({ \ - smp_mb(); \ - __asm__ __volatile__( \ - "@ up_op_read\n" \ -" mrs ip, cpsr\n" \ -" orr lr, ip, #128\n" \ -" msr cpsr_c, lr\n" \ -" ldr lr, [%0]\n" \ -" adds lr, lr, %1\n" \ -" str lr, [%0]\n" \ -" msr cpsr_c, ip\n" \ -" moveq ip, %0\n" \ -" bleq " #wake \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - }) - -#endif - -#endif -- cgit v1.2.3 From 546c2896a42202dbc7d02f7c6ec9948ac1bf511b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:43:41 +0100 Subject: ARM: 7446/1: spinlock: use ticket algorithm for ARMv6+ locking implementation Ticket spinlocks ensure locking fairness by introducing a FIFO-like nature to the granting of lock acquisitions and also reducing the thundering herd effect when spinning on a lock by allowing the cacheline to remain in a shared state amongst the waiting CPUs. This is especially important on systems where memory-access times are not necessarily uniform when accessing the lock structure (for example, on a multi-cluster platform where the lock is allocated into L1 when a CPU releases it). This patch implements the ticket spinlock algorithm for ARM, replacing the simpler implementation for ARMv6+ processors. Reviewed-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 72 +++++++++++++++++++++++------------ arch/arm/include/asm/spinlock_types.h | 17 ++++++++- 2 files changed, 63 insertions(+), 26 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 65fa3c88095..0da2effd4b3 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -59,18 +59,13 @@ static inline void dsb_sev(void) } /* - * ARMv6 Spin-locking. + * ARMv6 ticket-based spin-locking. * - * We exclusively read the old value. If it is zero, we may have - * won the lock, so we try exclusively storing it. A memory barrier - * is required after we get a lock, and before we release it, because - * V6 CPUs are assumed to have weakly ordered memory. - * - * Unlocked value: 0 - * Locked value: 1 + * A memory barrier is required after we get a lock, and before we + * release it, because V6 CPUs are assumed to have weakly ordered + * memory. 
*/ -#define arch_spin_is_locked(x) ((x)->lock != 0) #define arch_spin_unlock_wait(lock) \ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) @@ -79,31 +74,39 @@ static inline void dsb_sev(void) static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; + u32 newval; + arch_spinlock_t lockval; __asm__ __volatile__( -"1: ldrex %0, [%1]\n" -" teq %0, #0\n" - WFE("ne") -" strexeq %0, %2, [%1]\n" -" teqeq %0, #0\n" +"1: ldrex %0, [%3]\n" +" add %1, %0, %4\n" +" strex %2, %1, [%3]\n" +" teq %2, #0\n" " bne 1b" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) + : "=&r" (lockval), "=&r" (newval), "=&r" (tmp) + : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) : "cc"); + while (lockval.tickets.next != lockval.tickets.owner) { + wfe(); + lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); + } + smp_mb(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; + u32 slock; __asm__ __volatile__( -" ldrex %0, [%1]\n" -" teq %0, #0\n" -" strexeq %0, %2, [%1]" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) +" ldrex %0, [%2]\n" +" subs %1, %0, %0, ror #16\n" +" addeq %0, %0, %3\n" +" strexeq %1, %0, [%2]" + : "=&r" (slock), "=&r" (tmp) + : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) : "cc"); if (tmp == 0) { @@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) static inline void arch_spin_unlock(arch_spinlock_t *lock) { + unsigned long tmp; + u32 slock; + smp_mb(); __asm__ __volatile__( -" str %1, [%0]\n" - : - : "r" (&lock->lock), "r" (0) +" mov %1, #1\n" +"1: ldrex %0, [%2]\n" +" uadd16 %0, %0, %1\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (slock), "=&r" (tmp) + : "r" (&lock->slock) : "cc"); dsb_sev(); } +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); + return tickets.owner != tickets.next; +} + +static inline int arch_spin_is_contended(arch_spinlock_t *lock) +{ + struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); + return (tickets.next - tickets.owner) > 1; +} +#define arch_spin_is_contended arch_spin_is_contended + /* * RWLOCKS * diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index d14d197ae04..b262d2f8b47 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -5,11 +5,24 @@ # error "please don't include this file directly" #endif +#define TICKET_SHIFT 16 + typedef struct { - volatile unsigned int lock; + union { + u32 slock; + struct __raw_tickets { +#ifdef __ARMEB__ + u16 next; + u16 owner; +#else + u16 owner; + u16 next; +#endif + } tickets; + }; } arch_spinlock_t; -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } typedef struct { volatile unsigned int lock; -- cgit v1.2.3 From 881ccccb6bd3f0e1fff8b9addbe0de90e0b16166 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:44:13 +0100 Subject: ARM: 7447/1: rwlocks: remove unused branch labels from trylock routines The ARM arch_{read,write}_trylock implementations include unused backwards branch labels, since we don't retry the locking operation if the exclusive store fails. This patch removes the labels. 
Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 0da2effd4b3..b4ca707d0a6 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -182,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) unsigned long tmp; __asm__ __volatile__( -"1: ldrex %0, [%1]\n" +" ldrex %0, [%1]\n" " teq %0, #0\n" " strexeq %0, %2, [%1]" : "=&r" (tmp) @@ -268,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) unsigned long tmp, tmp2 = 1; __asm__ __volatile__( -"1: ldrex %0, [%2]\n" +" ldrex %0, [%2]\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]\n" : "=&r" (tmp), "+r" (tmp2) -- cgit v1.2.3 From 4295b898f5a5c7e62ae68e7a4ecc4b414622ffe6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:45:00 +0100 Subject: ARM: 7448/1: perf: remove arm_perf_pmu_ids global enumeration In order to provide PMU name strings compatible with the OProfile user ABI, an enumeration of all PMUs is currently used by perf to identify each PMU uniquely. Unfortunately, this does not scale well in the presence of multiple PMUs and creates a single, global namespace across all PMUs in the system. This patch removes the enumeration and instead uses the name string for the PMU to map onto the OProfile variant. perf_pmu_name is implemented for CPU PMUs, which is all that OProfile cares about anyway. Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/perf_event.h | 17 +---------------- arch/arm/include/asm/pmu.h | 3 +-- 2 files changed, 2 insertions(+), 18 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 00cbe10a50e..e074948d814 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,21 +12,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* ARM perf PMU IDs for use by internal perf clients. */ -enum arm_perf_pmu_ids { - ARM_PERF_PMU_ID_XSCALE1 = 0, - ARM_PERF_PMU_ID_XSCALE2, - ARM_PERF_PMU_ID_V6, - ARM_PERF_PMU_ID_V6MP, - ARM_PERF_PMU_ID_CA8, - ARM_PERF_PMU_ID_CA9, - ARM_PERF_PMU_ID_CA5, - ARM_PERF_PMU_ID_CA15, - ARM_PERF_PMU_ID_CA7, - ARM_NUM_PMU_IDS, -}; - -extern enum arm_perf_pmu_ids -armpmu_get_pmu_id(void); +/* Nothing to see here... */ #endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 90114faa9f3..4432305f4a2 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -103,10 +103,9 @@ struct pmu_hw_events { struct arm_pmu { struct pmu pmu; - enum arm_perf_pmu_ids id; enum arm_pmu_type type; cpumask_t active_irqs; - const char *name; + char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct hw_perf_event *evt, int idx); void (*disable)(struct hw_perf_event *evt, int idx); -- cgit v1.2.3 From 8c56cc8be5b38e3684eba96dc9b3f7ca7e495755 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:45:39 +0100 Subject: ARM: 7449/1: use generic strnlen_user and strncpy_from_user functions This patch implements the word-at-a-time interface for ARM using the same algorithm as x86. We use the fls macro from ARMv5 onwards, where we have a clz instruction available which saves us a mov instruction when targetting Thumb-2. 
For older CPUs, we use the magic 0x0ff0001 constant. Big-endian configurations make use of the implementation from asm-generic. With this implemented, we can replace our byte-at-a-time strnlen_user and strncpy_from_user functions with the optimised generic versions. Reviewed-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 27 ++++------------- arch/arm/include/asm/word-at-a-time.h | 55 +++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 21 deletions(-) create mode 100644 arch/arm/include/asm/word-at-a-time.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 71f6536d17a..479a6352e0b 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -189,6 +189,9 @@ static inline void set_fs(mm_segment_t fs) #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) +#define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + /* * The "__xxx" versions of the user access functions do not verify the * address space - it must have been done previously with a separate @@ -398,9 +401,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) #endif -extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); -extern unsigned long __must_check __strnlen_user(const char __user *s, long n); - static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) @@ -427,24 +427,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return n; } -static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res = -EFAULT; - if (access_ok(VERIFY_READ, src, 1)) - res = __strncpy_from_user(dst, src, count); - return res; -} - -#define strlen_user(s) strnlen_user(s, ~0UL >> 1) +extern long strncpy_from_user(char *dest, const char __user *src, long count); -static inline long __must_check strnlen_user(const char __user *s, long n) -{ - unsigned long res = 0; - - if (__addr_ok(s)) - res = __strnlen_user(s, n); - - return res; -} +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); #endif /* _ASMARM_UACCESS_H */ diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h new file mode 100644 index 00000000000..74b2d457857 --- /dev/null +++ b/arch/arm/include/asm/word-at-a-time.h @@ -0,0 +1,55 @@ +#ifndef __ASM_ARM_WORD_AT_A_TIME_H +#define __ASM_ARM_WORD_AT_A_TIME_H + +#ifndef __ARMEB__ + +/* + * Little-endian word-at-a-time zero byte handling. + * Heavily based on the x86 algorithm. 
+ */ +#include + +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, + const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +#define prep_zero_mask(a, bits, c) (bits) + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ + unsigned long ret; + +#if __LINUX_ARM_ARCH__ >= 5 + /* We have clz available. */ + ret = fls(mask) >> 3; +#else + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + ret = (0x0ff0001 + mask) >> 23; + /* Fix the 1 for 00 case */ + ret &= mask; +#endif + + return ret; +} + +#else /* __ARMEB__ */ +#include +#endif + +#endif /* __ASM_ARM_WORD_AT_A_TIME_H */ -- cgit v1.2.3 From b9a50f74905ad9126c91b495ece8a5f45434c643 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:46:08 +0100 Subject: ARM: 7450/1: dcache: select DCACHE_WORD_ACCESS for little-endian ARMv6+ CPUs DCACHE_WORD_ACCESS uses the word-at-a-time API for optimised string comparisons in the vfs layer. This patch implements support for load_unaligned_zeropad for ARM CPUs with native support for unaligned memory accesses (v6+) when running little-endian. Reviewed-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/word-at-a-time.h | 41 +++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h index 74b2d457857..4d52f92967a 100644 --- a/arch/arm/include/asm/word-at-a-time.h +++ b/arch/arm/include/asm/word-at-a-time.h @@ -48,6 +48,47 @@ static inline unsigned long find_zero(unsigned long mask) return ret; } +#ifdef CONFIG_DCACHE_WORD_ACCESS + +#define zero_bytemask(mask) (mask) + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. + */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret, offset; + + /* Load word from unaligned pointer addr */ + asm( + "1: ldr %0, [%2]\n" + "2:\n" + " .pushsection .fixup,\"ax\"\n" + " .align 2\n" + "3: and %1, %2, #0x3\n" + " bic %2, %2, #0x3\n" + " ldr %0, [%2]\n" + " lsl %1, %1, #0x3\n" + " lsr %0, %0, %1\n" + " b 2b\n" + " .popsection\n" + " .pushsection __ex_table,\"a\"\n" + " .align 3\n" + " .long 1b, 3b\n" + " .popsection" + : "=&r" (ret), "=&r" (offset) + : "r" (addr), "Qo" (*(unsigned long *)addr)); + + return ret; +} + + +#endif /* DCACHE_WORD_ACCESS */ + #else /* __ARMEB__ */ #include #endif -- cgit v1.2.3 From 923df96b9f31b7d08d8438ff9677326d9537accf Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:46:45 +0100 Subject: ARM: 7451/1: arch timer: implement read_current_timer and get_cycles This patch implements read_current_timer using the architected timers when they are selected via CONFIG_ARM_ARCH_TIMER. If they are detected not to be usable at runtime, we return -ENXIO to the caller. Furthermore, if read_current_timer is exported then we can implement get_cycles in terms of it for use as both an entropy source and for implementing __udelay and friends. 
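For illustration only (not part of the patch): once get_cycles() is backed by read_current_timer(), a caller can time a code region directly from the architected timer. This is a minimal sketch; time_operation() is a hypothetical helper, and get_cycles() evaluates to 0 when no usable timer is present.

#include <linux/timex.h>	/* get_cycles(), cycles_t */

/*
 * Hypothetical helper: run 'op' and return how many timer cycles it took.
 * When no usable architected timer is present, get_cycles() evaluates to 0,
 * so a zero or nonsensical delta must be treated as "no measurement".
 */
static cycles_t time_operation(void (*op)(void))
{
	cycles_t start = get_cycles();

	op();
	return get_cycles() - start;
}
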
Tested-by: Shinya Kuribayashi Reviewed-by: Stephen Boyd Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/arch_timer.h | 3 +++ arch/arm/include/asm/timex.h | 10 ++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index ed2e95d46e2..62e75475e57 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -1,7 +1,10 @@ #ifndef __ASMARM_ARCH_TIMER_H #define __ASMARM_ARCH_TIMER_H +#include + #ifdef CONFIG_ARM_ARCH_TIMER +#define ARCH_HAS_READ_CURRENT_TIMER int arch_timer_of_register(void); int arch_timer_sched_clock_init(void); #else diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index 3be8de3adab..ce119442277 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -12,13 +12,15 @@ #ifndef _ASMARM_TIMEX_H #define _ASMARM_TIMEX_H +#include #include typedef unsigned long cycles_t; -static inline cycles_t get_cycles (void) -{ - return 0; -} +#ifdef ARCH_HAS_READ_CURRENT_TIMER +#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) +#else +#define get_cycles() (0) +#endif #endif -- cgit v1.2.3 From d0a533b18235d36206b9b422efadb7cee444dfdb Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Jul 2012 15:47:17 +0100 Subject: ARM: 7452/1: delay: allow timer-based delay implementation to be selected This patch allows a timer-based delay implementation to be selected by switching the delay routines over to use get_cycles, which is implemented in terms of read_current_timer. This further allows us to skip the loop calibration and have a consistent delay function in the face of core frequency scaling. To avoid the pain of dealing with memory-mapped counters, this implementation uses the co-processor interface to the architected timers when they are available. The previous loop-based implementation is kept around for CPUs without the architected timers and we retain both the maximum delay (2ms) and the corresponding conversion factors for determining the number of loops required for a given interval. Since the indirection of the timer routines will only work when called from C, the sa1100 sleep routines are modified to branch to the loop-based delay functions directly. Tested-by: Shinya Kuribayashi Reviewed-by: Stephen Boyd Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/delay.h | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index b2deda18154..dc6145120de 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -6,9 +6,22 @@ #ifndef __ASM_ARM_DELAY_H #define __ASM_ARM_DELAY_H +#include #include /* HZ */ -extern void __delay(int loops); +#define MAX_UDELAY_MS 2 +#define UDELAY_MULT ((UL(2199023) * HZ) >> 11) +#define UDELAY_SHIFT 30 + +#ifndef __ASSEMBLY__ + +extern struct arm_delay_ops { + void (*delay)(unsigned long); + void (*const_udelay)(unsigned long); + void (*udelay)(unsigned long); +} arm_delay_ops; + +#define __delay(n) arm_delay_ops.delay(n) /* * This function intentionally does not exist; if you see references to @@ -23,22 +36,27 @@ extern void __bad_udelay(void); * division by multiplication: you don't have to worry about * loss of precision. * - * Use only for very small delays ( < 1 msec). 
Should probably use a + * Use only for very small delays ( < 2 msec). Should probably use a * lookup table, really, as the multiplications take much too long with * short delays. This is a "reasonable" implementation, though (and the * first constant multiplications gets optimized away if the delay is * a constant) */ -extern void __udelay(unsigned long usecs); -extern void __const_udelay(unsigned long); - -#define MAX_UDELAY_MS 2 +#define __udelay(n) arm_delay_ops.udelay(n) +#define __const_udelay(n) arm_delay_ops.const_udelay(n) #define udelay(n) \ (__builtin_constant_p(n) ? \ ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \ - __const_udelay((n) * ((2199023U*HZ)>>11))) : \ + __const_udelay((n) * UDELAY_MULT)) : \ __udelay(n)) +/* Loop-based definitions for assembly code. */ +extern void __loop_delay(unsigned long loops); +extern void __loop_udelay(unsigned long usecs); +extern void __loop_const_udelay(unsigned long); + +#endif /* __ASSEMBLY__ */ + #endif /* defined(_ARM_DELAY_H) */ -- cgit v1.2.3 From 4b7a7d889007ec22d4df741cc08ef79583d11189 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 22:53:10 +0800 Subject: arm: remove km_type definitions Signed-off-by: Cong Wang --- arch/arm/include/asm/kmap_types.h | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index e51b1e81df0..83eb2f77291 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -4,30 +4,6 @@ /* * This is the "bare minimum". AIO seems to require this. */ -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_L1_CACHE, - KM_L2_CACHE, - KM_KDB, - KM_TYPE_NR -}; - -#ifdef CONFIG_DEBUG_HIGHMEM -#define KM_NMI (-1) -#define KM_NMI_PTE (-1) -#define KM_IRQ_PTE (-1) -#endif +#define KM_TYPE_NR 16 #endif -- cgit v1.2.3 From c2794437091a4fda72c4a4f3567dd728dcc0c3c9 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 29 Feb 2012 18:10:58 -0600 Subject: ARM: Add fixed PCI i/o mapping This adds a fixed virtual mapping for PCI i/o addresses. The mapping is located at the last 2MB of vmalloc region (0xfee00000-0xff000000). 2MB is used to align with PMD size, but IO_SPACE_LIMIT is 1MB. The space is reserved after .map_io and can be mapped at any time later with pci_ioremap_io. Platforms which need early i/o mapping (e.g. for vga console) can call pci_map_io_early in their .map_io function. This has changed completely from the 1st implementation which only supported creating the static mapping at .map_io. 
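As a hedged usage sketch (the helper name and physical base are illustrative, not taken from this patch), a platform could map one 64K PCI I/O window per bus into the fixed virtual region with the pci_ioremap_io() call declared below:

#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/io.h>		/* pci_ioremap_io() */

/*
 * Hypothetical platform hook: map the 64K I/O window of PCI bus 'nr' into
 * the fixed PCI I/O virtual area at a 64K-per-bus offset. 'io_phys' is the
 * platform-specific physical base of that window (illustrative only).
 */
static int example_map_bus_io(int nr, phys_addr_t io_phys)
{
	return pci_ioremap_io(nr * SZ_64K, io_phys);
}
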
Signed-off-by: Rob Herring Cc: Russell King Acked-by: Nicolas Pitre --- arch/arm/include/asm/io.h | 8 ++++++++ arch/arm/include/asm/mach/map.h | 8 ++++++++ arch/arm/include/asm/mach/pci.h | 10 ++++++++++ 3 files changed, 26 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 815c669fec0..8f4db67533e 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -113,11 +113,19 @@ static inline void __iomem *__typesafe_io(unsigned long addr) #define __iowmb() do { } while (0) #endif +/* PCI fixed i/o mapping */ +#define PCI_IO_VIRT_BASE 0xfee00000 + +extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); + /* * Now, pick up the machine-defined IO definitions */ #ifdef CONFIG_NEED_MACH_IO_H #include +#elif defined(CONFIG_PCI) +#define IO_SPACE_LIMIT ((resource_size_t)0xfffff) +#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT)) #else #define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT) #endif diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index a6efcdd6fd2..195ac2f9d3d 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -9,6 +9,9 @@ * * Page table mapping constructs and function prototypes */ +#ifndef __ASM_MACH_MAP_H +#define __ASM_MACH_MAP_H + #include struct map_desc { @@ -34,6 +37,8 @@ struct map_desc { #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); +extern void vm_reserve_area_early(unsigned long addr, unsigned long size, + void *caller); struct mem_type; extern const struct mem_type *get_mem_type(unsigned int type); @@ -44,4 +49,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys, const struct mem_type *mtype); #else #define iotable_init(map,num) do { } while (0) +#define vm_reserve_area_early(a,s,c) do { } while (0) +#endif + #endif diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 26c511fddf8..df818876fa3 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -11,6 +11,7 @@ #ifndef __ASM_MACH_PCI_H #define __ASM_MACH_PCI_H + struct pci_sys_data; struct pci_ops; struct pci_bus; @@ -54,6 +55,15 @@ struct pci_sys_data { */ void pci_common_init(struct hw_pci *); +/* + * Setup early fixed I/O mapping. + */ +#if defined(CONFIG_PCI) +extern void pci_map_io_early(unsigned long pfn); +#else +static inline void pci_map_io_early(unsigned long pfn) {} +#endif + /* * PCI controllers */ -- cgit v1.2.3 From 3c5d1699887bfcd17e6d9842ba7e2b3234e665db Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jul 2012 21:59:03 -0500 Subject: ARM: move PCI i/o resource setup into common code With consolidation of the PCI i/o mappings, the i/o space is being set to start at a PCI bus addr of 0x0 and fixed to 64K per bus. In this case the core ARM PCI setup code can setup the i/o resource. Currently, the resource is only setup if the platform did not setup an i/o resource. 
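A rough sketch, assuming a fixed 64K I/O window per bus, of what the common code can now do with the io_res/io_res_name fields added below; the helper name and resource naming scheme are illustrative, not the actual implementation:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/mach/pci.h>	/* struct pci_sys_data */

/*
 * Illustrative sketch: register a 64K-per-bus I/O resource using the new
 * io_res/io_res_name fields. The real setup lives in the common ARM PCI
 * code; this only shows the shape of the data being registered.
 */
static void example_add_io_resource(struct pci_sys_data *sys, int busnr)
{
	snprintf(sys->io_res_name, sizeof(sys->io_res_name), "PCI%d I/O", busnr);

	sys->io_res.name  = sys->io_res_name;
	sys->io_res.flags = IORESOURCE_IO;
	sys->io_res.start = busnr * SZ_64K;
	sys->io_res.end   = sys->io_res.start + SZ_64K - 1;

	if (request_resource(&ioport_resource, &sys->io_res))
		pr_err("PCI: unable to claim I/O resource for bus %d\n", busnr);
}
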
Signed-off-by: Rob Herring Reviewed-by: Arnd Bergmann --- arch/arm/include/asm/mach/pci.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index df818876fa3..db9fedb57f2 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -11,6 +11,7 @@ #ifndef __ASM_MACH_PCI_H #define __ASM_MACH_PCI_H +#include struct pci_sys_data; struct pci_ops; @@ -43,6 +44,8 @@ struct pci_sys_data { unsigned long io_offset; /* bus->cpu IO mapping offset */ struct pci_bus *bus; /* PCI bus */ struct list_head resources; /* root bus resources (apertures) */ + struct resource io_res; + char io_res_name[12]; /* Bridge swizzling */ u8 (*swizzle)(struct pci_dev *, u8 *); /* IRQ mapping */ -- cgit v1.2.3 From dd9bf78040fa0da4cecc228e1682b9682b8cb180 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 4 Jul 2012 11:18:40 -0500 Subject: ARM: iop3xx: use fixed PCI i/o mapping Move iop33x and iop32x PCI to fixed i/o mapping and remove io.h. This changes the PCI bus addresses from the cpu address to 0 based. It appears that there is translation h/w for this, but its untested. Not sure what to do with io_offset. I think it should always be 0. AFAICT, PCI setup is skipped if the ATU is already setup. Signed-off-by: Rob Herring Reviewed-by: Arnd Bergmann --- arch/arm/include/asm/hardware/iop3xx.h | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 2ff2c75a463..02fe2fbe247 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -217,18 +217,8 @@ extern int iop3xx_get_init_atu(void); #define IOP3XX_PCI_LOWER_MEM_PA 0x80000000 #define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000 -#define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000 #define IOP3XX_PCI_LOWER_IO_PA 0x90000000 -#define IOP3XX_PCI_LOWER_IO_VA 0xfe000000 -#define IOP3XX_PCI_LOWER_IO_BA 0x90000000 -#define IOP3XX_PCI_UPPER_IO_PA (IOP3XX_PCI_LOWER_IO_PA +\ - IOP3XX_PCI_IO_WINDOW_SIZE - 1) -#define IOP3XX_PCI_UPPER_IO_VA (IOP3XX_PCI_LOWER_IO_VA +\ - IOP3XX_PCI_IO_WINDOW_SIZE - 1) -#define IOP3XX_PCI_IO_PHYS_TO_VIRT(addr) (((u32) (addr) -\ - IOP3XX_PCI_LOWER_IO_PA) +\ - IOP3XX_PCI_LOWER_IO_VA) - +#define IOP3XX_PCI_LOWER_IO_BA 0x00000000 #ifndef __ASSEMBLY__ -- cgit v1.2.3 From a5d5f7daa744b34477c4a12728bde0a1694a1707 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Thu, 12 Jul 2012 23:57:35 +0100 Subject: ARM: 7465/1: Handle >4GB memory sizes in device tree and mem=size@start option The memory regions which are passed to arm_add_memory() from device tree blobs via early_init_dt_add_memory_arch() can have sizes which are larger than will fit in a 32 bit integer, so switch to using a phys_addr_t to hold them, to avoid silently dropping the top 32 bits of the size. Similarly, use phys_addr_t in early_mem() so that mem=size@start command line options specifying more than 4GB behave sensibly. 
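A small illustration, not from the patch, of the truncation being avoided: on a 32-bit kernel an unsigned long silently drops the upper bits of a >4GB size, while phys_addr_t preserves them when 64-bit physical addressing (LPAE) is enabled.

#include <linux/types.h>

/*
 * Hypothetical demonstration: a 6GB size read from a device tree does not
 * fit in a 32-bit unsigned long, but survives in phys_addr_t with LPAE.
 */
static void size_truncation_example(void)
{
	u64 dt_size = 0x180000000ULL;	/* 6GB region size */
	unsigned long lost = dt_size;	/* truncates to 0x80000000 on 32-bit */
	phys_addr_t kept = dt_size;	/* still 6GB when LPAE is enabled */

	(void)lost;
	(void)kept;
}
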
Acked-by: Will Deacon Signed-off-by: Peter Maydell Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 23ebc0c82a3..24d284a1bfc 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -196,7 +196,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn } struct membank { phys_addr_t start; - unsigned long size; + phys_addr_t size; unsigned int highmem; }; @@ -217,7 +217,7 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -extern int arm_add_memory(phys_addr_t start, unsigned long size); +extern int arm_add_memory(phys_addr_t start, phys_addr_t size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); -- cgit v1.2.3 From e9da6e9905e639b0f842a244bc770b48ad0523e9 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 30 Jul 2012 09:11:33 +0200 Subject: ARM: dma-mapping: remove custom consistent dma region This patch changes dma-mapping subsystem to use generic vmalloc areas for all consistent dma allocations. This increases the total size limit of the consistent allocations and removes platform hacks and a lot of duplicated code. Atomic allocations are served from special pool preallocated on boot, because vmalloc areas cannot be reliably created in atomic context. Signed-off-by: Marek Szyprowski Reviewed-by: Kyungmin Park Reviewed-by: Minchan Kim --- arch/arm/include/asm/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bbef15d0489..80777d87142 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -226,7 +226,7 @@ static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struc * DMA region above it's default value of 2MB. It must be called before the * memory allocator is initialised, i.e. before any core_initcall. */ -extern void __init init_consistent_dma_size(unsigned long size); +static inline void init_consistent_dma_size(unsigned long size) { } /* * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" -- cgit v1.2.3 From 64ccc9c033c6089b2d426dad3c56477ab066c999 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 14 Jun 2012 13:03:04 +0200 Subject: common: dma-mapping: add support for generic dma_mmap_* calls Commit 9adc5374 ('common: dma-mapping: introduce mmap method') added a generic method for implementing mmap user call to dma_map_ops structure. This patch converts ARM and PowerPC architectures (the only providers of dma_mmap_coherent/dma_mmap_writecombine calls) to use this generic dma_map_ops based call and adds a generic cross architecture definition for dma_mmap_attrs, dma_mmap_coherent, dma_mmap_writecombine functions. The generic mmap virt_to_page-based fallback implementation is provided for architectures which don't provide their own implementation for mmap method. 
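For context, a hedged sketch of how a driver could use the now cross-architecture dma_mmap_coherent() helper from its mmap file operation; the structure and function names here are illustrative, and the buffer is assumed to come from dma_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical driver context: a buffer obtained from dma_alloc_coherent(). */
struct example_buf {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

/*
 * Map the coherent buffer into user space via the generic helper; on ARM
 * this dispatches to the arm_dma_mmap() method mentioned above.
 */
static int example_mmap_buf(struct example_buf *buf, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_handle, buf->size);
}
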
Signed-off-by: Marek Szyprowski Reviewed-by: Kyungmin Park --- arch/arm/include/asm/dma-mapping.h | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 80777d87142..a0480333114 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs); -#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) - -static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, - size_t size, struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); -} - static inline void *dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { @@ -213,14 +202,6 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } -static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) -{ - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); -} - /* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. It must be called before the -- cgit v1.2.3 From dc2832e1e7db3f9ad465d2fe894bd69ef05d1e4b Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 13 Jun 2012 10:01:15 +0200 Subject: ARM: dma-mapping: add support for dma_get_sgtable() This patch adds support for dma_get_sgtable() function which is required to let drivers to share the buffers allocated by DMA-mapping subsystem. Generic implementation based on virt_to_page() is not suitable for ARM dma-mapping subsystem. Signed-off-by: Marek Szyprowski Reviewed-by: Kyungmin Park --- arch/arm/include/asm/dma-mapping.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a0480333114..2ae842df455 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -261,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction); extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction); +extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs); #endif /* __KERNEL__ */ #endif -- cgit v1.2.3 From c1d7e01d7877a397655277a920aeaa3830ed9461 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 30 Jul 2012 14:42:46 -0700 Subject: ipc: use Kconfig options for __ARCH_WANT_[COMPAT_]IPC_PARSE_VERSION Rather than #define the options manually in the architecture code, add Kconfig options for them and select them there instead. This also allows us to select the compat IPC version parsing automatically for platforms using the old compat IPC interface. 
Reported-by: Andrew Morton Signed-off-by: Will Deacon Cc: Arnd Bergmann Cc: Chris Metcalf Cc: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/unistd.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 512cd147345..0cab47d4a83 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -446,7 +446,6 @@ #ifdef __KERNEL__ -#define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE -- cgit v1.2.3 From a76d7bd96d65fa5119adba97e1b58d95f2e78829 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 13 Jul 2012 19:15:40 +0100 Subject: ARM: 7467/1: mutex: use generic xchg-based implementation for ARMv6+ The open-coded mutex implementation for ARMv6+ cores suffers from a severe lack of barriers, so in the uncontended case we don't actually protect any accesses performed during the critical section. Furthermore, the code is largely a duplication of the ARMv6+ atomic_dec code but optimised to remove a branch instruction, as the mutex fastpath was previously inlined. Now that this is executed out-of-line, we can reuse the atomic access code for the locking (in fact, we use the xchg code as this produces shorter critical sections). This patch uses the generic xchg based implementation for mutexes on ARMv6+, which introduces barriers to the lock/unlock operations and also has the benefit of removing a fair amount of inline assembly code. Cc: Acked-by: Arnd Bergmann Acked-by: Nicolas Pitre Reported-by: Shan Kang Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/mutex.h | 119 ++----------------------------------------- 1 file changed, 4 insertions(+), 115 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index 93226cf23ae..b1479fd04a9 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -7,121 +7,10 @@ */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H - -#if __LINUX_ARM_ARCH__ < 6 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */ -# include -#else - /* - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized - * atomic decrement (it is not a reliable atomic decrement but it satisfies - * the defined semantics for our purpose, while being smaller and faster - * than a real atomic decrement or atomic swap. The idea is to attempt - * decrementing the lock value only once. If once decremented it isn't zero, - * or if its store-back fails due to a dispute on the exclusive store, we - * simply bail out immediately through the slow path where the lock will be - * reattempted until it succeeds. + * On pre-ARMv6 hardware this results in a swp-based implementation, + * which is the most efficient. For ARMv6+, we emit a pair of exclusive + * accesses instead. 
*/ -static inline void -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - fail_fn(count); -} - -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - __res = fail_fn(count); - return __res; -} - -/* - * Same trick is used for the unlock fast path. However the original value, - * rather than the result, is used to test for success in order to have - * better generated assembly. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "ldrex %0, [%3] \n\t" - "add %1, %0, #1 \n\t" - "strex %2, %1, [%3] " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __orig |= __ex_flag; - if (unlikely(__orig != 0)) - fail_fn(count); -} - -/* - * If the unlock was done on a contended lock, or if the unlock simply fails - * then the mutex remains locked. - */ -#define __mutex_slowpath_needs_to_unlock() 1 - -/* - * For __mutex_fastpath_trylock we use another construct which could be - * described as a "single value cmpxchg". - * - * This provides the needed trylock semantics like cmpxchg would, but it is - * lighter and less generic than a true cmpxchg implementation. - */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "1: ldrex %0, [%3] \n\t" - "subs %1, %0, #1 \n\t" - "strexeq %2, %1, [%3] \n\t" - "movlt %0, #0 \n\t" - "cmpeq %2, #0 \n\t" - "bgt 1b " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&count->counter) - : "cc", "memory" ); - - return __orig; -} - -#endif +#include #endif -- cgit v1.2.3 From b74253f78400f9a4b42da84bb1de7540b88ce7c4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 23 Jul 2012 14:18:13 +0100 Subject: ARM: 7479/1: mm: avoid NULL dereference when flushing gate_vma with VIVT caches The vivt_flush_cache_{range,page} functions check that the mm_struct of the VMA being flushed has been active on the current CPU before performing the cache maintenance. The gate_vma has a NULL mm_struct pointer and, as such, will cause a kernel fault if we try to flush it with the above operations. This happens during ELF core dumps, which include the gate_vma as it may be useful for debugging purposes. This patch adds checks to the VIVT cache flushing functions so that VMAs with a NULL mm_struct are flushed unconditionally (the vectors page may be dirty if we use it to store the current TLS pointer). 
Cc: # 3.4+ Reported-by: Gilles Chanteperdrix Tested-by: Uros Bizjak Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 004c1bc95d2..e4448e16046 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm) static inline void vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned static inline void vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { unsigned long addr = user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } -- cgit v1.2.3 From fa8bbb13ab49e77c00fa09e5504ec25b7176fb4b Mon Sep 17 00:00:00 2001 From: Bryan Wu Date: Wed, 14 Mar 2012 02:26:56 +0800 Subject: ARM: use new LEDS CPU trigger stub to replace old one Cc: Linus Walleij Signed-off-by: Bryan Wu --- arch/arm/include/asm/leds.h | 50 --------------------------------------------- 1 file changed, 50 deletions(-) delete mode 100644 arch/arm/include/asm/leds.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/leds.h b/arch/arm/include/asm/leds.h deleted file mode 100644 index c545739f39b..00000000000 --- a/arch/arm/include/asm/leds.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * arch/arm/include/asm/leds.h - * - * Copyright (C) 1998 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Event-driven interface for LEDs on machines - * Added led_start and led_stop- Alex Holden, 28th Dec 1998. - */ -#ifndef ASM_ARM_LEDS_H -#define ASM_ARM_LEDS_H - - -typedef enum { - led_idle_start, - led_idle_end, - led_timer, - led_start, - led_stop, - led_claim, /* override idle & timer leds */ - led_release, /* restore idle & timer leds */ - led_start_timer_mode, - led_stop_timer_mode, - led_green_on, - led_green_off, - led_amber_on, - led_amber_off, - led_red_on, - led_red_off, - led_blue_on, - led_blue_off, - /* - * I want this between led_timer and led_start, but - * someone has decided to export this to user space - */ - led_halted -} led_event_t; - -/* Use this routine to handle LEDs */ - -#ifdef CONFIG_LEDS -extern void (*leds_event)(led_event_t); -#else -#define leds_event(e) -#endif - -#endif -- cgit v1.2.3 From 4c071ee5268f7234c3d084b6093bebccc28cdcba Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 14 Sep 2012 13:53:39 +0000 Subject: arm: initial Xen support - Basic hypervisor.h and interface.h definitions. - Skeleton enlighten.c, set xen_start_info to an empty struct. - Make xen_initial_domain dependent on the SIF_PRIVILIGED_BIT. 
The new code only compiles when CONFIG_XEN is set, that is going to be added to arch/arm/Kconfig in patch #11 "xen/arm: introduce CONFIG_XEN on ARM". Changes in v3: - improve comments. Signed-off-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/hypervisor.h | 6 +++ arch/arm/include/asm/xen/hypervisor.h | 19 ++++++++++ arch/arm/include/asm/xen/interface.h | 69 +++++++++++++++++++++++++++++++++++ 3 files changed, 94 insertions(+) create mode 100644 arch/arm/include/asm/hypervisor.h create mode 100644 arch/arm/include/asm/xen/hypervisor.h create mode 100644 arch/arm/include/asm/xen/interface.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h new file mode 100644 index 00000000000..b90d9e523d6 --- /dev/null +++ b/arch/arm/include/asm/hypervisor.h @@ -0,0 +1,6 @@ +#ifndef _ASM_ARM_HYPERVISOR_H +#define _ASM_ARM_HYPERVISOR_H + +#include + +#endif diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h new file mode 100644 index 00000000000..d7ab99a0c9e --- /dev/null +++ b/arch/arm/include/asm/xen/hypervisor.h @@ -0,0 +1,19 @@ +#ifndef _ASM_ARM_XEN_HYPERVISOR_H +#define _ASM_ARM_XEN_HYPERVISOR_H + +extern struct shared_info *HYPERVISOR_shared_info; +extern struct start_info *xen_start_info; + +/* Lazy mode for batching updates / context switch */ +enum paravirt_lazy_mode { + PARAVIRT_LAZY_NONE, + PARAVIRT_LAZY_MMU, + PARAVIRT_LAZY_CPU, +}; + +static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) +{ + return PARAVIRT_LAZY_NONE; +} + +#endif /* _ASM_ARM_XEN_HYPERVISOR_H */ diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h new file mode 100644 index 00000000000..74c72b5083a --- /dev/null +++ b/arch/arm/include/asm/xen/interface.h @@ -0,0 +1,69 @@ +/****************************************************************************** + * Guest OS interface to ARM Xen. + * + * Stefano Stabellini , Citrix, 2012 + */ + +#ifndef _ASM_ARM_XEN_INTERFACE_H +#define _ASM_ARM_XEN_INTERFACE_H + +#include + +#define __DEFINE_GUEST_HANDLE(name, type) \ + typedef type * __guest_handle_ ## name + +#define DEFINE_GUEST_HANDLE_STRUCT(name) \ + __DEFINE_GUEST_HANDLE(name, struct name) +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) +#define GUEST_HANDLE(name) __guest_handle_ ## name + +#define set_xen_guest_handle(hnd, val) \ + do { \ + if (sizeof(hnd) == 8) \ + *(uint64_t *)&(hnd) = 0; \ + (hnd) = val; \ + } while (0) + +#ifndef __ASSEMBLY__ +/* Explicitly size integers that represent pfns in the interface with + * Xen so that we can have one ABI that works for 32 and 64 bit guests. */ +typedef uint64_t xen_pfn_t; +/* Guest handles for primitive C types. */ +__DEFINE_GUEST_HANDLE(uchar, unsigned char); +__DEFINE_GUEST_HANDLE(uint, unsigned int); +__DEFINE_GUEST_HANDLE(ulong, unsigned long); +DEFINE_GUEST_HANDLE(char); +DEFINE_GUEST_HANDLE(int); +DEFINE_GUEST_HANDLE(long); +DEFINE_GUEST_HANDLE(void); +DEFINE_GUEST_HANDLE(uint64_t); +DEFINE_GUEST_HANDLE(uint32_t); +DEFINE_GUEST_HANDLE(xen_pfn_t); + +/* Maximum number of virtual CPUs in multi-processor guests. 
*/ +#define MAX_VIRT_CPUS 1 + +struct arch_vcpu_info { }; +struct arch_shared_info { }; + +/* TODO: Move pvclock definitions some place arch independent */ +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +/* It is OK to have a 12 bytes struct with no padding because it is packed */ +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; +} __attribute__((__packed__)); +#endif + +#endif /* _ASM_ARM_XEN_INTERFACE_H */ -- cgit v1.2.3 From aa2466d21bd9e872690693d56feb946781443f28 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 14 Sep 2012 13:33:21 +0000 Subject: xen/arm: hypercalls Use r12 to pass the hypercall number to the hypervisor. We need a register to pass the hypercall number because we might not know it at compile time and HVC only takes an immediate argument. Among the available registers r12 seems to be the best choice because it is defined as "intra-procedure call scratch register". Use the ISS to pass an hypervisor specific tag. Changes in v2: - define an HYPERCALL macro for 5 arguments hypercall wrappers, even if at the moment is unused; - use ldm instead of pop; - fix up comments. Signed-off-by: Stefano Stabellini Reviewed-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/hypercall.h | 50 ++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 arch/arm/include/asm/xen/hypercall.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h new file mode 100644 index 00000000000..4ac06241436 --- /dev/null +++ b/arch/arm/include/asm/xen/hypercall.h @@ -0,0 +1,50 @@ +/****************************************************************************** + * hypercall.h + * + * Linux-specific hypervisor handling. + * + * Stefano Stabellini , Citrix, 2012 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _ASM_ARM_XEN_HYPERCALL_H +#define _ASM_ARM_XEN_HYPERCALL_H + +#include + +long privcmd_call(unsigned call, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5); +int HYPERVISOR_xen_version(int cmd, void *arg); +int HYPERVISOR_console_io(int cmd, int count, char *str); +int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); +int HYPERVISOR_sched_op(int cmd, void *arg); +int HYPERVISOR_event_channel_op(int cmd, void *arg); +unsigned long HYPERVISOR_hvm_op(int op, void *arg); +int HYPERVISOR_memory_op(unsigned int cmd, void *arg); +int HYPERVISOR_physdev_op(int cmd, void *arg); + +#endif /* _ASM_ARM_XEN_HYPERCALL_H */ -- cgit v1.2.3 From 36a67abce227af005b4a595735556942b74f6741 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 8 Aug 2012 16:33:46 +0000 Subject: xen/arm: page.h definitions ARM Xen guests always use paging in hardware, like PV on HVM guests in the X86 world. Changes in v3: - improve comments. Signed-off-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/page.h | 82 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 arch/arm/include/asm/xen/page.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h new file mode 100644 index 00000000000..174202318df --- /dev/null +++ b/arch/arm/include/asm/xen/page.h @@ -0,0 +1,82 @@ +#ifndef _ASM_ARM_XEN_PAGE_H +#define _ASM_ARM_XEN_PAGE_H + +#include +#include + +#include +#include + +#include + +#define pfn_to_mfn(pfn) (pfn) +#define phys_to_machine_mapping_valid (1) +#define mfn_to_pfn(mfn) (mfn) +#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) + +#define pte_mfn pte_pfn +#define mfn_pte pfn_pte + +/* Xen machine address */ +typedef struct xmaddr { + phys_addr_t maddr; +} xmaddr_t; + +/* Xen pseudo-physical address */ +typedef struct xpaddr { + phys_addr_t paddr; +} xpaddr_t; + +#define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) +#define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) + +static inline xmaddr_t phys_to_machine(xpaddr_t phys) +{ + unsigned offset = phys.paddr & ~PAGE_MASK; + return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset); +} + +static inline xpaddr_t machine_to_phys(xmaddr_t machine) +{ + unsigned offset = machine.maddr & ~PAGE_MASK; + return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); +} +/* VIRT <-> MACHINE conversion */ +#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) +#define virt_to_pfn(v) (PFN_DOWN(__pa(v))) +#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) +#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) + +static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) +{ + /* TODO: assuming it is mapped in the kernel 1:1 */ + return virt_to_machine(vaddr); +} + +/* TODO: this shouldn't be here but it is because the frontend drivers + * are using it (its rolled in headers) even though we won't hit the code path. + * So for right now just punt with this. 
+ */ +static inline pte_t *lookup_address(unsigned long address, unsigned int *level) +{ + BUG(); + return NULL; +} + +static inline int m2p_add_override(unsigned long mfn, struct page *page, + struct gnttab_map_grant_ref *kmap_op) +{ + return 0; +} + +static inline int m2p_remove_override(struct page *page, bool clear_pte) +{ + return 0; +} + +static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + BUG(); + return false; +} +#endif /* _ASM_ARM_XEN_PAGE_H */ -- cgit v1.2.3 From e54d2f61528165bbe0e5f628b111eab3be31c3b5 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 8 Aug 2012 16:34:01 +0000 Subject: xen/arm: sync_bitops sync_bitops functions are equivalent to the SMP implementation of the original functions, independently from CONFIG_SMP being defined. We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But under Xen you might be communicating with a completely external entity who might be on another CPU (e.g. two uniprocessor guests communicating via event channels and grant tables). So we need a variant of the bit ops which are SMP safe even on a UP kernel. Signed-off-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/sync_bitops.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 arch/arm/include/asm/sync_bitops.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h new file mode 100644 index 00000000000..63479eecbf7 --- /dev/null +++ b/arch/arm/include/asm/sync_bitops.h @@ -0,0 +1,27 @@ +#ifndef __ASM_SYNC_BITOPS_H__ +#define __ASM_SYNC_BITOPS_H__ + +#include +#include + +/* sync_bitops functions are equivalent to the SMP implementation of the + * original functions, independently from CONFIG_SMP being defined. + * + * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But + * under Xen you might be communicating with a completely external entity + * who might be on another CPU (e.g. two uniprocessor guests communicating + * via event channels and grant tables). So we need a variant of the bit + * ops which are SMP safe even on a UP kernel. + */ + +#define sync_set_bit(nr, p) _set_bit(nr, p) +#define sync_clear_bit(nr, p) _clear_bit(nr, p) +#define sync_change_bit(nr, p) _change_bit(nr, p) +#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p) +#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p) +#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p) +#define sync_test_bit(nr, addr) test_bit(nr, addr) +#define sync_cmpxchg cmpxchg + + +#endif -- cgit v1.2.3 From 256f631f1f7e7bedc882510679ad4473a2274708 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 14 Sep 2012 13:34:43 +0000 Subject: xen/arm: Introduce xen_ulong_t for unsigned long All the original Xen headers have xen_ulong_t as unsigned long type, however when they have been imported in Linux, xen_ulong_t has been replaced with unsigned long. That might work for x86 and ia64 but it does not for arm. Bring back xen_ulong_t and let each architecture define xen_ulong_t as they see fit. Also explicitly size pointers (__DEFINE_GUEST_HANDLE) to 64 bit. Changes in v3: - remove the incorrect changes to multicall_entry; - remove the change to apic_physbase. 
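To illustrate the layout concern with a sketch (invented type names, not part of the patch): a bare pointer field in a structure shared with the hypervisor is 4 bytes on 32-bit ARM but 8 bytes on a 64-bit peer, so the two sides disagree on the structure layout; wrapping the pointer in a union with an 8-byte-aligned 64-bit member, as the hunk below does for guest handles, pins the field to one size for every guest width.

#include <stdint.h>

/* Demo types only -- names are invented for the example. */
struct demo_handle_unpadded {
	int *p;			/* 4 bytes on 32-bit ARM, 8 bytes on a 64-bit peer */
};

struct demo_handle_padded {
	union {
		int *p;
		uint64_t q __attribute__((aligned(8)));
	};			/* always occupies an 8-byte slot on both sides */
};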
Signed-off-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/interface.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h index 74c72b5083a..ae05e56dd17 100644 --- a/arch/arm/include/asm/xen/interface.h +++ b/arch/arm/include/asm/xen/interface.h @@ -9,8 +9,11 @@ #include +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) + #define __DEFINE_GUEST_HANDLE(name, type) \ - typedef type * __guest_handle_ ## name + typedef struct { union { type *p; uint64_aligned_t q; }; } \ + __guest_handle_ ## name #define DEFINE_GUEST_HANDLE_STRUCT(name) \ __DEFINE_GUEST_HANDLE(name, struct name) @@ -21,13 +24,14 @@ do { \ if (sizeof(hnd) == 8) \ *(uint64_t *)&(hnd) = 0; \ - (hnd) = val; \ + (hnd).p = val; \ } while (0) #ifndef __ASSEMBLY__ /* Explicitly size integers that represent pfns in the interface with * Xen so that we can have one ABI that works for 32 and 64 bit guests. */ typedef uint64_t xen_pfn_t; +typedef uint64_t xen_ulong_t; /* Guest handles for primitive C types. */ __DEFINE_GUEST_HANDLE(uchar, unsigned char); __DEFINE_GUEST_HANDLE(uint, unsigned int); -- cgit v1.2.3 From 0ec53ecf38bcbf95b4b057328a8fbba4d22ef28b Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 14 Sep 2012 13:37:32 +0000 Subject: xen/arm: receive Xen events on ARM Compile events.c on ARM. Parse, map and enable the IRQ to get event notifications from the device tree (node "/xen"). Signed-off-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/events.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 arch/arm/include/asm/xen/events.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h new file mode 100644 index 00000000000..94b4e9020b0 --- /dev/null +++ b/arch/arm/include/asm/xen/events.h @@ -0,0 +1,18 @@ +#ifndef _ASM_ARM_XEN_EVENTS_H +#define _ASM_ARM_XEN_EVENTS_H + +#include + +enum ipi_vector { + XEN_PLACEHOLDER_VECTOR, + + /* Xen IPIs go here */ + XEN_NR_IPIS, +}; + +static inline int xen_irqs_disabled(struct pt_regs *regs) +{ + return raw_irqs_disabled_flags(regs->ARM_cpsr); +} + +#endif /* _ASM_ARM_XEN_EVENTS_H */ -- cgit v1.2.3 From ca981633761c4d38a5b8b2889977a9078d5b9c99 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 8 Aug 2012 17:21:23 +0000 Subject: xen/arm: compile netback Signed-off-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/hypercall.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h index 4ac06241436..8a823253d77 100644 --- a/arch/arm/include/asm/xen/hypercall.h +++ b/arch/arm/include/asm/xen/hypercall.h @@ -47,4 +47,23 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg); int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg); +static inline void +MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, + unsigned int new_val, unsigned long flags) +{ + BUG(); +} + +static inline void +MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, + int count, int *success_count, domid_t domid) +{ + BUG(); +} + +static inline int +HYPERVISOR_multicall(void *call_list, int nr_calls) +{ + BUG(); +} #endif /* _ASM_ARM_XEN_HYPERCALL_H */ -- cgit v1.2.3 
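The hypercall entry points declared above are implemented in assembly rather than C. As a rough sketch of the calling convention described in the earlier hypercall patch (hypercall number in r12, arguments starting at r0, a Xen-specific tag as the HVC immediate), a hypothetical one-argument wrapper might look like the following; the function name and the immediate value are illustrative only, and the real wrappers live in the assembly sources added by that series.

/* Sketch only -- building hvc may need .arch_extension virt or ARMv7-VE. */
static inline long demo_xen_hypercall_1(unsigned int op, unsigned long a1)
{
	register unsigned long r0 asm("r0") = a1;
	register unsigned int nr asm("r12") = op;

	asm volatile("hvc #0xEA1"	/* tag value shown for illustration */
		     : "+r" (r0)
		     : "r" (nr)
		     : "memory");
	return r0;
}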
From 237ec6f2e51d2fc2ff37c7c5f1ccc9264d09c85b Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 7 Aug 2012 19:05:10 +0100 Subject: ARM: 7486/1: sched_clock: update epoch_cyc on resume Many clocks that are used to provide sched_clock will reset during suspend. If read_sched_clock returns 0 after suspend, sched_clock will appear to jump forward. This patch resets cd.epoch_cyc to the current value of read_sched_clock during resume, which causes sched_clock() just after suspend to return the same value as sched_clock() just before suspend. In addition, during the window where epoch_ns has been updated before suspend, but epoch_cyc has not been updated after suspend, it is unknown whether the clock has reset or not, and sched_clock() could return a bogus value. Add a suspended flag, and return the pre-suspend epoch_ns value during this period. The new behavior is triggered by calling setup_sched_clock_needs_suspend instead of setup_sched_clock. Signed-off-by: Colin Cross Reviewed-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/sched_clock.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index e3f75726343..05b8e82ec9f 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -10,5 +10,7 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); +extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate); #endif -- cgit v1.2.3 From 47f1204329237a0f8655f5a9f14a38ac81946ca1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 Aug 2012 17:51:18 +0100 Subject: ARM: 7487/1: mm: avoid setting nG bit for user mappings that aren't present Swap entries are encoded in ptes such that !pte_present(pte) and pte_file(pte). The remaining bits of the descriptor are used to identify the swapfile and the offset within it for the swap entry. When writing such a pte for a user virtual address, set_pte_at unconditionally sets the nG bit, which (in the case of LPAE) will corrupt the swapfile offset and lead to a BUG: [ 140.494067] swap_free: Unused swap offset entry 000763b4 [ 140.509989] BUG: Bad page map in process rs:main Q:Reg pte:0ec76800 pmd:8f92e003 This patch fixes the problem by only setting the nG bit for user mappings that are actually present.
Cc: Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index f66626d71e7..d88f9f049e9 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) + #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) { @@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - if (addr >= TASK_SIZE) - set_pte_ext(ptep, pteval, 0); - else { + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_present_user(pteval)) { __sync_icache_dcache(pteval); - set_pte_ext(ptep, pteval, PTE_EXT_NG); + ext |= PTE_EXT_NG; } -} -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte) (0) - -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) + set_pte_ext(ptep, pteval, ext); +} #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } -- cgit v1.2.3 From f5f2025ef3e2cdb593707cbf87378761f17befbe Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 Aug 2012 17:51:19 +0100 Subject: ARM: 7488/1: mm: use 5 bits for swapfile type encoding Page migration encodes the pfn in the offset field of a swp_entry_t. For LPAE, we support physical addresses of up to 36 bits (due to sparsemem limitations with the size of page flags), requiring 24 bits to represent a pfn. A further 3 bits are used to encode a swp_entry into a pte, leaving 5 bits for the type field. Furthermore, the core code defines MAX_SWAPFILES_SHIFT as 5, so the additional type bit does not get used. This patch reduces the width of the type field to 5 bits, allowing us to create up to 31 swapfiles of 64GB each. 
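For reference, a simplified sketch (not taken from the kernel, but consistent with the shift and width values in the hunk below) of how the type and offset are recovered from such an entry, together with the arithmetic behind the 64GB figure; the kernel's own helpers for this are __swp_type() and __swp_offset().

/* Demo macros only. */
#define DEMO_SWP_TYPE_SHIFT	3
#define DEMO_SWP_TYPE_BITS	5
#define DEMO_SWP_TYPE_MASK	((1 << DEMO_SWP_TYPE_BITS) - 1)
#define DEMO_SWP_OFFSET_SHIFT	(DEMO_SWP_TYPE_BITS + DEMO_SWP_TYPE_SHIFT)

#define demo_swp_type(val)	(((val) >> DEMO_SWP_TYPE_SHIFT) & DEMO_SWP_TYPE_MASK)
#define demo_swp_offset(val)	((val) >> DEMO_SWP_OFFSET_SHIFT)

/* Offset occupies bits 8..31, i.e. 24 bits: 2^24 pages * 4KB = 64GB per swap file. */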
Cc: Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index d88f9f049e9..41dc31f834c 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -253,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <- type --> 0 0 0 + * <--------------- offset ----------------------> < type -> 0 0 0 * - * This gives us up to 63 swap files and 32GB per swap file. Note that + * This gives us up to 31 swap files and 64GB per swap file. Note that * the offset field is always non-zero. */ #define __SWP_TYPE_SHIFT 3 -#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) -- cgit v1.2.3 From 38f2e3772429f29a273a2ed7e95dd7a41f662f06 Mon Sep 17 00:00:00 2001 From: Chao Xie Date: Tue, 31 Jul 2012 14:13:12 +0800 Subject: ARM: cache: add extra feature enable for tauros2 The extra feature may be used by SOCs are prefetch, burst8, write buffer coalesce Signed-off-by: Chao Xie Signed-off-by: Haojian Zhuang --- arch/arm/include/asm/hardware/cache-tauros2.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h index 538f17ca905..295e2e40151 100644 --- a/arch/arm/include/asm/hardware/cache-tauros2.h +++ b/arch/arm/include/asm/hardware/cache-tauros2.h @@ -8,4 +8,7 @@ * warranty of any kind, whether express or implied. */ -extern void __init tauros2_init(void); +#define CACHE_TAUROS2_PREFETCH_ON (1 << 0) +#define CACHE_TAUROS2_LINEFILL_BURST8 (1 << 1) + +extern void __init tauros2_init(unsigned int features); -- cgit v1.2.3 From 7be2958e97b37256b8016db39ac6cf51f711e390 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Thu, 31 May 2012 13:05:20 -0500 Subject: ARM: PMU: Add runtime PM Support Add runtime PM support to the ARM PMU driver so that devices such as OMAP supporting dynamic PM can use the platform->runtime_* hooks to initialise hardware at runtime. Without having these runtime PM hooks in place any configuration of the PMU hardware would be lost when low power states are entered and hence would prevent PMU from working. This change also replaces the PMU platform functions enable_irq and disable_irq added by Ming Lei with runtime_resume and runtime_suspend funtions. Ming had added the enable_irq and disable_irq functions as a method to configure the cross trigger interface on OMAP4 for routing the PMU interrupts. By adding runtime PM support, we can move the code called by enable_irq and disable_irq into the runtime PM callbacks runtime_resume and runtime_suspend. 
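As a rough illustration of how a platform might use the new hooks (hypothetical helper names, not OMAP4 code), a board file could populate arm_pmu_platdata with its own resume/suspend callbacks:

#include <linux/types.h>
#include <linux/device.h>
#include <asm/pmu.h>

/* Placeholder for whatever platform code routes PMU interrupts (invented). */
static void demo_cti_configure(bool enable)
{
}

static int demo_pmu_runtime_resume(struct device *dev)
{
	demo_cti_configure(true);
	return 0;
}

static int demo_pmu_runtime_suspend(struct device *dev)
{
	demo_cti_configure(false);
	return 0;
}

static struct arm_pmu_platdata demo_pmu_platdata = {
	.runtime_resume	 = demo_pmu_runtime_resume,
	.runtime_suspend = demo_pmu_runtime_suspend,
};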
Cc: Ming Lei Cc: Benoit Cousson Cc: Paul Walmsley Cc: Kevin Hilman Signed-off-by: Jon Hunter Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 4432305f4a2..40d7dff8bc3 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -31,18 +31,22 @@ enum arm_pmu_type { * interrupt and passed the address of the low level handler, * and can be used to implement any platform specific handling * before or after calling it. - * @enable_irq: an optional handler which will be called after - * request_irq and be used to handle some platform specific - * irq enablement - * @disable_irq: an optional handler which will be called before - * free_irq and be used to handle some platform specific - * irq disablement + * @runtime_resume: an optional handler which will be called by the + * runtime PM framework following a call to pm_runtime_get(). + * Note that if pm_runtime_get() is called more than once in + * succession this handler will only be called once. + * @runtime_suspend: an optional handler which will be called by the + * runtime PM framework following a call to pm_runtime_put(). + * Note that if pm_runtime_get() is called more than once in + * succession this handler will only be called following the + * final call to pm_runtime_put() that actually disables the + * hardware. */ struct arm_pmu_platdata { irqreturn_t (*handle_irq)(int irq, void *dev, irq_handler_t pmu_handler); - void (*enable_irq)(int irq); - void (*disable_irq)(int irq); + int (*runtime_resume)(struct device *dev); + int (*runtime_suspend)(struct device *dev); }; #ifdef CONFIG_CPU_HAS_PMU -- cgit v1.2.3 From f0d1bc47953743aef9d2ed5326bc5973a3db08ab Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sat, 28 Jul 2012 16:27:03 +0100 Subject: ARM: pmu: remove unused reservation mechanism The PMU reservation mechanism was originally intended to allow OProfile and perf-events to co-ordinate over access to the CPU PMU. Since then, OProfile for ARM has moved to using perf as its backend, so the reservation code is no longer used. This patch removes the reservation code for the CPU PMU on ARM. Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 34 ---------------------------------- 1 file changed, 34 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 40d7dff8bc3..05e0401d769 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -49,40 +49,6 @@ struct arm_pmu_platdata { int (*runtime_suspend)(struct device *dev); }; -#ifdef CONFIG_CPU_HAS_PMU - -/** - * reserve_pmu() - reserve the hardware performance counters - * - * Reserve the hardware performance counters in the system for exclusive use. - * Returns 0 on success or -EBUSY if the lock is already held. - */ -extern int -reserve_pmu(enum arm_pmu_type type); - -/** - * release_pmu() - Relinquish control of the performance counters - * - * Release the performance counters and allow someone else to use them. - */ -extern void -release_pmu(enum arm_pmu_type type); - -#else /* CONFIG_CPU_HAS_PMU */ - -#include - -static inline int -reserve_pmu(enum arm_pmu_type type) -{ - return -ENODEV; -} - -static inline void -release_pmu(enum arm_pmu_type type) { } - -#endif /* CONFIG_CPU_HAS_PMU */ - #ifdef CONFIG_HW_PERF_EVENTS /* The events for a given PMU register set. 
*/ -- cgit v1.2.3 From df3d17e068bf69e3c5a53d52d30caad3d061b762 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Thu, 19 Jul 2012 09:50:21 +0100 Subject: ARM: pmu: remove arm_pmu_type enumeration The arm_pmu_type enumeration was initially introduced to identify different PMU types in the system, the usual one being that on the CPU (ARM_PMU_DEVICE_CPU). With the removal of the PMU reservation code and the introduction of devicetree bindings for the CPU PMU, the enumeration is no longer required. This patch removes the enumeration and updates the various CPU PMU platform devices so that they no longer pass an .id field referring to identify the PMU type. Cc: Haojian Zhuang Cc: Olof Johansson Cc: Pawel Moll Acked-by: Jon Hunter Acked-by: Kukjin Kim Acked-by: Linus Walleij Acked-by: Jiandong Zheng Signed-off-by: Sudeep KarkadaNagesha [will: cosmetic edits and actual removal of the enum type] Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 05e0401d769..fbec73a0ee7 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -15,15 +15,6 @@ #include #include -/* - * Types of PMUs that can be accessed directly and require mutual - * exclusion between profiling tools. - */ -enum arm_pmu_type { - ARM_PMU_DEVICE_CPU = 0, - ARM_NUM_PMU_DEVICES, -}; - /* * struct arm_pmu_platdata - ARM PMU platform data * @@ -73,7 +64,6 @@ struct pmu_hw_events { struct arm_pmu { struct pmu pmu; - enum arm_pmu_type type; cpumask_t active_irqs; char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); -- cgit v1.2.3 From 6dbc00297095122ea89e016ce6affad0b7c0ddac Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sun, 29 Jul 2012 12:36:28 +0100 Subject: ARM: perf: prepare for moving CPU PMU code into separate file The CPU PMU code is tightly coupled with generic ARM PMU handling code. This makes it cumbersome when trying to add support for other ARM PMUs (e.g. interconnect, L2 cache controller, bus) as the generic parts of the code are not readily reusable. This patch cleans up perf_event.c so that reusable code is exposed via header files to other potential PMU drivers. The CPU code is consistently named to identify it as such and also to prepare for moving it into a separate file. Signed-off-by: Will Deacon --- arch/arm/include/asm/perf_event.h | 9 ++++++++- arch/arm/include/asm/pmu.h | 11 ++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index e074948d814..625cd621a43 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,6 +12,13 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* Nothing to see here... */ +/* + * The ARMv7 CPU PMU supports up to 32 event counters. 
+ */ +#define ARMPMU_MAX_HWEVENTS 32 + +#define HW_OP_UNSUPPORTED 0xFFFF +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xFFFF #endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index fbec73a0ee7..a993ad67604 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -89,7 +89,9 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); +extern const struct dev_pm_ops armpmu_dev_pm_ops; + +int armpmu_register(struct arm_pmu *armpmu, char *name, int type); u64 armpmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, @@ -99,6 +101,13 @@ int armpmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx); +int armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask); + #endif /* CONFIG_HW_PERF_EVENTS */ #endif /* __ARM_PMU_H__ */ -- cgit v1.2.3 From 051f1b13144dd8553d5a5104dde94c7263ae3ba7 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Tue, 31 Jul 2012 10:34:25 +0100 Subject: ARM: perf: move irq registration into pmu implementation This patch moves the CPU-specific IRQ registration and parsing code into the CPU PMU backend. This is required because a PMU may have more than one interrupt, which in turn can be either PPI (per-cpu) or SPI (requiring strict affinity setting at the interrupt distributor). Signed-off-by: Sudeep KarkadaNagesha [will: cosmetic edits and reworked interrupt dispatching] Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index a993ad67604..a26170dce02 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -78,6 +78,8 @@ struct arm_pmu { void (*start)(void); void (*stop)(void); void (*reset)(void *); + int (*request_irq)(irq_handler_t handler); + void (*free_irq)(void); int (*map_event)(struct perf_event *event); int num_events; atomic_t active_events; -- cgit v1.2.3 From df547e08e800275a431e560a7f0a6b6f24ab2904 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 24 Aug 2012 15:23:06 +0100 Subject: ARM: 7503/1: mm: only flush both pmd entries for classic MMU LPAE does not use two pmd entries for a pte, so the additional tlb flushing is not required. Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/tlb.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 314d4664eae..99a19512ee2 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { pgtable_page_dtor(pte); +#ifdef CONFIG_ARM_LPAE + tlb_add_flush(tlb, addr); +#else /* * With the classic ARM MMU, a pte page has two corresponding pmd * entries, each covering 1MB. 
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, addr &= PMD_MASK; tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); tlb_add_flush(tlb, addr + SZ_1M); +#endif tlb_remove_page(tlb, pte); } -- cgit v1.2.3 From 4a8052d8445a24771905d71d13f917146f9e822c Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 15 Aug 2012 16:28:35 +0100 Subject: ARM: 7491/1: use generic version of identical asm headers Inspired by the AArgh64 claim that it should be separate from ARM and one reason was being able to use more asm-generic headers. Doing a diff of arch/arm/include/asm and include/asm-generic there are numerous asm headers which are functionally identical to their asm-generic counterparts. Delete the ARM version and use the generic ones. Signed-off-by: Rob Herring Reviewed-by: Nicolas Pitre Tested-by: Thomas Petazzoni Reviewed-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 15 +++ arch/arm/include/asm/current.h | 15 --- arch/arm/include/asm/exec.h | 6 -- arch/arm/include/asm/ipcbuf.h | 1 - arch/arm/include/asm/msgbuf.h | 31 ------- arch/arm/include/asm/param.h | 31 ------- arch/arm/include/asm/parport.h | 18 ---- arch/arm/include/asm/segment.h | 11 --- arch/arm/include/asm/sembuf.h | 25 ----- arch/arm/include/asm/serial.h | 19 ---- arch/arm/include/asm/shmbuf.h | 42 --------- arch/arm/include/asm/socket.h | 72 --------------- arch/arm/include/asm/sockios.h | 13 --- arch/arm/include/asm/termbits.h | 198 ---------------------------------------- arch/arm/include/asm/timex.h | 6 +- arch/arm/include/asm/types.h | 16 ---- 16 files changed, 17 insertions(+), 502 deletions(-) delete mode 100644 arch/arm/include/asm/current.h delete mode 100644 arch/arm/include/asm/exec.h delete mode 100644 arch/arm/include/asm/ipcbuf.h delete mode 100644 arch/arm/include/asm/msgbuf.h delete mode 100644 arch/arm/include/asm/param.h delete mode 100644 arch/arm/include/asm/parport.h delete mode 100644 arch/arm/include/asm/segment.h delete mode 100644 arch/arm/include/asm/sembuf.h delete mode 100644 arch/arm/include/asm/serial.h delete mode 100644 arch/arm/include/asm/shmbuf.h delete mode 100644 arch/arm/include/asm/socket.h delete mode 100644 arch/arm/include/asm/sockios.h delete mode 100644 arch/arm/include/asm/termbits.h delete mode 100644 arch/arm/include/asm/types.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 960abceb8e1..6cc8a13a750 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -5,16 +5,31 @@ header-y += hwcap.h generic-y += auxvec.h generic-y += bitsperlong.h generic-y += cputime.h +generic-y += current.h generic-y += emergency-restart.h generic-y += errno.h +generic-y += exec.h generic-y += ioctl.h +generic-y += ipcbuf.h generic-y += irq_regs.h generic-y += kdebug.h generic-y += local.h generic-y += local64.h +generic-y += msgbuf.h +generic-y += param.h +generic-y += parport.h generic-y += percpu.h generic-y += poll.h generic-y += resource.h generic-y += sections.h +generic-y += segment.h +generic-y += sembuf.h +generic-y += serial.h +generic-y += shmbuf.h generic-y += siginfo.h generic-y += sizes.h +generic-y += socket.h +generic-y += sockios.h +generic-y += termbits.h +generic-y += timex.h +generic-y += types.h diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h deleted file mode 100644 index 75d21e2a3ff..00000000000 --- a/arch/arm/include/asm/current.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _ASMARM_CURRENT_H -#define 
_ASMARM_CURRENT_H - -#include - -static inline struct task_struct *get_current(void) __attribute_const__; - -static inline struct task_struct *get_current(void) -{ - return current_thread_info()->task; -} - -#define current (get_current()) - -#endif /* _ASMARM_CURRENT_H */ diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h deleted file mode 100644 index 7c4fbef72b3..00000000000 --- a/arch/arm/include/asm/exec.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_ARM_EXEC_H -#define __ASM_ARM_EXEC_H - -#define arch_align_stack(x) (x) - -#endif /* __ASM_ARM_EXEC_H */ diff --git a/arch/arm/include/asm/ipcbuf.h b/arch/arm/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d..00000000000 --- a/arch/arm/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/arm/include/asm/msgbuf.h b/arch/arm/include/asm/msgbuf.h deleted file mode 100644 index 33b35b946ea..00000000000 --- a/arch/arm/include/asm/msgbuf.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _ASMARM_MSGBUF_H -#define _ASMARM_MSGBUF_H - -/* - * The msqid64_ds structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - unsigned long __unused1; - __kernel_time_t msg_rtime; /* last msgrcv time */ - unsigned long __unused2; - __kernel_time_t msg_ctime; /* last change time */ - unsigned long __unused3; - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* _ASMARM_MSGBUF_H */ diff --git a/arch/arm/include/asm/param.h b/arch/arm/include/asm/param.h deleted file mode 100644 index 8b24bf94c06..00000000000 --- a/arch/arm/include/asm/param.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/arm/include/asm/param.h - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PARAM_H -#define __ASM_PARAM_H - -#ifdef __KERNEL__ -# define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* User interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ -#else -# define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -/* max length of hostname */ -#define MAXHOSTNAMELEN 64 - -#endif - diff --git a/arch/arm/include/asm/parport.h b/arch/arm/include/asm/parport.h deleted file mode 100644 index 26e94b09035..00000000000 --- a/arch/arm/include/asm/parport.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * arch/arm/include/asm/parport.h: ARM-specific parport initialisation - * - * Copyright (C) 1999, 2000 Tim Waugh - * - * This file should only be included by drivers/parport/parport_pc.c. 
- */ - -#ifndef __ASMARM_PARPORT_H -#define __ASMARM_PARPORT_H - -static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) -{ - return parport_pc_find_isa_ports (autoirq, autodma); -} - -#endif /* !(_ASMARM_PARPORT_H) */ diff --git a/arch/arm/include/asm/segment.h b/arch/arm/include/asm/segment.h deleted file mode 100644 index 9e24c21f630..00000000000 --- a/arch/arm/include/asm/segment.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __ASM_ARM_SEGMENT_H -#define __ASM_ARM_SEGMENT_H - -#define __KERNEL_CS 0x0 -#define __KERNEL_DS 0x0 - -#define __USER_CS 0x1 -#define __USER_DS 0x1 - -#endif /* __ASM_ARM_SEGMENT_H */ - diff --git a/arch/arm/include/asm/sembuf.h b/arch/arm/include/asm/sembuf.h deleted file mode 100644 index 1c028395428..00000000000 --- a/arch/arm/include/asm/sembuf.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _ASMARM_SEMBUF_H -#define _ASMARM_SEMBUF_H - -/* - * The semid64_ds structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - unsigned long __unused1; - __kernel_time_t sem_ctime; /* last change time */ - unsigned long __unused2; - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _ASMARM_SEMBUF_H */ diff --git a/arch/arm/include/asm/serial.h b/arch/arm/include/asm/serial.h deleted file mode 100644 index ebb049091e2..00000000000 --- a/arch/arm/include/asm/serial.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * arch/arm/include/asm/serial.h - * - * Copyright (C) 1996 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Changelog: - * 15-10-1996 RMK Created - */ - -#ifndef __ASM_SERIAL_H -#define __ASM_SERIAL_H - -#define BASE_BAUD (1843200 / 16) - -#endif diff --git a/arch/arm/include/asm/shmbuf.h b/arch/arm/include/asm/shmbuf.h deleted file mode 100644 index 2e5c67ba1c9..00000000000 --- a/arch/arm/include/asm/shmbuf.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _ASMARM_SHMBUF_H -#define _ASMARM_SHMBUF_H - -/* - * The shmid64_ds structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - unsigned long __unused1; - __kernel_time_t shm_dtime; /* last detach time */ - unsigned long __unused2; - __kernel_time_t shm_ctime; /* last change time */ - unsigned long __unused3; - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. 
of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _ASMARM_SHMBUF_H */ diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h deleted file mode 100644 index 6433cadb6ed..00000000000 --- a/arch/arm/include/asm/socket.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef _ASMARM_SOCKET_H -#define _ASMARM_SOCKET_H - -#include - -/* For setsockopt(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#endif /* _ASM_SOCKET_H */ diff --git a/arch/arm/include/asm/sockios.h b/arch/arm/include/asm/sockios.h deleted file mode 100644 index a2588a2512d..00000000000 --- a/arch/arm/include/asm/sockios.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ARCH_ARM_SOCKIOS_H -#define __ARCH_ARM_SOCKIOS_H - -/* Socket-level I/O control calls. 
*/ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif diff --git a/arch/arm/include/asm/termbits.h b/arch/arm/include/asm/termbits.h deleted file mode 100644 index 704135d28d1..00000000000 --- a/arch/arm/include/asm/termbits.h +++ /dev/null @@ -1,198 +0,0 @@ -#ifndef __ASM_ARM_TERMBITS_H -#define __ASM_ARM_TERMBITS_H - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ -}; - -struct termios2 { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 -#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 -#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 - -/* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 
-#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate */ -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 - -/* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 -#define EXTPROC 0200000 - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - -#endif /* __ASM_ARM_TERMBITS_H */ diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index ce119442277..5e711722ebf 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -15,12 +15,10 @@ #include #include -typedef unsigned long cycles_t; - #ifdef ARCH_HAS_READ_CURRENT_TIMER #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) -#else -#define get_cycles() (0) #endif +#include + #endif diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h deleted file mode 100644 index 28beab917ff..00000000000 --- a/arch/arm/include/asm/types.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __ASM_ARM_TYPES_H -#define __ASM_ARM_TYPES_H - -#include - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#endif /* __KERNEL__ */ - -#endif - -- cgit v1.2.3 From d25c881aa3aeea447d0511e391a06c56783d173c Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 15 Aug 2012 16:28:37 +0100 Subject: ARM: 7493/1: use generic unaligned.h This moves ARM over to the asm-generic/unaligned.h header. This has the benefit of better code generated especially for ARMv7 on gcc 4.7+ compilers. As Arnd Bergmann, points out: The asm-generic version uses the "struct" version for native-endian unaligned access and the "byteshift" version for the opposite endianess. The current ARM version however uses the "byteshift" implementation for both. 
Thanks to Nicolas Pitre for the excellent analysis: Test case: int foo (int *x) { return get_unaligned(x); } long long bar (long long *x) { return get_unaligned(x); } With the current ARM version: foo: ldrb r3, [r0, #2] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 2B], MEM[(const u8 *)x_1(D) + 2B] ldrb r1, [r0, #1] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 1B], MEM[(const u8 *)x_1(D) + 1B] ldrb r2, [r0, #0] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D)], MEM[(const u8 *)x_1(D)] mov r3, r3, asl #16 @ tmp154, MEM[(const u8 *)x_1(D) + 2B], ldrb r0, [r0, #3] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 3B], MEM[(const u8 *)x_1(D) + 3B] orr r3, r3, r1, asl #8 @, tmp155, tmp154, MEM[(const u8 *)x_1(D) + 1B], orr r3, r3, r2 @ tmp157, tmp155, MEM[(const u8 *)x_1(D)] orr r0, r3, r0, asl #24 @,, tmp157, MEM[(const u8 *)x_1(D) + 3B], bx lr @ bar: stmfd sp!, {r4, r5, r6, r7} @, mov r2, #0 @ tmp184, ldrb r5, [r0, #6] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 6B], MEM[(const u8 *)x_1(D) + 6B] ldrb r4, [r0, #5] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 5B], MEM[(const u8 *)x_1(D) + 5B] ldrb ip, [r0, #2] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 2B], MEM[(const u8 *)x_1(D) + 2B] ldrb r1, [r0, #4] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 4B], MEM[(const u8 *)x_1(D) + 4B] mov r5, r5, asl #16 @ tmp175, MEM[(const u8 *)x_1(D) + 6B], ldrb r7, [r0, #1] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 1B], MEM[(const u8 *)x_1(D) + 1B] orr r5, r5, r4, asl #8 @, tmp176, tmp175, MEM[(const u8 *)x_1(D) + 5B], ldrb r6, [r0, #7] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 7B], MEM[(const u8 *)x_1(D) + 7B] orr r5, r5, r1 @ tmp178, tmp176, MEM[(const u8 *)x_1(D) + 4B] ldrb r4, [r0, #0] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D)], MEM[(const u8 *)x_1(D)] mov ip, ip, asl #16 @ tmp188, MEM[(const u8 *)x_1(D) + 2B], ldrb r1, [r0, #3] @ zero_extendqisi2 @ MEM[(const u8 *)x_1(D) + 3B], MEM[(const u8 *)x_1(D) + 3B] orr ip, ip, r7, asl #8 @, tmp189, tmp188, MEM[(const u8 *)x_1(D) + 1B], orr r3, r5, r6, asl #24 @,, tmp178, MEM[(const u8 *)x_1(D) + 7B], orr ip, ip, r4 @ tmp191, tmp189, MEM[(const u8 *)x_1(D)] orr ip, ip, r1, asl #24 @, tmp194, tmp191, MEM[(const u8 *)x_1(D) + 3B], mov r1, r3 @, orr r0, r2, ip @ tmp171, tmp184, tmp194 ldmfd sp!, {r4, r5, r6, r7} bx lr In both cases the code is slightly suboptimal. One may wonder why wasting r2 with the constant 0 in the second case for example. And all the mov's could be folded in subsequent orr's, etc. Now with the asm-generic version: foo: ldr r0, [r0, #0] @ unaligned @,* x bx lr @ bar: mov r3, r0 @ x, x ldr r0, [r0, #0] @ unaligned @,* x ldr r1, [r3, #4] @ unaligned @, bx lr @ This is way better of course, but only because this was compiled for ARMv7. In this case the compiler knows that the hardware can do unaligned word access. This isn't that obvious for foo(), but if we remove the get_unaligned() from bar as follows: long long bar (long long *x) {return *x; } then the resulting code is: bar: ldmia r0, {r0, r1} @ x,, bx lr @ So this proves that the presumed aligned vs unaligned cases does have influence on the instructions the compiler may use and that the above unaligned code results are not just an accident. Still... this isn't fully conclusive without at least looking at the resulting assembly fron a pre ARMv6 compilation. 
Let's see with an ARMv5 target: foo: ldrb r3, [r0, #0] @ zero_extendqisi2 @ tmp139,* x ldrb r1, [r0, #1] @ zero_extendqisi2 @ tmp140, ldrb r2, [r0, #2] @ zero_extendqisi2 @ tmp143, ldrb r0, [r0, #3] @ zero_extendqisi2 @ tmp146, orr r3, r3, r1, asl #8 @, tmp142, tmp139, tmp140, orr r3, r3, r2, asl #16 @, tmp145, tmp142, tmp143, orr r0, r3, r0, asl #24 @,, tmp145, tmp146, bx lr @ bar: stmfd sp!, {r4, r5, r6, r7} @, ldrb r2, [r0, #0] @ zero_extendqisi2 @ tmp139,* x ldrb r7, [r0, #1] @ zero_extendqisi2 @ tmp140, ldrb r3, [r0, #4] @ zero_extendqisi2 @ tmp149, ldrb r6, [r0, #5] @ zero_extendqisi2 @ tmp150, ldrb r5, [r0, #2] @ zero_extendqisi2 @ tmp143, ldrb r4, [r0, #6] @ zero_extendqisi2 @ tmp153, ldrb r1, [r0, #7] @ zero_extendqisi2 @ tmp156, ldrb ip, [r0, #3] @ zero_extendqisi2 @ tmp146, orr r2, r2, r7, asl #8 @, tmp142, tmp139, tmp140, orr r3, r3, r6, asl #8 @, tmp152, tmp149, tmp150, orr r2, r2, r5, asl #16 @, tmp145, tmp142, tmp143, orr r3, r3, r4, asl #16 @, tmp155, tmp152, tmp153, orr r0, r2, ip, asl #24 @,, tmp145, tmp146, orr r1, r3, r1, asl #24 @,, tmp155, tmp156, ldmfd sp!, {r4, r5, r6, r7} bx lr Compared to the initial results, this is really nicely optimized and I couldn't do much better if I were to hand code it myself. Signed-off-by: Rob Herring Reviewed-by: Nicolas Pitre Tested-by: Thomas Petazzoni Reviewed-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 1 + arch/arm/include/asm/unaligned.h | 19 ------------------- 2 files changed, 1 insertion(+), 19 deletions(-) delete mode 100644 arch/arm/include/asm/unaligned.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 6cc8a13a750..a86cabf9118 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -33,3 +33,4 @@ generic-y += sockios.h generic-y += termbits.h generic-y += timex.h generic-y += types.h +generic-y += unaligned.h diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h deleted file mode 100644 index 44593a89490..00000000000 --- a/arch/arm/include/asm/unaligned.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_ARM_UNALIGNED_H -#define _ASM_ARM_UNALIGNED_H - -#include -#include -#include - -/* - * Select endianness - */ -#ifndef __ARMEB__ -#define get_unaligned __get_unaligned_le -#define put_unaligned __put_unaligned_le -#else -#define get_unaligned __get_unaligned_be -#define put_unaligned __put_unaligned_be -#endif - -#endif /* _ASM_ARM_UNALIGNED_H */ -- cgit v1.2.3 From e780c452cfea10cbfae1c96dc16e506758138b6f Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 15 Aug 2012 16:28:38 +0100 Subject: ARM: 7494/1: use generic termios.h As pointed out by Arnd Bergmann, this fixes a couple of issues but will increase code size: The original macro user_termio_to_kernel_termios was not endian safe. It used an unsigned short ptr to access the low bits in a 32-bit word. Both user_termio_to_kernel_termios and kernel_termios_to_user_termio are missing error checking on put_user/get_user and copy_to/from_user. 
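To illustrate the endianness point, a standalone demo (not kernel code): accessing the low 16 bits of a 32-bit field through an unsigned short pointer only touches the intended bits on little-endian ARM; on a big-endian configuration the same access hits the upper half of the word.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union {
		uint32_t word;		/* stands in for a 32-bit c_*flag field */
		uint16_t half[2];	/* what an unsigned short pointer sees */
	} u = { .word = 0x00012345 };

	/* Prints 0x2345 on little-endian ARM but 0x0001 on big-endian. */
	printf("first halfword: 0x%04x\n", u.half[0]);
	return 0;
}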
Signed-off-by: Rob Herring Reviewed-by: Nicolas Pitre Tested-by: Thomas Petazzoni Reviewed-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 1 + arch/arm/include/asm/termios.h | 92 ------------------------------------------ 2 files changed, 1 insertion(+), 92 deletions(-) delete mode 100644 arch/arm/include/asm/termios.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index a86cabf9118..8a7196ca510 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -31,6 +31,7 @@ generic-y += sizes.h generic-y += socket.h generic-y += sockios.h generic-y += termbits.h +generic-y += termios.h generic-y += timex.h generic-y += types.h generic-y += unaligned.h diff --git a/arch/arm/include/asm/termios.h b/arch/arm/include/asm/termios.h deleted file mode 100644 index 293e3f1bc3f..00000000000 --- a/arch/arm/include/asm/termios.h +++ /dev/null @@ -1,92 +0,0 @@ -#ifndef __ASM_ARM_TERMIOS_H -#define __ASM_ARM_TERMIOS_H - -#include -#include - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -#ifdef __KERNEL__ -/* intr=^C quit=^| erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" -#endif - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -/* - * Translate a "termio" structure into a "termios". Ugh. - */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - *(unsigned short *) &(termios)->x = __tmp; \ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. 
- */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* __KERNEL__ */ - -#endif /* __ASM_ARM_TERMIOS_H */ -- cgit v1.2.3 From 08928e7aea930e6822ce8f1b20068bf857ecf20d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 16 Aug 2012 18:43:04 +0100 Subject: ARM: 7495/1: mutex: use generic atomic_dec-based implementation for ARMv6+ Commit a76d7bd96d65 ("ARM: 7467/1: mutex: use generic xchg-based implementation for ARMv6+") removed the barrier-less, ARM-specific mutex implementation in favour of the generic xchg-based code. Since then, a bug was uncovered in the xchg code when running on SMP platforms, due to interactions between the locking paths and the MUTEX_SPIN_ON_OWNER code. This was fixed in 0bce9c46bf3b ("mutex: place lock in contended state after fastpath_lock failure"), however, the atomic_dec-based mutex algorithm is now marginally more efficient for ARM (~0.5% improvement in hackbench scores on dual A15). This patch moves ARMv6+ platforms to the atomic_dec-based mutex code. Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/mutex.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index b1479fd04a9..87c044910fe 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -9,8 +9,13 @@ #define _ASM_MUTEX_H /* * On pre-ARMv6 hardware this results in a swp-based implementation, - * which is the most efficient. For ARMv6+, we emit a pair of exclusive - * accesses instead. + * which is the most efficient. For ARMv6+, we have exclusive memory + * accessors and use atomic_dec to avoid the extra xchg operations + * on the locking slowpaths. */ +#if __LINUX_ARM_ARCH__ < 6 #include +#else +#include #endif +#endif /* _ASM_MUTEX_H */ -- cgit v1.2.3 From 195bbcac2e5c12f7fb99cdcc492c3000c5537f4a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 24 Aug 2012 15:18:45 +0100 Subject: ARM: 7500/1: io: avoid writeback addressing modes for __raw_ accessors Data aborts taken to hyp mode do not provide a valid instruction syndrome field in the HSR if the faulting instruction is a memory access using a writeback addressing mode. For hypervisors emulating MMIO accesses to virtual peripherals, taking such an exception requires disassembling the faulting instruction in order to determine the behaviour of the access. Since this requires manually walking the two stages of translation, the world must be stopped to prevent races against page aging in the guest, where the first-stage translation is invalidated after the hypervisor has translated to an IPA and the physical page is reused for something else. 
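To make the addressing-mode distinction concrete, here is a minimal illustration (not taken from the patch, ARM target assumed) of a post-indexed access versus the fixed-offset form; only the latter can be described by the syndrome information:

/* Post-indexed load: updates the base register as a side effect.
 * A trap on this form provides no valid instruction syndrome. */
static inline unsigned int load_post_indexed(unsigned int **cursor)
{
        unsigned int val;

        asm volatile("ldr %0, [%1], #4"
                     : "=r" (val), "+r" (*cursor)
                     : : "memory");
        return val;
}

/* Plain base-register load: no writeback, so a hypervisor can decode
 * the access from the syndrome alone. */
static inline unsigned int load_offset(const volatile unsigned int *addr)
{
        unsigned int val;

        asm volatile("ldr %0, [%1]"
                     : "=r" (val)
                     : "r" (addr));
        return val;
}

In the accessors below, the "Qo" memory constraints serve the same purpose: they limit the compiler to base or base-plus-immediate addressing, never a writeback form.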
This patch avoids taking this heavy performance penalty when running Linux as a guest by ensuring that our I/O accessors do not make use of writeback addressing modes. Cc: Marc Zyngier Reviewed-by: Arnd Bergmann Reviewed-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 67 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 815c669fec0..09c4628efbe 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -47,13 +47,68 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen); extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); -#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v))) -#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) -#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))) +#if __LINUX_ARM_ARCH__ < 6 +/* + * Half-word accesses are problematic with RiscPC due to limitations of + * the bus. Rather than special-case the machine, just let the compiler + * generate the access for CPUs prior to ARMv6. + */ +#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) +#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) +#else +/* + * When running under a hypervisor, we want to avoid I/O accesses with + * writeback addressing modes as these incur a significant performance + * overhead (the address generation must be emulated in software). + */ +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("strh %1, %0" + : "+Qo" (*(volatile u16 __force *)addr) + : "r" (val)); +} + +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + asm volatile("ldrh %1, %0" + : "+Qo" (*(volatile u16 __force *)addr), + "=r" (val)); + return val; +} +#endif -#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a)) -#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) -#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a)) +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("strb %1, %0" + : "+Qo" (*(volatile u8 __force *)addr) + : "r" (val)); +} + +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("str %1, %0" + : "+Qo" (*(volatile u32 __force *)addr) + : "r" (val)); +} + +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + asm volatile("ldrb %1, %0" + : "+Qo" (*(volatile u8 __force *)addr), + "=r" (val)); + return val; +} + +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + asm volatile("ldr %1, %0" + : "+Qo" (*(volatile u32 __force *)addr), + "=r" (val)); + return val; +} /* * Architecture ioremap implementation. 
-- cgit v1.2.3 From 6e5267aa543817015edb4a65c66e15f9809f92bd Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 20 Aug 2012 11:19:25 +0200 Subject: ARM: DMA-Mapping: add function for setting coherent pool size from platform code Some platforms might require to increase atomic coherent pool to make sure that their device will be able to allocate all their buffers from atomic context. This function can be also used to decrease atomic coherent pool size if coherent allocations are not used for the given sub-platform. Suggested-by: Josh Coombs Signed-off-by: Marek Szyprowski --- arch/arm/include/asm/dma-mapping.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 2ae842df455..5c44dcb0987 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +/* + * This can be called during early boot to increase the size of the atomic + * coherent DMA pool above the default value of 256KiB. It must be called + * before postcore_initcall. + */ +extern void __init init_dma_coherent_pool_size(unsigned long size); + /* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. It must be called before the -- cgit v1.2.3 From 9fc31ddc70ac594b5a1d8a83303b65008221088c Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 29 Aug 2012 11:16:59 +0100 Subject: ARM: Don't unconditionally bloat thread_info There is no point reserving space at the bottom of the kernel stack for per-thread crunch state, and per-thread VFP state if these are not being supported by the kernel being built. Remove these members from the thread union when these features are disabled. Reported-by: Tim Bird Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index b79f8e97f77..7b0a8f5f66f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -59,7 +59,9 @@ struct thread_info { __u32 syscall; /* syscall number */ __u8 used_cp[16]; /* thread used copro */ unsigned long tp_value; +#ifdef CONFIG_CRUNCH struct crunch_state crunchstate; +#endif union fp_state fpstate __attribute__((aligned(8))); union vfp_state vfpstate; #ifdef CONFIG_ARM_THUMBEE -- cgit v1.2.3 From a67e1ce145785d884b29b17e4d82a6ecd67bb97a Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 2 Sep 2012 09:17:09 +0100 Subject: ARM: Fix bad merge bd1274dc005 (Merge branch 'v6v7' into devel) Commit 774c096bf9e49 (ARM: v6/v7 cache: allow cache calls to be optimized) got dropped when the merge conflicts for moving the contents of the files in commit 753790e713d (ARM: move cache/processor/fault glue to separate include files) was fixed up in merge bd1274dc005 (Merge branch 'v6v7' into devel). This puts the change back. 
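Roughly, the point of the restored #ifdef logic is whether cache maintenance calls are dispatched directly or through a function-pointer table. A simplified sketch of how the glue is consumed (abbreviated from the cacheflush.h pattern, names trimmed for illustration):

#ifdef MULTI_CACHE
/* more than one cache type configured: indirect call through cpu_cache */
#define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
#else
/* single cache type: direct call, e.g. v7_flush_kern_all */
#define __cpuc_flush_kern_all           __glue(_CACHE, _flush_kern_all)
#endif

With the commented-out tests back in place, a V6-only or V7-only kernel again defines _CACHE and takes the direct-call path instead of unconditionally falling back to MULTI_CACHE.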
Signed-off-by: Russell King --- arch/arm/include/asm/glue-cache.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index 7e30874377e..4f8d2c0dc44 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -110,19 +110,19 @@ #endif #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) -//# ifdef _CACHE +# ifdef _CACHE # define MULTI_CACHE 1 -//# else -//# define _CACHE v6 -//# endif +# else +# define _CACHE v6 +# endif #endif #if defined(CONFIG_CPU_V7) -//# ifdef _CACHE +# ifdef _CACHE # define MULTI_CACHE 1 -//# else -//# define _CACHE v7 -//# endif +# else +# define _CACHE v7 +# endif #endif #if !defined(_CACHE) && !defined(MULTI_CACHE) -- cgit v1.2.3 From b4ad51559c2f12c34a0340b26ffb02e4b285bc51 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 4 Sep 2012 20:04:35 +0100 Subject: ARM: 7512/1: Fix XIP build due to PHYS_OFFSET definition moving During the p2v changes, the PHYS_OFFSET #define moved into a !__ASSEMBLY__ section. This causes a XIP build to fail with arch/arm/kernel/head.o: In function 'stext': arch/arm/kernel/head.S:146: undefined reference to 'PHYS_OFFSET' Momentarily leave the #ifndef __ASSEMBLY__ section so we can define PHYS_OFFSET for all compilation units. Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index e965f1b560f..5f6ddcc5645 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) #endif #endif +#endif /* __ASSEMBLY__ */ #ifndef PHYS_OFFSET #ifdef PLAT_PHYS_OFFSET @@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x) #endif #endif +#ifndef __ASSEMBLY__ + /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. -- cgit v1.2.3 From 8404663f81d212918ff85f493649a7991209fa04 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 7 Sep 2012 18:22:28 +0100 Subject: ARM: 7527/1: uaccess: explicitly check __user pointer when !CPU_USE_DOMAINS The {get,put}_user macros don't perform range checking on the provided __user address when !CPU_HAS_DOMAINS. This patch reworks the out-of-line assembly accessors to check the user address against a specified limit, returning -EFAULT if is is out of range. [will: changed get_user register allocation to match put_user] [rmk: fixed building on older ARM architectures] Reported-by: Catalin Marinas Signed-off-by: Will Deacon Cc: stable@vger.kernel.org Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 8 ++++++++ arch/arm/include/asm/uaccess.h | 40 +++++++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 13 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 03fb93621d0..5c8b3bf4d82 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -320,4 +320,12 @@ .size \name , . 
- \name .endm + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcccs \tmp, \tmp, \limit + bcs \bad +#endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 479a6352e0b..6f83ad6e4d3 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -101,28 +101,39 @@ extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -#define __get_user_x(__r2,__p,__e,__s,__i...) \ +#define __GUP_CLOBBER_1 "lr", "cc" +#ifdef CONFIG_CPU_USE_DOMAINS +#define __GUP_CLOBBER_2 "ip", "lr", "cc" +#else +#define __GUP_CLOBBER_2 "lr", "cc" +#endif +#define __GUP_CLOBBER_4 "lr", "cc" + +#define __get_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ + __asmeq("%3", "r1") \ "bl __get_user_" #__s \ : "=&r" (__e), "=r" (__r2) \ - : "0" (__p) \ - : __i, "cc") + : "0" (__p), "r" (__l) \ + : __GUP_CLOBBER_##__s) #define get_user(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ register unsigned long __r2 asm("r2"); \ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, 1, "lr"); \ - break; \ + __get_user_x(__r2, __p, __e, __l, 1); \ + break; \ case 2: \ - __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ + __get_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, 4, "lr"); \ + __get_user_x(__r2, __p, __e, __l, 4); \ break; \ default: __e = __get_user_bad(); break; \ } \ @@ -135,31 +146,34 @@ extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__s) \ +#define __put_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ + __asmeq("%3", "r1") \ "bl __put_user_" #__s \ : "=&r" (__e) \ - : "0" (__p), "r" (__r2) \ + : "0" (__p), "r" (__r2), "r" (__l) \ : "ip", "lr", "cc") #define put_user(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __r2 asm("r2") = (x); \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __put_user_x(__r2, __p, __e, 1); \ + __put_user_x(__r2, __p, __e, __l, 1); \ break; \ case 2: \ - __put_user_x(__r2, __p, __e, 2); \ + __put_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __put_user_x(__r2, __p, __e, 4); \ + __put_user_x(__r2, __p, __e, __l, 4); \ break; \ case 8: \ - __put_user_x(__r2, __p, __e, 8); \ + __put_user_x(__r2, __p, __e, __l, 8); \ break; \ default: __e = __put_user_bad(); break; \ } \ -- cgit v1.2.3 From ad72907acd2943304c292ae36960bb66e6dc23c9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 7 Sep 2012 18:24:10 +0100 Subject: ARM: 7528/1: uaccess: annotate [__]{get,put}_user functions with might_fault() The user access functions may generate a fault, resulting in invocation of a handler that may sleep. This patch annotates the accessors with might_fault() so that we print a warning if they are invoked from atomic context and help lockdep keep track of mmap_sem. 
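A hypothetical driver fragment of the kind of bug this annotation is meant to flag (illustrative only; the lock and function names are invented):

#include <linux/spinlock.h>
#include <linux/uaccess.h>

static DEFINE_SPINLOCK(mydev_lock);

static int mydev_copy_flag(u32 __user *uptr, u32 *out)
{
        int ret = 0;

        spin_lock(&mydev_lock);         /* atomic context: sleeping is forbidden */
        if (get_user(*out, uptr))       /* may fault, hence may sleep: might_fault() warns */
                ret = -EFAULT;
        spin_unlock(&mydev_lock);

        return ret;
}

With the annotation in place, CONFIG_DEBUG_ATOMIC_SLEEP should catch this at runtime even when the page happens to be resident and no fault is actually taken.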
Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 6f83ad6e4d3..77bd79f2ffd 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -118,7 +118,7 @@ extern int __get_user_4(void *); : "0" (__p), "r" (__l) \ : __GUP_CLOBBER_##__s) -#define get_user(x,p) \ +#define __get_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ @@ -141,6 +141,12 @@ extern int __get_user_4(void *); __e; \ }) +#define get_user(x,p) \ + ({ \ + might_fault(); \ + __get_user_check(x,p); \ + }) + extern int __put_user_1(void *, unsigned int); extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); @@ -155,7 +161,7 @@ extern int __put_user_8(void *, unsigned long long); : "0" (__p), "r" (__r2), "r" (__l) \ : "ip", "lr", "cc") -#define put_user(x,p) \ +#define __put_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __r2 asm("r2") = (x); \ @@ -180,6 +186,12 @@ extern int __put_user_8(void *, unsigned long long); __e; \ }) +#define put_user(x,p) \ + ({ \ + might_fault(); \ + __put_user_check(x,p); \ + }) + #else /* CONFIG_MMU */ /* @@ -233,6 +245,7 @@ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ + might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ @@ -314,6 +327,7 @@ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ + might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ -- cgit v1.2.3 From abcee5fb0dfbb248d883a2f6bdb4820abe3ac524 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 8 Sep 2011 09:06:10 +0100 Subject: ARM: SoC: add per-platform SMP operations This adds a 'struct smp_operations' to abstract the CPU initialization and hot plugging functions on SMP systems, which otherwise conflict in a multiplatform kernel. This also helps shmobile and potentially others that have more than one method to do these. To allow the kernel to continue building, the platform hooks are defined as weak symbols which are overrided by the platform code. Once all platforms are converted, the "weak" attribute will be removed and the function made static. Unlike the original version from Marc, this new version from Arnd does not use a generalized abstraction for per-soc data structures but only tries to solve the problem for the SMP operations. This way, we can collapse the previous four data structures into a single struct, which is less systematic but also easier to follow as a causal reader. 
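A sketch of how a platform is expected to hook in (the platform name, machine type and callbacks are invented for illustration; the callbacks are the platform's existing SMP routines):

#include <asm/smp.h>
#include <asm/mach/arch.h>

static struct smp_operations fooplat_smp_ops __initdata = {
        .smp_init_cpus          = fooplat_smp_init_cpus,
        .smp_prepare_cpus       = fooplat_smp_prepare_cpus,
        .smp_secondary_init     = fooplat_secondary_init,
        .smp_boot_secondary     = fooplat_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_kill               = fooplat_cpu_kill,
        .cpu_die                = fooplat_cpu_die,
#endif
};

MACHINE_START(FOOPLAT, "Hypothetical board")
        .smp            = smp_ops(fooplat_smp_ops),
        /* ...rest of the machine_desc as before... */
MACHINE_END

On !SMP builds smp_ops() collapses to NULL, so the same machine_desc initialiser compiles either way.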
Signed-off-by: Marc Zyngier Acked-by: Nicolas Pitre Signed-off-by: Arnd Bergmann --- arch/arm/include/asm/mach/arch.h | 7 +++++++ arch/arm/include/asm/smp.h | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 0b1c94b8c65..917d4fcfd9b 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -14,6 +14,12 @@ struct tag; struct meminfo; struct sys_timer; struct pt_regs; +struct smp_operations; +#ifdef CONFIG_SMP +#define smp_ops(ops) (&(ops)) +#else +#define smp_ops(ops) (struct smp_operations *)NULL +#endif struct machine_desc { unsigned int nr; /* architecture number */ @@ -35,6 +41,7 @@ struct machine_desc { unsigned char reserve_lp1 :1; /* never has lp1 */ unsigned char reserve_lp2 :1; /* never has lp2 */ char restart_mode; /* default restart mode */ + struct smp_operations *smp; /* SMP operations */ void (*fixup)(struct tag *, char **, struct meminfo *); void (*reserve)(void);/* reserve mem blocks */ diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index ae29293270a..f79a9f51e32 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -93,4 +93,37 @@ extern void platform_cpu_enable(unsigned int cpu); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +struct smp_operations { +#ifdef CONFIG_SMP + /* + * Setup the set of possible CPUs (via set_cpu_possible) + */ + void (*smp_init_cpus)(void); + /* + * Initialize cpu_possible map, and enable coherency + */ + void (*smp_prepare_cpus)(unsigned int max_cpus); + + /* + * Perform platform specific initialisation of the specified CPU. + */ + void (*smp_secondary_init)(unsigned int cpu); + /* + * Boot a secondary CPU, and assign it the specified idle task. + * This also gives us the initial stack to use for this CPU. + */ + int (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle); +#ifdef CONFIG_HOTPLUG_CPU + int (*cpu_kill)(unsigned int cpu); + void (*cpu_die)(unsigned int cpu); + int (*cpu_disable)(unsigned int cpu); +#endif +#endif +}; + +/* + * set platform specific SMP operations + */ +extern void smp_set_ops(struct smp_operations *); + #endif /* ifndef __ASM_ARM_SMP_H */ -- cgit v1.2.3 From ac6c7998712d55bd15aa2dd5ae85f5988c0cb526 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 27 Sep 2011 14:48:23 +0100 Subject: ARM: smp: Make SMP operations mandatory Now that all SMP platforms have been converted to use struct smp_operations, remove the "weak" attribute from the hooks in smp.c, and make the functions static wherever possible. Signed-off-by: Marc Zyngier Acked-by: Nicolas Pitre Signed-off-by: Arnd Bergmann --- arch/arm/include/asm/smp.h | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index f79a9f51e32..3a8cfee26c9 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -60,15 +60,6 @@ extern int boot_secondary(unsigned int cpu, struct task_struct *); */ asmlinkage void secondary_start_kernel(void); -/* - * Perform platform specific initialisation of the specified CPU. - */ -extern void platform_secondary_init(unsigned int cpu); - -/* - * Initialize cpu_possible map, and enable coherency - */ -extern void platform_smp_prepare_cpus(unsigned int); /* * Initial data for bringing up a secondary CPU. 
@@ -81,15 +72,10 @@ struct secondary_data { extern struct secondary_data secondary_data; extern int __cpu_disable(void); -extern int platform_cpu_disable(unsigned int cpu); extern void __cpu_die(unsigned int cpu); extern void cpu_die(void); -extern void platform_cpu_die(unsigned int cpu); -extern int platform_cpu_kill(unsigned int cpu); -extern void platform_cpu_enable(unsigned int cpu); - extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -- cgit v1.2.3 From 28e8e29c616f947348cc66bea684d0035c76021a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 12 Jun 2012 11:16:27 +0100 Subject: ARM: consolidate pen_release instead of having per platform definitions Almost each SMP platform defines pen_release to manage booting secondary CPUs. This of course clashes with the single zImage effort. Add the pen_release definition to the ARM SMP code, and remove all others. This should only be used by platforms which lack any kind of CPU power management... Reported-by: Arnd Bergmann Signed-off-by: Marc Zyngier Acked-by: Nicolas Pitre Signed-off-by: Arnd Bergmann --- arch/arm/include/asm/smp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 3a8cfee26c9..2e3be16c676 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -70,6 +70,7 @@ struct secondary_data { void *stack; }; extern struct secondary_data secondary_data; +extern volatile int pen_release; extern int __cpu_disable(void); -- cgit v1.2.3 From 01464226ac6089bd6a33f9899cc792c2355ebf39 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 28 Aug 2012 13:06:41 -0500 Subject: ARM: make mach/gpio.h headers optional Most platforms don't need mach/gpio.h and it prevents multi-platform kernel images. Add CONFIG_NEED_MACH_GPIO_H and make platforns select it if they need gpio.h. This is platforms that define __GPIOLIB_COMPLEX or have lots of implicit includes pulled in by mach/gpio.h. at91 and omap have gpio clean-up pending and can drop CONFIG_NEED_MACH_GPIO_H once that is in. Signed-off-by: Rob Herring Cc: Russell King Acked-by: Jason Cooper Acked-by: Linus Walleij --- arch/arm/include/asm/gpio.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h index c402e9b31f4..477e0206e01 100644 --- a/arch/arm/include/asm/gpio.h +++ b/arch/arm/include/asm/gpio.h @@ -6,7 +6,9 @@ #endif /* not all ARM platforms necessarily support this API ... */ +#ifdef CONFIG_NEED_MACH_GPIO_H #include +#endif #ifndef __ARM_GPIOLIB_COMPLEX /* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */ -- cgit v1.2.3 From 387798b37c8dd0ae24c0ac12ba456dd76865bca3 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 6 Sep 2012 13:41:12 -0500 Subject: ARM: initial multiplatform support This lets us build a multiplatform kernel for experimental purposes. However, it will not be useful for any real work, because it relies on a number of useful things to be disabled for now: * SMP support must be turned off because of conflicting symbols. Marc Zyngier has proposed a solution by adding a new SOC operations structure to hold indirect function pointers for these, but that work is currently stalled * We turn on SPARSE_IRQ unconditionally, which is not supported on most platforms. Each of them is currently in a different state, but most are being worked on. 
* A common clock framework is in place since v3.4 but not yet being used. Work on this is on its way. * DEBUG_LL for early debugging is currently disabled. * THUMB2_KERNEL does not work with allyesconfig because the kernel gets too big [Rob Herring]: Rebased to not be dependent on the mass mach header rename. As a result, omap2plus, imx, mxs and ux500 are not converted. Highbank, picoxcell, mvebu, and socfpga are converted. Signed-off-by: Arnd Bergmann Signed-off-by: Rob Herring Cc: Russell King Cc: Jason Cooper Cc: Andrew Lunn Acked-by: Jamie Iles Cc: Dinh Nguyen --- arch/arm/include/asm/timex.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index ce119442277..963342acebb 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -13,7 +13,11 @@ #define _ASMARM_TIMEX_H #include +#ifdef CONFIG_ARCH_MULTIPLATFORM +#define CLOCK_TICK_RATE 1000000 +#else #include +#endif typedef unsigned long cycles_t; -- cgit v1.2.3 From 57b9da32addd819b0baef5573a88fcfaf3e6699b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:22 +0100 Subject: ARM: 7508/1: opcodes: Don't define the thumb32 byteswapping macros for BE32 The existing __mem_to_opcode_thumb32() is incorrect for BE32 platforms. However, these don't support Thumb-2 kernels, so this option is not so relevant for those platforms anyway. This operation is complicated by the lack of unaligned memory access support prior to ARMv6. Rather than provide a "working" macro which will probably won't get used (or worse, will get misused), this patch removes the macro for BE32 kernels. People manipulating Thumb opcodes prior to ARMv6 should almost certainly be splitting these operations into halfwords anyway, using __opcode_thumb32_{first,second,compose}() and the 16-bit opcode transformations. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 19c48deda70..6bf54f9411a 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -49,18 +49,31 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); #include #ifdef CONFIG_CPU_ENDIAN_BE8 + #define __opcode_to_mem_arm(x) swab32(x) #define __opcode_to_mem_thumb16(x) swab16(x) #define __opcode_to_mem_thumb32(x) swahb32(x) -#else + +#else /* ! CONFIG_CPU_ENDIAN_BE8 */ + #define __opcode_to_mem_arm(x) ((u32)(x)) #define __opcode_to_mem_thumb16(x) ((u16)(x)) +#ifndef CONFIG_CPU_ENDIAN_BE32 +/* + * On BE32 systems, using 32-bit accesses to store Thumb instructions will not + * work in all cases, due to alignment constraints. For now, a correct + * version is not provided for BE32. + */ #define __opcode_to_mem_thumb32(x) swahw32(x) #endif +#endif /* ! 
CONFIG_CPU_ENDIAN_BE8 */ + #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) +#ifndef CONFIG_CPU_ENDIAN_BE32 #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) +#endif /* Operations specific to Thumb opcodes */ -- cgit v1.2.3 From 0ce3de23f2a520a6ac8c2179fb8f88342c4992ef Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:23 +0100 Subject: ARM: 7509/1: opcodes: Make opcode byteswapping macros assembly-compatible Most of the existing macros don't work with assembler, due to the use of type casts and C functions from . This patch abstracts out those operations and provides simple explicit versions for use in assembly code. __opcode_is_thumb32() and __opcode_is_thumb16() are also converted to do bitmask-based testing to avoid confusion if these are used in assembly code (the assembler typically treats all arithmetic values as signed). These changes avoid the need for the compiler to pre-evaluate constant expressions used to generate opcodes. By ensuring that the forms of these expressions can be evaluated directly by the assembler, we can just stringify the expressions directly into the asm during the preprocessing pass. The alternative approach (passing the evaluated expression via an inline asm "i" constraint) gets painful because the contents of the asm and the constraints must be kept in sync. This makes the resulting macros awkward to use. Retaining the C forms of the macros allows more efficient code to be generated when opcodes are generated programmatically at run- time, but there is no way to embed run-time-generated opcodes in asm() blocks. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes.h | 97 +++++++++++++++++++++++++++++++++++------- 1 file changed, 82 insertions(+), 15 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 6bf54f9411a..f57e417cab9 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -18,6 +18,33 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); #define ARM_OPCODE_CONDTEST_UNCOND 2 +/* + * Assembler opcode byteswap helpers. + * These are only intended for use by this header: don't use them directly, + * because they will be suboptimal in most cases. + */ +#define ___asm_opcode_swab32(x) ( \ + (((x) << 24) & 0xFF000000) \ + | (((x) << 8) & 0x00FF0000) \ + | (((x) >> 8) & 0x0000FF00) \ + | (((x) >> 24) & 0x000000FF) \ +) +#define ___asm_opcode_swab16(x) ( \ + (((x) << 8) & 0xFF00) \ + | (((x) >> 8) & 0x00FF) \ +) +#define ___asm_opcode_swahb32(x) ( \ + (((x) << 8) & 0xFF00FF00) \ + | (((x) >> 8) & 0x00FF00FF) \ +) +#define ___asm_opcode_swahw32(x) ( \ + (((x) << 16) & 0xFFFF0000) \ + | (((x) >> 16) & 0x0000FFFF) \ +) +#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF) +#define ___asm_opcode_identity16(x) ((x) & 0xFFFF) + + /* * Opcode byteswap helpers * @@ -41,30 +68,58 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not * represent any valid Thumb-2 instruction. For this range, * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. + * + * The ___asm variants are intended only for use by this header, in situations + * involving inline assembler. For .S files, the normal __opcode_*() macros + * should do the right thing. 
*/ +#ifdef __ASSEMBLY__ -#ifndef __ASSEMBLY__ +#define ___opcode_swab32(x) ___asm_opcode_swab32(x) +#define ___opcode_swab16(x) ___asm_opcode_swab16(x) +#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x) +#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x) +#define ___opcode_identity32(x) ___asm_opcode_identity32(x) +#define ___opcode_identity16(x) ___asm_opcode_identity16(x) + +#else /* ! __ASSEMBLY__ */ #include #include +#define ___opcode_swab32(x) swab32(x) +#define ___opcode_swab16(x) swab16(x) +#define ___opcode_swahb32(x) swahb32(x) +#define ___opcode_swahw32(x) swahw32(x) +#define ___opcode_identity32(x) ((u32)(x)) +#define ___opcode_identity16(x) ((u16)(x)) + +#endif /* ! __ASSEMBLY__ */ + + #ifdef CONFIG_CPU_ENDIAN_BE8 -#define __opcode_to_mem_arm(x) swab32(x) -#define __opcode_to_mem_thumb16(x) swab16(x) -#define __opcode_to_mem_thumb32(x) swahb32(x) +#define __opcode_to_mem_arm(x) ___opcode_swab32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x) +#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x) #else /* ! CONFIG_CPU_ENDIAN_BE8 */ -#define __opcode_to_mem_arm(x) ((u32)(x)) -#define __opcode_to_mem_thumb16(x) ((u16)(x)) +#define __opcode_to_mem_arm(x) ___opcode_identity32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x) #ifndef CONFIG_CPU_ENDIAN_BE32 /* * On BE32 systems, using 32-bit accesses to store Thumb instructions will not * work in all cases, due to alignment constraints. For now, a correct * version is not provided for BE32. */ -#define __opcode_to_mem_thumb32(x) swahw32(x) +#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x) #endif #endif /* ! 
CONFIG_CPU_ENDIAN_BE8 */ @@ -78,15 +133,27 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); /* Operations specific to Thumb opcodes */ /* Instruction size checks: */ -#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL) -#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL) +#define __opcode_is_thumb32(x) ( \ + ((x) & 0xF8000000) == 0xE8000000 \ + || ((x) & 0xF0000000) == 0xF0000000 \ +) +#define __opcode_is_thumb16(x) ( \ + ((x) & 0xFFFF0000) == 0 \ + && !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000) \ +) /* Operations to construct or split 32-bit Thumb instructions: */ -#define __opcode_thumb32_first(x) ((u16)((x) >> 16)) -#define __opcode_thumb32_second(x) ((u16)(x)) -#define __opcode_thumb32_compose(first, second) \ - (((u32)(u16)(first) << 16) | (u32)(u16)(second)) - -#endif /* __ASSEMBLY__ */ +#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16)) +#define __opcode_thumb32_second(x) (___opcode_identity16(x)) +#define __opcode_thumb32_compose(first, second) ( \ + (___opcode_identity32(___opcode_identity16(first)) << 16) \ + | ___opcode_identity32(___opcode_identity16(second)) \ +) +#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16)) +#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x)) +#define ___asm_opcode_thumb32_compose(first, second) ( \ + (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \ + | ___asm_opcode_identity32(___asm_opcode_identity16(second)) \ +) #endif /* __ASM_ARM_OPCODES_H */ -- cgit v1.2.3 From a61a41a01822d48bbe53e6c1df56b88ee2f3de34 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:24 +0100 Subject: ARM: 7510/1: opcodes: Add helpers for emitting custom opcodes This patch adds some __inst_() macros for injecting custom opcodes in assembler (both inline and in .S files). They should make it easier and cleaner to get things right in little-/big- endian/ARM/Thumb-2 kernels without a lot of #ifdefs. This pure-preprocessor approach is preferred over the alternative method of wedging extra assembler directives into the assembler input using top-level asm() blocks, since there is no way to guarantee that the compiler won't reorder those with respect to each other or with respect to non-toplevel asm() blocks, unless -fno-toplevel-reorder is passed (which is in itself somewhat undesirable because it defeats some potential optimisations). Currently _does_ silently rely on the compiler not reordering at the top level, but it seems better to avoid adding extra code which depends on this if the same result can be achieved in another way. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes.h | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index f57e417cab9..f7937e1c46b 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -156,4 +156,73 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); | ___asm_opcode_identity32(___asm_opcode_identity16(second)) \ ) +/* + * Opcode injection helpers + * + * In rare cases it is necessary to assemble an opcode which the + * assembler does not support directly, or which would normally be + * rejected because of the CFLAGS or AFLAGS used to build the affected + * file. 
+ * + * Before using these macros, consider carefully whether it is feasible + * instead to change the build flags for your file, or whether it really + * makes sense to support old assembler versions when building that + * particular kernel feature. + * + * The macros defined here should only be used where there is no viable + * alternative. + * + * + * __inst_arm(x): emit the specified ARM opcode + * __inst_thumb16(x): emit the specified 16-bit Thumb opcode + * __inst_thumb32(x): emit the specified 32-bit Thumb opcode + * + * __inst_arm_thumb16(arm, thumb): emit either the specified arm or + * 16-bit Thumb opcode, depending on whether an ARM or Thumb-2 + * kernel is being built + * + * __inst_arm_thumb32(arm, thumb): emit either the specified arm or + * 32-bit Thumb opcode, depending on whether an ARM or Thumb-2 + * kernel is being built + * + * + * Note that using these macros directly is poor practice. Instead, you + * should use them to define human-readable wrapper macros to encode the + * instructions that you care about. In code which might run on ARMv7 or + * above, you can usually use the __inst_arm_thumb{16,32} macros to + * specify the ARM and Thumb alternatives at the same time. This ensures + * that the correct opcode gets emitted depending on the instruction set + * used for the kernel build. + */ +#include + +#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x)) +#define __inst_thumb32(x) ___inst_thumb32( \ + ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)), \ + ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x)) \ +) +#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x)) + +#ifdef CONFIG_THUMB2_KERNEL +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \ + __inst_thumb16(thumb_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \ + __inst_thumb32(thumb_opcode) +#else +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#endif + +/* Helpers for the helpers. Don't use these directly. */ +#ifdef __ASSEMBLY__ +#define ___inst_arm(x) .long x +#define ___inst_thumb16(x) .short x +#define ___inst_thumb32(first, second) .short first, second +#else +#define ___inst_arm(x) ".long " __stringify(x) "\n\t" +#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t" +#define ___inst_thumb32(first, second) \ + ".short " __stringify(first) ", " __stringify(second) "\n\t" +#endif + #endif /* __ASM_ARM_OPCODES_H */ -- cgit v1.2.3 From 508514ed25315dd28340000831b17535d6f772da Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:25 +0100 Subject: ARM: 7511/1: opcodes: Opcode definitions for the Virtualization Extensions For now, this patch just adds a definition for the HVC instruction. More can be added here later, as needed. Now that we have a real example of how to use the opcode injection macros properly, this patch also adds a cross-reference from the explanation in opcodes.h (since without an example, figuring out how to use the macros is not that easy). 
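For instance, a wrapper along these lines (hypothetical function name; the immediate #0 is arbitrary, and a handler for the call is assumed to be installed) emits the correct encoding for both ARM and Thumb-2 kernels:

#include <asm/opcodes-virt.h>

/* Illustrative only: issue a hypervisor call with immediate #0 */
static inline void example_hvc0(void)
{
        asm volatile(__HVC(0) : : : "memory");
}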
Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes-virt.h | 29 +++++++++++++++++++++++++++++ arch/arm/include/asm/opcodes.h | 2 ++ 2 files changed, 31 insertions(+) create mode 100644 arch/arm/include/asm/opcodes-virt.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h new file mode 100644 index 00000000000..b85665a96f8 --- /dev/null +++ b/arch/arm/include/asm/opcodes-virt.h @@ -0,0 +1,29 @@ +/* + * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions + * Copyright (C) 2012 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef __ASM_ARM_OPCODES_VIRT_H +#define __ASM_ARM_OPCODES_VIRT_H + +#include + +#define __HVC(imm16) __inst_arm_thumb32( \ + 0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F), \ + 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \ +) + +#endif /* ! __ASM_ARM_OPCODES_VIRT_H */ diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index f7937e1c46b..74e211a6fb2 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -193,6 +193,8 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); * specify the ARM and Thumb alternatives at the same time. This ensures * that the correct opcode gets emitted depending on the instruction set * used for the kernel build. + * + * Look at opcodes-virt.h for an example of how to use these macros. */ #include -- cgit v1.2.3 From a1b2dde70419ae947fd7c9c8fcad7da005dc600e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 7 Sep 2012 18:09:58 +0100 Subject: ARM: 7522/1: arch_timers: register a time/cycle counter Some subsystems (KVM for example) need access to a cycle counter. In the KVM case, this is used to measure the time delta between host and guest in order to accurately generate timer events for the guest. 
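A sketch of how such a subsystem might consume the new interface (the surrounding function is hypothetical; timecounter_read() is assumed to come from the generic clocksource/timecounter helpers):

#include <linux/clocksource.h>
#include <asm/arch_timer.h>

static u64 example_read_time(void)
{
        struct timecounter *tc = arch_timer_get_timecounter();

        if (!tc)                        /* no architected timer present */
                return 0;

        return timecounter_read(tc);    /* time derived from the cycle counter */
}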
Signed-off-by: Marc Zyngier Signed-off-by: Russell King --- arch/arm/include/asm/arch_timer.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 62e75475e57..ad9b155c826 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -2,11 +2,13 @@ #define __ASMARM_ARCH_TIMER_H #include +#include #ifdef CONFIG_ARM_ARCH_TIMER #define ARCH_HAS_READ_CURRENT_TIMER int arch_timer_of_register(void); int arch_timer_sched_clock_init(void); +struct timecounter *arch_timer_get_timecounter(void); #else static inline int arch_timer_of_register(void) { @@ -17,6 +19,11 @@ static inline int arch_timer_sched_clock_init(void) { return -ENXIO; } + +static inline struct timecounter *arch_timer_get_timecounter(void) +{ + return NULL; +} #endif #endif -- cgit v1.2.3 From b9a348cb12f3925a27fcf0a38a146b40978588d0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 9 Aug 2012 10:43:29 +0100 Subject: ARM: opcodes: add __ERET/__MSR_ELR_HYP instruction encoding Enabling boot from HYP mode requires the use of some more virt-specific instructions ("eret" and "msr elr_hyp, reg"). Add the necessary encoding to asm/opcode-virt.h. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/opcodes-virt.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h index b85665a96f8..efcfdf92d9d 100644 --- a/arch/arm/include/asm/opcodes-virt.h +++ b/arch/arm/include/asm/opcodes-virt.h @@ -26,4 +26,14 @@ 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \ ) +#define __ERET __inst_arm_thumb32( \ + 0xE160006E, \ + 0xF3DE8F00 \ +) + +#define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \ + 0xE12EF300 | regnum, \ + 0xF3808E30 | (regnum << 16) \ +) + #endif /* ! __ASM_ARM_OPCODES_VIRT_H */ -- cgit v1.2.3 From 80c59dafb1a9a86fa996e6e34d06b60567c925ca Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 9 Feb 2012 08:47:17 -0800 Subject: ARM: virt: allow the kernel to be entered in HYP mode This patch does two things: * Ensure that asynchronous aborts are masked at kernel entry. The bootloader should be masking these anyway, but this reduces the damage window just in case it doesn't. * Enter svc mode via exception return to ensure that CPU state is properly serialised. This does not matter when switching from an ordinary privileged mode ("PL1" modes in ARMv7-AR rev C parlance), but it potentially does matter when switching from a another privileged mode such as hyp mode. This should allow the kernel to boot safely either from svc mode or hyp mode, even if no support for use of the ARM Virtualization Extensions is built into the kernel. Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm/include/asm/assembler.h | 28 ++++++++++++++++++++++ arch/arm/include/asm/ptrace.h | 1 + arch/arm/include/asm/virt.h | 52 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 arch/arm/include/asm/virt.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 03fb93621d0..658a15dbc87 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -22,6 +22,7 @@ #include #include +#include #define IOMEM(x) (x) @@ -239,6 +240,33 @@ .endm #endif +/* + * Helper macro to enter SVC mode cleanly and mask interrupts. 
reg is + * a scratch register for the macro to overwrite. + * + * This macro is intended for forcing the CPU into SVC mode at boot time. + * you cannot return to the original mode. + * + * Beware, it also clobers LR. + */ +.macro safe_svcmode_maskall reg:req + mrs \reg , cpsr + mov lr , \reg + and lr , lr , #MODE_MASK + cmp lr , #HYP_MODE + orr \reg , \reg , #PSR_A_BIT | PSR_I_BIT | PSR_F_BIT + bic \reg , \reg , #MODE_MASK + orr \reg , \reg , #SVC_MODE +THUMB( orr \reg , \reg , #PSR_T_BIT ) + msr spsr_cxsf, \reg + adr lr, BSYM(2f) + bne 1f + __MSR_ELR_HYP(14) + __ERET +1: movs pc, lr +2: +.endm + /* * STRT/LDRT access macros with ARM and Thumb-2 variants */ diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 355ece523f4..91ef6c231c4 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -44,6 +44,7 @@ #define IRQ_MODE 0x00000012 #define SVC_MODE 0x00000013 #define ABT_MODE 0x00000017 +#define HYP_MODE 0x0000001a #define UND_MODE 0x0000001b #define SYSTEM_MODE 0x0000001f #define MODE32_BIT 0x00000010 diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h new file mode 100644 index 00000000000..0a99723edbf --- /dev/null +++ b/arch/arm/include/asm/virt.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2012 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef VIRT_H +#define VIRT_H + +#include + +/* + * Flag indicating that the kernel was not entered in the same mode on every + * CPU. The zImage loader stashes this value in an SPSR, so we need an + * architecturally defined flag bit here (the N flag, as it happens) + */ +#define BOOT_CPU_MODE_MISMATCH (1<<31) + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_ARM_VIRT_EXT +/* + * __boot_cpu_mode records what mode the primary CPU was booted in. + * A correctly-implemented bootloader must start all CPUs in the same mode: + * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate + * that some CPU(s) were booted in a different mode. + * + * This allows the kernel to flag an error when the secondaries have come up. + */ +extern int __boot_cpu_mode; + +void __hyp_set_vectors(unsigned long phys_vector_base); +unsigned long __hyp_get_vectors(void); +#else +#define __boot_cpu_mode (SVC_MODE) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* ! VIRT_H */ -- cgit v1.2.3 From 4588c34daabb5aebee9cbe90f0ca6ab11412f207 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 17 Feb 2012 16:54:28 +0000 Subject: ARM: virt: Add boot-time diagnostics In order to easily detect pathological cases, print some diagnostics when the kernel boots. This also provides helpers to detect that HYP mode is actually available, which can be used by other subsystems to enable HYP specific features. 
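For example, a subsystem-side check might look like this (hypothetical initcall, shown only to illustrate the helpers declared below):

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/virt.h>

static int __init example_hyp_feature_init(void)
{
        if (!is_hyp_mode_available()) {
                pr_info("example: HYP mode unavailable, feature disabled\n");
                return 0;
        }

        if (is_hyp_mode_mismatched())
                pr_warn("example: CPUs were booted in different modes\n");

        /* ...safe to rely on HYP-dependent features from here on... */
        return 0;
}
late_initcall(example_hyp_feature_init);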
Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm/include/asm/virt.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 0a99723edbf..86164df86cb 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -47,6 +47,23 @@ unsigned long __hyp_get_vectors(void); #define __boot_cpu_mode (SVC_MODE) #endif +#ifndef ZIMAGE +void hyp_mode_check(void); + +/* Reports the availability of HYP mode */ +static inline bool is_hyp_mode_available(void) +{ + return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE && + !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH)); +} + +/* Check if the bootloader has booted CPUs in different modes */ +static inline bool is_hyp_mode_mismatched(void) +{ + return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* ! VIRT_H */ -- cgit v1.2.3 From 1f66e06fb6414732bef7bf4a071ef76a837badec Mon Sep 17 00:00:00 2001 From: Wade Farnsworth Date: Fri, 7 Sep 2012 18:18:25 +0100 Subject: ARM: 7524/1: support syscall tracing As specified by ftrace-design.txt, TIF_SYSCALL_TRACEPOINT was added, as well as NR_syscalls in asm/unistd.h. Additionally, __sys_trace was modified to call trace_sys_enter and trace_sys_exit when appropriate. Tests #2 - #4 of "perf test" now complete successfully. Signed-off-by: Steven Walter Signed-off-by: Wade Farnsworth Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/syscall.h | 4 ++++ arch/arm/include/asm/thread_info.h | 4 +++- arch/arm/include/asm/unistd.h | 8 ++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index c334a23ddf7..47486a41c56 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -9,6 +9,10 @@ #include +#include + +#define NR_syscalls (__NR_syscalls) + extern const unsigned long sys_call_table[]; static inline int syscall_get_nr(struct task_struct *task, diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index af7b0bda335..5ded667e330 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 +#define TIF_SYSCALL_TRACEPOINT 10 #define TIF_POLLING_NRFLAG 16 #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ @@ -160,12 +161,13 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) /* Checks for any syscall work in entry-common.S */ -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 0cab47d4a83..a566ec20d48 100644 --- a/arch/arm/include/asm/unistd.h +++ 
b/arch/arm/include/asm/unistd.h @@ -405,6 +405,14 @@ #define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) +/* + * This may need to be greater than __NR_last_syscall+1 in order to + * account for the padding in the syscall table + */ +#ifdef __KERNEL__ +#define __NR_syscalls (380) +#endif /* __KERNEL__ */ + /* * The following SWIs are ARM private. */ -- cgit v1.2.3 From 559a593905e583fca23229b916c016e5211c6766 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 19 Sep 2012 08:16:07 +0100 Subject: ARM: 7536/1: smp: Formalize an IPI for wakeup Remove the offset from ipi_msg_type and assume that SGI0 is the wakeup interrupt now that all WFI hotplug users call gic_raise_softirq() with 0 instead of 1. This allows us to track how many wakeup interrupts are sent and also removes the unknown IPI printk message for WFI hotplug based systems. Reviewed-by: Nicolas Pitre Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/include/asm/hardirq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 436e60b2cf7..2740c2a2df6 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -5,7 +5,7 @@ #include #include -#define NR_IPI 5 +#define NR_IPI 6 typedef struct { unsigned int __softirq_pending; -- cgit v1.2.3 From ddd03a1f7591827906d63dbe3ee003f832bb584f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 3 Aug 2012 12:12:38 +0400 Subject: get rid of generic instances of asm/exec.h Signed-off-by: Al Viro --- arch/arm/include/asm/Kbuild | 1 + arch/arm/include/asm/exec.h | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) delete mode 100644 arch/arm/include/asm/exec.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 960abceb8e1..7779e389b16 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += bitsperlong.h generic-y += cputime.h generic-y += emergency-restart.h generic-y += errno.h +generic-y += exec.h generic-y += ioctl.h generic-y += irq_regs.h generic-y += kdebug.h diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h deleted file mode 100644 index 7c4fbef72b3..00000000000 --- a/arch/arm/include/asm/exec.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_ARM_EXEC_H -#define __ASM_ARM_EXEC_H - -#define arch_align_stack(x) (x) - -#endif /* __ASM_ARM_EXEC_H */ -- cgit v1.2.3 From 826eba4db0c643cc0e705c8365902c63fac2b7ee Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 3 Aug 2012 12:14:44 +0400 Subject: the only place that needs to include asm/exec.h is linux/binfmts.h Signed-off-by: Al Viro --- arch/arm/include/asm/system.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 74542c52f9b..368165e33c1 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From bf619faeced5c2beb32a39559e1f1117f88fd702 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 17:55:20 +0100 Subject: ARM: reserve syscall 378 for kcmp kcmp has appeared on x86, but has not been noticed because checksyscalls.sh is broken at the moment. Reserve ARM syscall 378 for this should we ever need it, and add an __IGNORE entry for this unimplemented syscall. 
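To make the reservation concrete, a hypothetical follow-up (not part of this patch) would consume the slot in the usual way and drop the ignore entry again:

/* hypothetical future wiring-up of the reserved number */
#define __NR_kcmp		(__NR_SYSCALL_BASE+378)	/* replaces the "378 for kcmp" placeholder */
/* ...together with the usual CALL() entry in the syscall table, after which
 * the __IGNORE_kcmp define added below would be removed again. */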
Signed-off-by: Russell King --- arch/arm/include/asm/unistd.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 0cab47d4a83..2fde5fd1acc 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -404,6 +404,7 @@ #define __NR_setns (__NR_SYSCALL_BASE+375) #define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) + /* 378 for kcmp */ /* * The following SWIs are ARM private. @@ -483,6 +484,7 @@ */ #define __IGNORE_fadvise64_64 #define __IGNORE_migrate_pages +#define __IGNORE_kcmp #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ -- cgit v1.2.3 From 031bd879f79d59d2f4fccd44377adf24fb977b5a Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 6 Sep 2012 18:35:13 +0530 Subject: ARM: mm: implement LoUIS API for cache maintenance ops ARM v7 architecture introduced the concept of cache levels and related control registers. New processors like A7 and A15 embed an L2 unified cache controller that becomes part of the cache level hierarchy. Some operations in the kernel like cpu_suspend and __cpu_disable do not require a flush of the entire cache hierarchy to DRAM but just the cache levels belonging to the Level of Unification Inner Shareable (LoUIS), which in most of ARM v7 systems correspond to L1. The current cache flushing API used in cpu_suspend and __cpu_disable, flush_cache_all(), ends up flushing the whole cache hierarchy since for v7 it cleans and invalidates all cache levels up to Level of Coherency (LoC) which cripples system performance when used in hot paths like hotplug and cpuidle. Therefore a new kernel cache maintenance API must be added to cope with latest ARM system requirements. This patch adds flush_cache_louis() to the ARM kernel cache maintenance API. This function cleans and invalidates all data cache levels up to the Level of Unification Inner Shareable (LoUIS) and invalidates the instruction cache for processors that support it (> v7). This patch also creates an alias of the cache LoUIS function to flush_kern_all for all processor versions prior to v7, so that the current cache flushing behaviour is unchanged for those processors. v7 cache maintenance code implements a cache LoUIS function that cleans and invalidates the D-cache up to LoUIS and invalidates the I-cache, according to the new API. Reviewed-by: Santosh Shilimkar Reviewed-by: Nicolas Pitre Signed-off-by: Lorenzo Pieralisi Tested-by: Shawn Guo --- arch/arm/include/asm/cacheflush.h | 15 +++++++++++++++ arch/arm/include/asm/glue-cache.h | 1 + 2 files changed, 16 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e4448e16046..e1489c54cd1 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -49,6 +49,13 @@ * * Unconditionally clean and invalidate the entire cache. * + * flush_kern_louis() + * + * Flush data cache levels up to the level of unification + * inner shareable and invalidate the I-cache. + * Only needed from v7 onwards, falls back to flush_cache_all() + * for all other processor versions. 
+ * * flush_user_all() * * Clean and invalidate all user space cache entries @@ -97,6 +104,7 @@ struct cpu_cache_fns { void (*flush_icache_all)(void); void (*flush_kern_all)(void); + void (*flush_kern_louis)(void); void (*flush_user_all)(void); void (*flush_user_range)(unsigned long, unsigned long, unsigned int); @@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache; #define __cpuc_flush_icache_all cpu_cache.flush_icache_all #define __cpuc_flush_kern_all cpu_cache.flush_kern_all +#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis #define __cpuc_flush_user_all cpu_cache.flush_user_all #define __cpuc_flush_user_range cpu_cache.flush_user_range #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range @@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache; extern void __cpuc_flush_icache_all(void); extern void __cpuc_flush_kern_all(void); +extern void __cpuc_flush_kern_louis(void); extern void __cpuc_flush_user_all(void); extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); @@ -204,6 +214,11 @@ static inline void __flush_icache_all(void) __flush_icache_preferred(); } +/* + * Flush caches up to Level of Unification Inner Shareable + */ +#define flush_cache_louis() __cpuc_flush_kern_louis() + #define flush_cache_all() __cpuc_flush_kern_all() static inline void vivt_flush_cache_mm(struct mm_struct *mm) diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index 7e30874377e..2d6a7de87a8 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -132,6 +132,7 @@ #ifndef MULTI_CACHE #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis) #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) -- cgit v1.2.3 From 56942fec06efa0e17df0f4c3b438332c923b9014 Mon Sep 17 00:00:00 2001 From: Jonathan Austin Date: Fri, 21 Sep 2012 18:51:44 +0100 Subject: ARM: 7538/1: delay: add registration mechanism for delay timer sources The current timer-based delay loop relies on the architected timer to initiate the switch away from the polling-based implementation. This is unfortunate for platforms without the architected timers but with a suitable delay source (that is, constant frequency, always powered-up and ticking as long as the CPUs are online). This patch introduces a registration mechanism for the delay timer (which provides an unconditional read_current_timer implementation) and updates the architected timer code to use the new interface. 
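A minimal sketch of how a platform might use the new interface: struct delay_timer and register_current_timer_delay() are taken from the hunks below, while the counter register, its physical address and the 24 MHz rate are invented for the example.

#include <linux/init.h>
#include <linux/io.h>
#include <asm/delay.h>

static void __iomem *myplat_timer_cnt;		/* hypothetical free-running counter register */

static unsigned long myplat_read_timer(void)
{
	return readl_relaxed(myplat_timer_cnt);
}

static const struct delay_timer myplat_delay_timer = {
	.read_current_timer	= myplat_read_timer,
	.freq			= 24000000,	/* constant 24 MHz, always powered */
};

static void __init myplat_timer_init(void)
{
	myplat_timer_cnt = ioremap(0xfee00000, 0x1000);	/* made-up physical address */
	/* ...normal clocksource/clockevent setup would go here... */
	register_current_timer_delay(&myplat_delay_timer);
}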
Reviewed-by: Stephen Boyd Signed-off-by: Jonathan Austin Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/arch_timer.h | 1 - arch/arm/include/asm/delay.h | 9 +++++++++ arch/arm/include/asm/timex.h | 6 ------ 3 files changed, 9 insertions(+), 7 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index ad9b155c826..d40229d9a1c 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -5,7 +5,6 @@ #include #ifdef CONFIG_ARM_ARCH_TIMER -#define ARCH_HAS_READ_CURRENT_TIMER int arch_timer_of_register(void); int arch_timer_sched_clock_init(void); struct timecounter *arch_timer_get_timecounter(void); diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index dc6145120de..ab98fdd083b 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -15,6 +15,11 @@ #ifndef __ASSEMBLY__ +struct delay_timer { + unsigned long (*read_current_timer)(void); + unsigned long freq; +}; + extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); @@ -56,6 +61,10 @@ extern void __loop_delay(unsigned long loops); extern void __loop_udelay(unsigned long usecs); extern void __loop_const_udelay(unsigned long); +/* Delay-loop timer registration. */ +#define ARCH_HAS_READ_CURRENT_TIMER +extern void register_current_timer_delay(const struct delay_timer *timer); + #endif /* __ASSEMBLY__ */ #endif /* defined(_ARM_DELAY_H) */ diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index ce119442277..9acc135dad9 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -12,15 +12,9 @@ #ifndef _ASMARM_TIMEX_H #define _ASMARM_TIMEX_H -#include #include typedef unsigned long cycles_t; - -#ifdef ARCH_HAS_READ_CURRENT_TIMER #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) -#else -#define get_cycles() (0) -#endif #endif -- cgit v1.2.3 From 786d35d45cc40b2a51a18f73e14e135d47fdced7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 28 Sep 2012 14:31:03 +0930 Subject: Make most arch asm/module.h files use asm-generic/module.h Use the mapping of Elf_[SPE]hdr, Elf_Addr, Elf_Sym, Elf_Dyn, Elf_Rel/Rela, ELF_R_TYPE() and ELF_R_SYM() to either the 32-bit version or the 64-bit version into asm-generic/module.h for all arches bar MIPS. Also, use the generic definition mod_arch_specific where possible. To this end, I've defined three new config bools: (*) HAVE_MOD_ARCH_SPECIFIC Arches define this if they don't want to use the empty generic mod_arch_specific struct. (*) MODULES_USE_ELF_RELA Arches define this if their modules can contain RELA records. This causes the Elf_Rela mapping to be emitted and allows apply_relocate_add() to be defined by the arch rather than have the core emit an error message. (*) MODULES_USE_ELF_REL Arches define this if their modules can contain REL records. This causes the Elf_Rel mapping to be emitted and allows apply_relocate() to be defined by the arch rather than have the core emit an error message. Note that it is possible to allow both REL and RELA records: m68k and mips are two arches that do this. With this, some arch asm/module.h files can be deleted entirely and replaced with a generic-y marker in the arch Kbuild file. Additionally, I have removed the bits from m32r and score that handle the unsupported type of relocation record as that's now handled centrally. 
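Only as an illustration of the description above (a simplified sketch, not the verbatim header), the generic header boils down to roughly this for a 32-bit architecture; the 64-bit variants are selected by CONFIG_64BIT instead:

/* sketch of what asm-generic/module.h provides, per the commit message */
#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC
struct mod_arch_specific { };		/* empty unless the arch opts out */
#endif

#define Elf_Shdr	Elf32_Shdr
#define Elf_Phdr	Elf32_Phdr
#define Elf_Ehdr	Elf32_Ehdr
#define Elf_Addr	Elf32_Addr
#define Elf_Sym		Elf32_Sym
#define Elf_Dyn		Elf32_Dyn
#define ELF_R_TYPE(X)	ELF32_R_TYPE(X)
#define ELF_R_SYM(X)	ELF32_R_SYM(X)

#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel		Elf32_Rel	/* arch supplies apply_relocate() */
#endif
#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela	Elf32_Rela	/* arch supplies apply_relocate_add() */
#endif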
Signed-off-by: David Howells Acked-by: Sam Ravnborg Signed-off-by: Rusty Russell --- arch/arm/include/asm/module.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 6c6809f982f..0d3a28dbc8e 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -1,9 +1,7 @@ #ifndef _ASM_ARM_MODULE_H #define _ASM_ARM_MODULE_H -#define Elf_Shdr Elf32_Shdr -#define Elf_Sym Elf32_Sym -#define Elf_Ehdr Elf32_Ehdr +#include struct unwind_table; @@ -16,13 +14,11 @@ enum { ARM_SEC_DEVEXIT, ARM_SEC_MAX, }; -#endif struct mod_arch_specific { -#ifdef CONFIG_ARM_UNWIND struct unwind_table *unwind[ARM_SEC_MAX]; -#endif }; +#endif /* * Add the ARM architecture version to the version magic string -- cgit v1.2.3 From 9e14f828ee4a7a4a98703e380d180717a579fb35 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 9 Sep 2012 21:31:07 -0400 Subject: arm: split ret_from_fork, simplify kernel_thread() [based on patch by rmk] Signed-off-by: Al Viro --- arch/arm/include/asm/processor.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 99afa749826..06e7d509eaa 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -85,11 +85,6 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() #endif -/* - * Create a new kernel thread - */ -extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) -- cgit v1.2.3 From 583d632fb3f6d7e9d133a7260829dd7565f8cc99 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 2 Aug 2012 11:46:39 +0400 Subject: arm: introduce ret_from_kernel_execve(), switch to generic kernel_execve() Signed-off-by: Al Viro --- arch/arm/include/asm/unistd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 0cab47d4a83..2c9b7a87e64 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -469,6 +469,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_SOCKETCALL #endif +#define __ARCH_WANT_KERNEL_EXECVE /* * "Conditional" syscalls -- cgit v1.2.3 From bfd170d56505bf21cb636b0f1f169eaa815bdfe4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 2 Aug 2012 11:49:43 +0400 Subject: arm: optimized current_pt_regs() ... no need to read current_thread_info()->task only to feed it to task_thread_page() immediately afterwards. Moreover, not using current_thread_info() at all ends up with better assembler - we need a location very close to the top of kernel stack page and it's actually better to do or with 0x1fff, followed be subtracting a small constant than and with ~0x1fff, followed by adding a large one. Both & and | would be a couple of insns (mvn lsr/mvn lsl for |, a pair of bic for &), but the following addition would cost a pair of add while the subtraction ends up as a single sub. 
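The arithmetic being described, as a stand-alone demonstration: THREAD_SIZE is taken to be ARM's 8 KiB kernel stack (so THREAD_SIZE - 1 is the 0x1fff mentioned above) and the sample sp value is arbitrary; both expressions land on the same near-top-of-stack address, below which pt_regs sits.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL			/* 0x2000 */

int main(void)
{
	uintptr_t sp = 0xc7a51c30UL;		/* some point inside the stack page */

	/* via current_thread_info(): AND with ~0x1fff, then add a large constant */
	uintptr_t top_and = (sp & ~(THREAD_SIZE - 1)) + THREAD_SIZE - 8;

	/* the new macro's approach: OR with 0x1fff, then one small subtract */
	uintptr_t top_or = (sp | (THREAD_SIZE - 1)) - 7;

	printf("%#lx == %#lx\n", (unsigned long)top_and, (unsigned long)top_or);
	return 0;				/* both print 0xc7a51ff8 */
}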
Signed-off-by: Al Viro --- arch/arm/include/asm/ptrace.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 355ece523f4..44fe998269d 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -254,6 +254,11 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) return regs->ARM_sp; } +#define current_pt_regs(void) ({ \ + register unsigned long sp asm ("sp"); \ + (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \ +}) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From a63c97a000c9c9a03372943a40c3ba8652aa0ccf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 2 Aug 2012 11:52:41 +0400 Subject: arm: get rid of execve wrapper, switch to generic execve() implementation Signed-off-by: Al Viro --- arch/arm/include/asm/unistd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 2c9b7a87e64..6a70aa42deb 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -469,6 +469,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_SOCKETCALL #endif +#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_KERNEL_EXECVE /* -- cgit v1.2.3 From 16a8016372c42c7628eb4a39d75386a461e8c5d0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 1 Jun 2012 14:22:01 -0400 Subject: sanitize tsk_is_polling() Make default just return 0. The current default (checking TIF_POLLING_NRFLAG) is taken to architectures that need it; ones that don't do polling in their idle threads don't need to defined TIF_POLLING_NRFLAG at all. ia64 defined both TS_POLLING (used by its tsk_is_polling()) and TIF_POLLING_NRFLAG (not used at all). Killed the latter... Signed-off-by: Al Viro --- arch/arm/include/asm/thread_info.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index af7b0bda335..b2d6b412172 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 -#define TIF_POLLING_NRFLAG 16 #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 @@ -160,7 +159,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) -- cgit v1.2.3 From dd37e9405a8e85be49a60b2530efeb5f06bcb753 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 21 Aug 2012 12:20:17 +0200 Subject: ARM: add coherent dma ops arch_is_coherent is problematic as it is a global symbol. This doesn't work for multi-platform kernels or platforms which can support per device coherent DMA. This adds arm_coherent_dma_ops to be used for devices which connected coherently (i.e. to the ACP port on Cortex-A9 or A15). The arm_dma_ops are modified at boot when arch_is_coherent is true. 
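A hedged sketch of the intended use: assuming the usual per-device set_dma_ops() helper (not shown in this hunk), platform code could install the coherent ops for a device wired to a coherent port; the "myplat" helper is invented for the example.

#include <linux/device.h>
#include <asm/dma-mapping.h>

/* Hypothetical helper, called from board/bus setup for a device that sits
 * behind a coherent interconnect such as the Cortex-A9/A15 ACP port. */
static void myplat_mark_dev_coherent(struct device *dev)
{
	set_dma_ops(dev, &arm_coherent_dma_ops);	/* instead of the default arm_dma_ops */
}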
Signed-off-by: Rob Herring Cc: Russell King Cc: Marek Szyprowski Signed-off-by: Marek Szyprowski --- arch/arm/include/asm/dma-mapping.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 5c44dcb0987..23004847bb0 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -13,6 +13,7 @@ #define DMA_ERROR_CODE (~0) extern struct dma_map_ops arm_dma_ops; +extern struct dma_map_ops arm_coherent_dma_ops; static inline struct dma_map_ops *get_dma_ops(struct device *dev) { -- cgit v1.2.3 From 48aa820f1e3824e46dde6251db98e5961abf605d Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 21 Aug 2012 12:26:24 +0200 Subject: ARM: kill off arch_is_coherent With ixp2xxx removed, there are no platforms that define arch_is_coherent, so the last occurrences of arch_is_coherent can be removed. Any new platform with coherent i/o should use coherent dma mapping functions. Signed-off-by: Rob Herring Cc: Russell King Cc: Marek Szyprowski Signed-off-by: Marek Szyprowski --- arch/arm/include/asm/barrier.h | 7 +++---- arch/arm/include/asm/memory.h | 8 -------- 2 files changed, 3 insertions(+), 12 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 05112380dc5..8dcd9c702d9 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -44,10 +44,9 @@ #define rmb() dsb() #define wmb() mb() #else -#include -#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() #endif #ifndef CONFIG_SMP diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 5f6ddcc5645..73cf03aa981 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -275,14 +275,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) -/* - * Optional coherency support. Currently used only by selected - * Intel XSC3-based systems. - */ -#ifndef arch_is_coherent -#define arch_is_coherent() 0 -#endif - #endif #include -- cgit v1.2.3 From a1ce39288e6fbefdd8d607021d02384eb4a20b99 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 2 Oct 2012 18:01:25 +0100 Subject: UAPI: (Scripted) Convert #include "..." to #include in kernel system headers Convert #include "..." to #include in kernel system headers. Signed-off-by: David Howells Acked-by: Arnd Bergmann Acked-by: Thomas Gleixner Acked-by: Paul E. 
McKenney Acked-by: Dave Jones --- arch/arm/include/asm/page.h | 2 +- arch/arm/include/asm/pgtable.h | 2 +- arch/arm/include/asm/vfpmacros.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index ecf901902e4..812a4944e78 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -19,7 +19,7 @@ #ifndef CONFIG_MMU -#include "page-nommu.h" +#include #else diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 41dc31f834c..08c12312a1f 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -16,7 +16,7 @@ #ifndef CONFIG_MMU #include -#include "pgtable-nommu.h" +#include #else diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 3d5fc41ae8d..a7aadbd9a6d 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -5,7 +5,7 @@ */ #include -#include "vfp.h" +#include @ Macros to allow building with old toolkits (with no VFP support) .macro VFPFMRX, rd, sysreg, cond -- cgit v1.2.3 From 8ef102c6b4bc996ff96ca52b34775fe931ec90c9 Mon Sep 17 00:00:00 2001 From: Wade Farnsworth Date: Tue, 2 Oct 2012 17:08:30 +0100 Subject: ARM: 7548/1: include linux/sched.h in syscall.h The syscall tracing patch introduces a compile bug in lttng-modules when the latter calls syscall_get_nr(), similar to the following: /arch/arm/include/asm/syscall.h:21:2: error: implicit declaration of function 'task_thread_info' [-Werror=implicit-function-declaration] The issue is that we are using task_thread_info() in the syscall_get_nr() function in asm/syscall.h, but not explicitly including sched.h from this file, so we can expect this bug might surface any time that syscall_get_nr() is called. Explicitly including sched.h solves the problem. Cc: [3.5, 3.6] Signed-off-by: Wade Farnsworth Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/syscall.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index c334a23ddf7..fce38a684e4 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -8,6 +8,7 @@ #define _ASM_ARM_SYSCALL_H #include +#include extern const unsigned long sys_call_table[]; -- cgit v1.2.3 From 2a552d5e63d7fa602c9a9a0717008737f55625a6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 6 Oct 2012 17:03:17 +0100 Subject: ARM: 7549/1: HYP: fix boot on some ARM1136 cores It appears that performing a "movs pc, lr" to force the kernel into SVC mode on the OMAP2420 (ARM1136) prevents the platform from booting correctly (change introduced in 80c59da [ARM: virt: allow the kernel to be entered in HYP mode]). While the reason it fails is not understood yet (the same code runs fine on the OMAP2430, ARM1136 as well), partially revert that change for platforms that do not enter in HYP mode, preserving the new feature and restoring a working kernel on the OMAP2420. 
Reported-by: Tony Lindgren Acked-by: Nicolas Pitre Tested-by: Tony Lindgren Signed-off-by: Marc Zyngier Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 683a1e6b602..2ef95813fce 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -254,16 +254,17 @@ mov lr , \reg and lr , lr , #MODE_MASK cmp lr , #HYP_MODE - orr \reg , \reg , #PSR_A_BIT | PSR_I_BIT | PSR_F_BIT + orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT bic \reg , \reg , #MODE_MASK orr \reg , \reg , #SVC_MODE THUMB( orr \reg , \reg , #PSR_T_BIT ) - msr spsr_cxsf, \reg - adr lr, BSYM(2f) bne 1f + orr \reg, \reg, #PSR_A_BIT + adr lr, BSYM(2f) + msr spsr_cxsf, \reg __MSR_ELR_HYP(14) __ERET -1: movs pc, lr +1: msr cpsr_c, \reg 2: .endm -- cgit v1.2.3 From 846a136881b8f73c1f74250bf6acfaa309cab1f2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 9 Oct 2012 11:13:26 +0100 Subject: ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels Michael Olbrich reported that his test program fails when built with -O2 -mcpu=cortex-a8 -mfpu=neon, and a kernel which supports v6 and v7 CPUs: volatile int x = 2; volatile int64_t y = 2; int main() { volatile int a = 0; volatile int64_t b = 0; while (1) { a = (a + x) % (1 << 30); b = (b + y) % (1 << 30); assert(a == b); } } and two instances are run. When built for just v7 CPUs, this program works fine. It uses the "vadd.i64 d19, d18, d16" VFP instruction. It appears that we do not save the high-16 double VFP registers across context switches when the kernel is built for v6 CPUs. Fix that. Cc: Tested-By: Michael Olbrich Signed-off-by: Russell King --- arch/arm/include/asm/vfpmacros.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index a7aadbd9a6d..6a6f1e485f4 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -28,7 +28,7 @@ ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] tst \tmp, #HWCAP_VFPv3D16 - ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 @@ -52,7 +52,7 @@ ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] tst \tmp, #HWCAP_VFPv3D16 - stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 -- cgit v1.2.3 From f6d5d8a5ed60c726354605877e8f8214fb11cc02 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 30 Apr 2012 13:18:37 +0000 Subject: ARM: binfmt_flat: unused variable 'persistent' The flat_get_addr_from_rp() macro does not use the 'persistent' argument on ARM, causing a harmless compiler warning. A cast to void removes that warning. 
Without this patch, building at91x40_defconfig results in: fs/binfmt_flat.c: In function 'load_flat_file': fs/binfmt_flat.c:746:17: warning: unused variable 'persistent' [-Wunused-variable] Signed-off-by: Arnd Bergmann Acked-by: Greg Ungerer Cc: Russell King Cc: Bryan Wu --- arch/arm/include/asm/flat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h index 59426a4595c..e847d23351e 100644 --- a/arch/arm/include/asm/flat.h +++ b/arch/arm/include/asm/flat.h @@ -8,7 +8,7 @@ #define flat_argvp_envp_on_stack() 1 #define flat_old_ram_flag(flags) (flags) #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) -#define flat_get_addr_from_rp(rp, relval, flags, persistent) get_unaligned(rp) +#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp)) #define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) #define flat_get_relocate_addr(rel) (rel) #define flat_set_persistent(relval, p) 0 -- cgit v1.2.3 From 8e7fc18b5eacc37b6e6fcf486ec4eafbfef91738 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 30 Apr 2012 13:18:46 +0000 Subject: ARM: warnings in arch/arm/include/asm/uaccess.h On NOMMU ARM, the __addr_ok() and __range_ok() macros do not evaluate their arguments, which may lead to harmless build warnings in some code where the variables are not used otherwise. Adding a cast to void gets rid of the warning and does not make any semantic changes. Without this patch, building at91x40_defconfig results in: fs/read_write.c: In function 'rw_copy_check_uvector': fs/read_write.c:684:9: warning: unused variable 'buf' [-Wunused-variable] Signed-off-by: Arnd Bergmann Acked-by: Greg Ungerer Cc: Russell King --- arch/arm/include/asm/uaccess.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 77bd79f2ffd..7e1f76027f6 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -200,8 +200,8 @@ extern int __put_user_8(void *, unsigned long long); #define USER_DS KERNEL_DS #define segment_eq(a,b) (1) -#define __addr_ok(addr) (1) -#define __range_ok(addr,size) (0) +#define __addr_ok(addr) ((void)(addr),1) +#define __range_ok(addr,size) ((void)(addr),0) #define get_fs() (KERNEL_DS) static inline void set_fs(mm_segment_t fs) -- cgit v1.2.3 From cb8db5d4578ac9d996200ab59aa655344d305f5b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 12 Oct 2012 13:05:52 +0100 Subject: UAPI: (Scripted) Disintegrate arch/arm/include/asm Signed-off-by: David Howells Acked-by: Arnd Bergmann Acked-by: Thomas Gleixner Acked-by: Michael Kerrisk Acked-by: Paul E. 
McKenney Acked-by: Dave Jones --- arch/arm/include/asm/Kbuild | 2 - arch/arm/include/asm/a.out.h | 34 --- arch/arm/include/asm/byteorder.h | 25 --- arch/arm/include/asm/fcntl.h | 11 - arch/arm/include/asm/hwcap.h | 27 +-- arch/arm/include/asm/ioctls.h | 8 - arch/arm/include/asm/kvm_para.h | 1 - arch/arm/include/asm/mman.h | 4 - arch/arm/include/asm/posix_types.h | 37 ---- arch/arm/include/asm/ptrace.h | 127 +---------- arch/arm/include/asm/setup.h | 172 +-------------- arch/arm/include/asm/sigcontext.h | 34 --- arch/arm/include/asm/signal.h | 127 +---------- arch/arm/include/asm/stat.h | 87 -------- arch/arm/include/asm/statfs.h | 12 - arch/arm/include/asm/swab.h | 37 +--- arch/arm/include/asm/unistd.h | 440 +------------------------------------ 17 files changed, 6 insertions(+), 1179 deletions(-) delete mode 100644 arch/arm/include/asm/a.out.h delete mode 100644 arch/arm/include/asm/byteorder.h delete mode 100644 arch/arm/include/asm/fcntl.h delete mode 100644 arch/arm/include/asm/ioctls.h delete mode 100644 arch/arm/include/asm/kvm_para.h delete mode 100644 arch/arm/include/asm/mman.h delete mode 100644 arch/arm/include/asm/posix_types.h delete mode 100644 arch/arm/include/asm/sigcontext.h delete mode 100644 arch/arm/include/asm/stat.h delete mode 100644 arch/arm/include/asm/statfs.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 8a7196ca510..f70ae175a3d 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -1,6 +1,4 @@ -include include/asm-generic/Kbuild.asm -header-y += hwcap.h generic-y += auxvec.h generic-y += bitsperlong.h diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h deleted file mode 100644 index 083894b2e3b..00000000000 --- a/arch/arm/include/asm/a.out.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef __ARM_A_OUT_H__ -#define __ARM_A_OUT_H__ - -#include -#include - -struct exec -{ - __u32 a_info; /* Use macros N_MAGIC, etc for access */ - __u32 a_text; /* length of text, in bytes */ - __u32 a_data; /* length of data, in bytes */ - __u32 a_bss; /* length of uninitialized data area for file, in bytes */ - __u32 a_syms; /* length of symbol table data in file, in bytes */ - __u32 a_entry; /* start address */ - __u32 a_trsize; /* length of relocation info for text, in bytes */ - __u32 a_drsize; /* length of relocation info for data, in bytes */ -}; - -/* - * This is always the same - */ -#define N_TXTADDR(a) (0x00008000) - -#define N_TRSIZE(a) ((a).a_trsize) -#define N_DRSIZE(a) ((a).a_drsize) -#define N_SYMSIZE(a) ((a).a_syms) - -#define M_ARM 103 - -#ifndef LIBRARY_START_TEXT -#define LIBRARY_START_TEXT (0x00c00000) -#endif - -#endif /* __A_OUT_GNU_H__ */ diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h deleted file mode 100644 index 77379748b17..00000000000 --- a/arch/arm/include/asm/byteorder.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * arch/arm/include/asm/byteorder.h - * - * ARM Endian-ness. 
In little endian mode, the data bus is connected such - * that byte accesses appear as: - * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31 - * and word accesses (data or instruction) appear as: - * d0...d31 - * - * When in big endian mode, byte accesses appear as: - * 0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7 - * and word accesses (data or instruction) appear as: - * d0...d31 - */ -#ifndef __ASM_ARM_BYTEORDER_H -#define __ASM_ARM_BYTEORDER_H - -#ifdef __ARMEB__ -#include -#else -#include -#endif - -#endif - diff --git a/arch/arm/include/asm/fcntl.h b/arch/arm/include/asm/fcntl.h deleted file mode 100644 index a80b6607b2e..00000000000 --- a/arch/arm/include/asm/fcntl.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ARM_FCNTL_H -#define _ARM_FCNTL_H - -#define O_DIRECTORY 040000 /* must be a directory */ -#define O_NOFOLLOW 0100000 /* don't follow links */ -#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ -#define O_LARGEFILE 0400000 - -#include - -#endif diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index 917626128a1..6ff56eca3f1 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -1,31 +1,8 @@ #ifndef __ASMARM_HWCAP_H #define __ASMARM_HWCAP_H -/* - * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP - */ -#define HWCAP_SWP (1 << 0) -#define HWCAP_HALF (1 << 1) -#define HWCAP_THUMB (1 << 2) -#define HWCAP_26BIT (1 << 3) /* Play it safe */ -#define HWCAP_FAST_MULT (1 << 4) -#define HWCAP_FPA (1 << 5) -#define HWCAP_VFP (1 << 6) -#define HWCAP_EDSP (1 << 7) -#define HWCAP_JAVA (1 << 8) -#define HWCAP_IWMMXT (1 << 9) -#define HWCAP_CRUNCH (1 << 10) -#define HWCAP_THUMBEE (1 << 11) -#define HWCAP_NEON (1 << 12) -#define HWCAP_VFPv3 (1 << 13) -#define HWCAP_VFPv3D16 (1 << 14) -#define HWCAP_TLS (1 << 15) -#define HWCAP_VFPv4 (1 << 16) -#define HWCAP_IDIVA (1 << 17) -#define HWCAP_IDIVT (1 << 18) -#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) +#include -#if defined(__KERNEL__) #if !defined(__ASSEMBLY__) /* * This yields a mask that user programs can use to figure out what @@ -35,5 +12,3 @@ extern unsigned int elf_hwcap; #endif #endif - -#endif diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h deleted file mode 100644 index 9c962981612..00000000000 --- a/arch/arm/include/asm/ioctls.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_ARM_IOCTLS_H -#define __ASM_ARM_IOCTLS_H - -#define FIOQSIZE 0x545E - -#include - -#endif diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b95..00000000000 --- a/arch/arm/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h deleted file mode 100644 index 41f99c573b9..00000000000 --- a/arch/arm/include/asm/mman.h +++ /dev/null @@ -1,4 +0,0 @@ -#include - -#define arch_mmap_check(addr, len, flags) \ - (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0) diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h deleted file mode 100644 index d2de9cbbcd9..00000000000 --- a/arch/arm/include/asm/posix_types.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * arch/arm/include/asm/posix_types.h - * - * Copyright (C) 1996-1998 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * Changelog: - * 27-06-1996 RMK Created - */ -#ifndef __ARCH_ARM_POSIX_TYPES_H -#define __ARCH_ARM_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. - */ - -typedef unsigned short __kernel_mode_t; -#define __kernel_mode_t __kernel_mode_t - -typedef unsigned short __kernel_ipc_pid_t; -#define __kernel_ipc_pid_t __kernel_ipc_pid_t - -typedef unsigned short __kernel_uid_t; -typedef unsigned short __kernel_gid_t; -#define __kernel_uid_t __kernel_uid_t - -typedef unsigned short __kernel_old_dev_t; -#define __kernel_old_dev_t __kernel_old_dev_t - -#include - -#endif diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 142d6ae4123..3d52ee1bfb3 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -10,133 +10,12 @@ #ifndef __ASM_ARM_PTRACE_H #define __ASM_ARM_PTRACE_H -#include - -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 -#define PTRACE_GETFPREGS 14 -#define PTRACE_SETFPREGS 15 -/* PTRACE_ATTACH is 16 */ -/* PTRACE_DETACH is 17 */ -#define PTRACE_GETWMMXREGS 18 -#define PTRACE_SETWMMXREGS 19 -/* 20 is unused */ -#define PTRACE_OLDSETOPTIONS 21 -#define PTRACE_GET_THREAD_AREA 22 -#define PTRACE_SET_SYSCALL 23 -/* PTRACE_SYSCALL is 24 */ -#define PTRACE_GETCRUNCHREGS 25 -#define PTRACE_SETCRUNCHREGS 26 -#define PTRACE_GETVFPREGS 27 -#define PTRACE_SETVFPREGS 28 -#define PTRACE_GETHBPREGS 29 -#define PTRACE_SETHBPREGS 30 - -/* - * PSR bits - */ -#define USR26_MODE 0x00000000 -#define FIQ26_MODE 0x00000001 -#define IRQ26_MODE 0x00000002 -#define SVC26_MODE 0x00000003 -#define USR_MODE 0x00000010 -#define FIQ_MODE 0x00000011 -#define IRQ_MODE 0x00000012 -#define SVC_MODE 0x00000013 -#define ABT_MODE 0x00000017 -#define HYP_MODE 0x0000001a -#define UND_MODE 0x0000001b -#define SYSTEM_MODE 0x0000001f -#define MODE32_BIT 0x00000010 -#define MODE_MASK 0x0000001f -#define PSR_T_BIT 0x00000020 -#define PSR_F_BIT 0x00000040 -#define PSR_I_BIT 0x00000080 -#define PSR_A_BIT 0x00000100 -#define PSR_E_BIT 0x00000200 -#define PSR_J_BIT 0x01000000 -#define PSR_Q_BIT 0x08000000 -#define PSR_V_BIT 0x10000000 -#define PSR_C_BIT 0x20000000 -#define PSR_Z_BIT 0x40000000 -#define PSR_N_BIT 0x80000000 - -/* - * Groups of PSR bits - */ -#define PSR_f 0xff000000 /* Flags */ -#define PSR_s 0x00ff0000 /* Status */ -#define PSR_x 0x0000ff00 /* Extension */ -#define PSR_c 0x000000ff /* Control */ - -/* - * ARMv7 groups of PSR bits - */ -#define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ -#define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ -#define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ - -/* - * Default endianness state - */ -#ifdef CONFIG_CPU_ENDIAN_BE8 -#define PSR_ENDSTATE PSR_E_BIT -#else -#define PSR_ENDSTATE 0 -#endif - -/* - * These are 'magic' values for PTRACE_PEEKUSR that return info about where a - * process is located in memory. - */ -#define PT_TEXT_ADDR 0x10000 -#define PT_DATA_ADDR 0x10004 -#define PT_TEXT_END_ADDR 0x10008 +#include #ifndef __ASSEMBLY__ - -/* - * This struct defines the way the registers are stored on the - * stack during a system call. Note that sizeof(struct pt_regs) - * has to be a multiple of 8. 
- */ -#ifndef __KERNEL__ -struct pt_regs { - long uregs[18]; -}; -#else /* __KERNEL__ */ struct pt_regs { unsigned long uregs[18]; }; -#endif /* __KERNEL__ */ - -#define ARM_cpsr uregs[16] -#define ARM_pc uregs[15] -#define ARM_lr uregs[14] -#define ARM_sp uregs[13] -#define ARM_ip uregs[12] -#define ARM_fp uregs[11] -#define ARM_r10 uregs[10] -#define ARM_r9 uregs[9] -#define ARM_r8 uregs[8] -#define ARM_r7 uregs[7] -#define ARM_r6 uregs[6] -#define ARM_r5 uregs[5] -#define ARM_r4 uregs[4] -#define ARM_r3 uregs[3] -#define ARM_r2 uregs[2] -#define ARM_r1 uregs[1] -#define ARM_r0 uregs[0] -#define ARM_ORIG_r0 uregs[17] - -/* - * The size of the user-visible VFP state as seen by PTRACE_GET/SETVFPREGS - * and core dumps. - */ -#define ARM_VFPREGS_SIZE ( 32 * 8 /*fpregs*/ + 4 /*fpscr*/ ) - -#ifdef __KERNEL__ #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) @@ -260,9 +139,5 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \ }) -#endif /* __KERNEL__ */ - #endif /* __ASSEMBLY__ */ - #endif - diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 24d284a1bfc..c50f0560950 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -14,176 +14,8 @@ #ifndef __ASMARM_SETUP_H #define __ASMARM_SETUP_H -#include +#include -#define COMMAND_LINE_SIZE 1024 - -/* The list ends with an ATAG_NONE node. */ -#define ATAG_NONE 0x00000000 - -struct tag_header { - __u32 size; - __u32 tag; -}; - -/* The list must start with an ATAG_CORE node */ -#define ATAG_CORE 0x54410001 - -struct tag_core { - __u32 flags; /* bit 0 = read-only */ - __u32 pagesize; - __u32 rootdev; -}; - -/* it is allowed to have multiple ATAG_MEM nodes */ -#define ATAG_MEM 0x54410002 - -struct tag_mem32 { - __u32 size; - __u32 start; /* physical start address */ -}; - -/* VGA text type displays */ -#define ATAG_VIDEOTEXT 0x54410003 - -struct tag_videotext { - __u8 x; - __u8 y; - __u16 video_page; - __u8 video_mode; - __u8 video_cols; - __u16 video_ega_bx; - __u8 video_lines; - __u8 video_isvga; - __u16 video_points; -}; - -/* describes how the ramdisk will be used in kernel */ -#define ATAG_RAMDISK 0x54410004 - -struct tag_ramdisk { - __u32 flags; /* bit 0 = load, bit 1 = prompt */ - __u32 size; /* decompressed ramdisk size in _kilo_ bytes */ - __u32 start; /* starting block of floppy-based RAM disk image */ -}; - -/* describes where the compressed ramdisk image lives (virtual address) */ -/* - * this one accidentally used virtual addresses - as such, - * it's deprecated. - */ -#define ATAG_INITRD 0x54410005 - -/* describes where the compressed ramdisk image lives (physical address) */ -#define ATAG_INITRD2 0x54420005 - -struct tag_initrd { - __u32 start; /* physical start address */ - __u32 size; /* size of compressed ramdisk image in bytes */ -}; - -/* board serial number. "64 bits should be enough for everybody" */ -#define ATAG_SERIAL 0x54410006 - -struct tag_serialnr { - __u32 low; - __u32 high; -}; - -/* board revision */ -#define ATAG_REVISION 0x54410007 - -struct tag_revision { - __u32 rev; -}; - -/* initial values for vesafb-type framebuffers. 
see struct screen_info - * in include/linux/tty.h - */ -#define ATAG_VIDEOLFB 0x54410008 - -struct tag_videolfb { - __u16 lfb_width; - __u16 lfb_height; - __u16 lfb_depth; - __u16 lfb_linelength; - __u32 lfb_base; - __u32 lfb_size; - __u8 red_size; - __u8 red_pos; - __u8 green_size; - __u8 green_pos; - __u8 blue_size; - __u8 blue_pos; - __u8 rsvd_size; - __u8 rsvd_pos; -}; - -/* command line: \0 terminated string */ -#define ATAG_CMDLINE 0x54410009 - -struct tag_cmdline { - char cmdline[1]; /* this is the minimum size */ -}; - -/* acorn RiscPC specific information */ -#define ATAG_ACORN 0x41000101 - -struct tag_acorn { - __u32 memc_control_reg; - __u32 vram_pages; - __u8 sounddefault; - __u8 adfsdrives; -}; - -/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */ -#define ATAG_MEMCLK 0x41000402 - -struct tag_memclk { - __u32 fmemclk; -}; - -struct tag { - struct tag_header hdr; - union { - struct tag_core core; - struct tag_mem32 mem; - struct tag_videotext videotext; - struct tag_ramdisk ramdisk; - struct tag_initrd initrd; - struct tag_serialnr serialnr; - struct tag_revision revision; - struct tag_videolfb videolfb; - struct tag_cmdline cmdline; - - /* - * Acorn specific - */ - struct tag_acorn acorn; - - /* - * DC21285 specific - */ - struct tag_memclk memclk; - } u; -}; - -struct tagtable { - __u32 tag; - int (*parse)(const struct tag *); -}; - -#define tag_member_present(tag,member) \ - ((unsigned long)(&((struct tag *)0L)->member + 1) \ - <= (tag)->hdr.size * 4) - -#define tag_next(t) ((struct tag *)((__u32 *)(t) + (t)->hdr.size)) -#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2) - -#define for_each_tag(t,base) \ - for (t = base; t->hdr.size; t = tag_next(t)) - -#ifdef __KERNEL__ #define __tag __used __attribute__((__section__(".taglist.init"))) #define __tagtable(tag, fn) \ @@ -221,6 +53,4 @@ extern int arm_add_memory(phys_addr_t start, phys_addr_t size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); -#endif /* __KERNEL__ */ - #endif diff --git a/arch/arm/include/asm/sigcontext.h b/arch/arm/include/asm/sigcontext.h deleted file mode 100644 index fc0b80b6a6f..00000000000 --- a/arch/arm/include/asm/sigcontext.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef _ASMARM_SIGCONTEXT_H -#define _ASMARM_SIGCONTEXT_H - -/* - * Signal context structure - contains all info to do with the state - * before the signal handler was invoked. Note: only add new entries - * to the end of the structure. - */ -struct sigcontext { - unsigned long trap_no; - unsigned long error_code; - unsigned long oldmask; - unsigned long arm_r0; - unsigned long arm_r1; - unsigned long arm_r2; - unsigned long arm_r3; - unsigned long arm_r4; - unsigned long arm_r5; - unsigned long arm_r6; - unsigned long arm_r7; - unsigned long arm_r8; - unsigned long arm_r9; - unsigned long arm_r10; - unsigned long arm_fp; - unsigned long arm_ip; - unsigned long arm_sp; - unsigned long arm_lr; - unsigned long arm_pc; - unsigned long arm_cpsr; - unsigned long fault_address; -}; - - -#endif diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h index 43ba0fb1c8a..5a7963dbd3f 100644 --- a/arch/arm/include/asm/signal.h +++ b/arch/arm/include/asm/signal.h @@ -1,12 +1,8 @@ #ifndef _ASMARM_SIGNAL_H #define _ASMARM_SIGNAL_H -#include +#include -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifdef __KERNEL__ /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. 
*/ @@ -20,100 +16,6 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -#define SIGSWI 32 - -/* - * SA_FLAGS values: - * - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_SIGINFO deliver the signal with SIGINFO structs - * SA_THIRTYTWO delivers the signal in 32-bit mode, even if the task - * is running in 26-bit. - * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)). - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NODEFER prevents the current signal from being masked in the handler. - * SA_RESETHAND clears the handler when the signal is delivered. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 -#define SA_THIRTYTWO 0x02000000 -#define SA_RESTORER 0x04000000 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ 8192 - -#include - -#ifdef __KERNEL__ struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; @@ -132,33 +34,6 @@ struct k_sigaction { struct sigaction sa; }; -#else -/* Here we must cater to libcs that poke about in kernel headers. 
*/ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void __user *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -#ifdef __KERNEL__ #include #define ptrace_signal_deliver(regs, cookie) do { } while (0) #endif - -#endif diff --git a/arch/arm/include/asm/stat.h b/arch/arm/include/asm/stat.h deleted file mode 100644 index 42c0c13999d..00000000000 --- a/arch/arm/include/asm/stat.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef _ASMARM_STAT_H -#define _ASMARM_STAT_H - -struct __old_kernel_stat { - unsigned short st_dev; - unsigned short st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; -}; - -#define STAT_HAVE_NSEC - -struct stat { -#if defined(__ARMEB__) - unsigned short st_dev; - unsigned short __pad1; -#else - unsigned long st_dev; -#endif - unsigned long st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; -#if defined(__ARMEB__) - unsigned short st_rdev; - unsigned short __pad2; -#else - unsigned long st_rdev; -#endif - unsigned long st_size; - unsigned long st_blksize; - unsigned long st_blocks; - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - unsigned long __unused4; - unsigned long __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - * Note: The kernel zero's the padded region because glibc might read them - * in the hope that the kernel has stretched to using larger sizes. - */ -struct stat64 { - unsigned long long st_dev; - unsigned char __pad0[4]; - -#define STAT64_HAS_BROKEN_ST_INO 1 - unsigned long __st_ino; - unsigned int st_mode; - unsigned int st_nlink; - - unsigned long st_uid; - unsigned long st_gid; - - unsigned long long st_rdev; - unsigned char __pad3[4]; - - long long st_size; - unsigned long st_blksize; - unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ - - unsigned long st_atime; - unsigned long st_atime_nsec; - - unsigned long st_mtime; - unsigned long st_mtime_nsec; - - unsigned long st_ctime; - unsigned long st_ctime_nsec; - - unsigned long long st_ino; -}; - -#endif diff --git a/arch/arm/include/asm/statfs.h b/arch/arm/include/asm/statfs.h deleted file mode 100644 index 079447c05ba..00000000000 --- a/arch/arm/include/asm/statfs.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASMARM_STATFS_H -#define _ASMARM_STATFS_H - -/* - * With EABI there is 4 bytes of padding added to this structure. - * Let's pack it so the padding goes away to simplify dual ABI support. - * Note that user space does NOT have to pack this structure. 
- */ -#define ARCH_PACK_STATFS64 __attribute__((packed,aligned(4))) - -#include -#endif diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h index b859d82e30c..537fc9b9188 100644 --- a/arch/arm/include/asm/swab.h +++ b/arch/arm/include/asm/swab.h @@ -15,14 +15,8 @@ #ifndef __ASM_ARM_SWAB_H #define __ASM_ARM_SWAB_H -#include -#include +#include -#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __SWAB_64_THRU_32__ -#endif - -#if defined(__KERNEL__) #if __LINUX_ARM_ARCH__ >= 6 static inline __attribute_const__ __u32 __arch_swahb32(__u32 x) @@ -42,32 +36,3 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) #endif #endif - -#if !defined(__KERNEL__) || __LINUX_ARM_ARCH__ < 6 -static inline __attribute_const__ __u32 __arch_swab32(__u32 x) -{ - __u32 t; - -#ifndef __thumb__ - if (!__builtin_constant_p(x)) { - /* - * The compiler needs a bit of a hint here to always do the - * right thing and not screw it up to different degrees - * depending on the gcc version. - */ - asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x)); - } else -#endif - t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */ - - x = (x << 24) | (x >> 8); /* mov r0,r0,ror #8 */ - t &= ~0x00FF0000; /* bic r1,r1,#0x00FF0000 */ - x ^= (t >> 8); /* eor r0,r0,r1,lsr #8 */ - - return x; -} -#define __arch_swab32 __arch_swab32 - -#endif - -#endif diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index f259921edfe..202bc3a2ddc 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -13,447 +13,10 @@ #ifndef __ASM_ARM_UNISTD_H #define __ASM_ARM_UNISTD_H -#define __NR_OABI_SYSCALL_BASE 0x900000 +#include -#if defined(__thumb__) || defined(__ARM_EABI__) -#define __NR_SYSCALL_BASE 0 -#else -#define __NR_SYSCALL_BASE __NR_OABI_SYSCALL_BASE -#endif - -/* - * This file contains the system call numbers. 
- */ - -#define __NR_restart_syscall (__NR_SYSCALL_BASE+ 0) -#define __NR_exit (__NR_SYSCALL_BASE+ 1) -#define __NR_fork (__NR_SYSCALL_BASE+ 2) -#define __NR_read (__NR_SYSCALL_BASE+ 3) -#define __NR_write (__NR_SYSCALL_BASE+ 4) -#define __NR_open (__NR_SYSCALL_BASE+ 5) -#define __NR_close (__NR_SYSCALL_BASE+ 6) - /* 7 was sys_waitpid */ -#define __NR_creat (__NR_SYSCALL_BASE+ 8) -#define __NR_link (__NR_SYSCALL_BASE+ 9) -#define __NR_unlink (__NR_SYSCALL_BASE+ 10) -#define __NR_execve (__NR_SYSCALL_BASE+ 11) -#define __NR_chdir (__NR_SYSCALL_BASE+ 12) -#define __NR_time (__NR_SYSCALL_BASE+ 13) -#define __NR_mknod (__NR_SYSCALL_BASE+ 14) -#define __NR_chmod (__NR_SYSCALL_BASE+ 15) -#define __NR_lchown (__NR_SYSCALL_BASE+ 16) - /* 17 was sys_break */ - /* 18 was sys_stat */ -#define __NR_lseek (__NR_SYSCALL_BASE+ 19) -#define __NR_getpid (__NR_SYSCALL_BASE+ 20) -#define __NR_mount (__NR_SYSCALL_BASE+ 21) -#define __NR_umount (__NR_SYSCALL_BASE+ 22) -#define __NR_setuid (__NR_SYSCALL_BASE+ 23) -#define __NR_getuid (__NR_SYSCALL_BASE+ 24) -#define __NR_stime (__NR_SYSCALL_BASE+ 25) -#define __NR_ptrace (__NR_SYSCALL_BASE+ 26) -#define __NR_alarm (__NR_SYSCALL_BASE+ 27) - /* 28 was sys_fstat */ -#define __NR_pause (__NR_SYSCALL_BASE+ 29) -#define __NR_utime (__NR_SYSCALL_BASE+ 30) - /* 31 was sys_stty */ - /* 32 was sys_gtty */ -#define __NR_access (__NR_SYSCALL_BASE+ 33) -#define __NR_nice (__NR_SYSCALL_BASE+ 34) - /* 35 was sys_ftime */ -#define __NR_sync (__NR_SYSCALL_BASE+ 36) -#define __NR_kill (__NR_SYSCALL_BASE+ 37) -#define __NR_rename (__NR_SYSCALL_BASE+ 38) -#define __NR_mkdir (__NR_SYSCALL_BASE+ 39) -#define __NR_rmdir (__NR_SYSCALL_BASE+ 40) -#define __NR_dup (__NR_SYSCALL_BASE+ 41) -#define __NR_pipe (__NR_SYSCALL_BASE+ 42) -#define __NR_times (__NR_SYSCALL_BASE+ 43) - /* 44 was sys_prof */ -#define __NR_brk (__NR_SYSCALL_BASE+ 45) -#define __NR_setgid (__NR_SYSCALL_BASE+ 46) -#define __NR_getgid (__NR_SYSCALL_BASE+ 47) - /* 48 was sys_signal */ -#define __NR_geteuid (__NR_SYSCALL_BASE+ 49) -#define __NR_getegid (__NR_SYSCALL_BASE+ 50) -#define __NR_acct (__NR_SYSCALL_BASE+ 51) -#define __NR_umount2 (__NR_SYSCALL_BASE+ 52) - /* 53 was sys_lock */ -#define __NR_ioctl (__NR_SYSCALL_BASE+ 54) -#define __NR_fcntl (__NR_SYSCALL_BASE+ 55) - /* 56 was sys_mpx */ -#define __NR_setpgid (__NR_SYSCALL_BASE+ 57) - /* 58 was sys_ulimit */ - /* 59 was sys_olduname */ -#define __NR_umask (__NR_SYSCALL_BASE+ 60) -#define __NR_chroot (__NR_SYSCALL_BASE+ 61) -#define __NR_ustat (__NR_SYSCALL_BASE+ 62) -#define __NR_dup2 (__NR_SYSCALL_BASE+ 63) -#define __NR_getppid (__NR_SYSCALL_BASE+ 64) -#define __NR_getpgrp (__NR_SYSCALL_BASE+ 65) -#define __NR_setsid (__NR_SYSCALL_BASE+ 66) -#define __NR_sigaction (__NR_SYSCALL_BASE+ 67) - /* 68 was sys_sgetmask */ - /* 69 was sys_ssetmask */ -#define __NR_setreuid (__NR_SYSCALL_BASE+ 70) -#define __NR_setregid (__NR_SYSCALL_BASE+ 71) -#define __NR_sigsuspend (__NR_SYSCALL_BASE+ 72) -#define __NR_sigpending (__NR_SYSCALL_BASE+ 73) -#define __NR_sethostname (__NR_SYSCALL_BASE+ 74) -#define __NR_setrlimit (__NR_SYSCALL_BASE+ 75) -#define __NR_getrlimit (__NR_SYSCALL_BASE+ 76) /* Back compat 2GB limited rlimit */ -#define __NR_getrusage (__NR_SYSCALL_BASE+ 77) -#define __NR_gettimeofday (__NR_SYSCALL_BASE+ 78) -#define __NR_settimeofday (__NR_SYSCALL_BASE+ 79) -#define __NR_getgroups (__NR_SYSCALL_BASE+ 80) -#define __NR_setgroups (__NR_SYSCALL_BASE+ 81) -#define __NR_select (__NR_SYSCALL_BASE+ 82) -#define __NR_symlink (__NR_SYSCALL_BASE+ 83) - /* 84 was 
sys_lstat */ -#define __NR_readlink (__NR_SYSCALL_BASE+ 85) -#define __NR_uselib (__NR_SYSCALL_BASE+ 86) -#define __NR_swapon (__NR_SYSCALL_BASE+ 87) -#define __NR_reboot (__NR_SYSCALL_BASE+ 88) -#define __NR_readdir (__NR_SYSCALL_BASE+ 89) -#define __NR_mmap (__NR_SYSCALL_BASE+ 90) -#define __NR_munmap (__NR_SYSCALL_BASE+ 91) -#define __NR_truncate (__NR_SYSCALL_BASE+ 92) -#define __NR_ftruncate (__NR_SYSCALL_BASE+ 93) -#define __NR_fchmod (__NR_SYSCALL_BASE+ 94) -#define __NR_fchown (__NR_SYSCALL_BASE+ 95) -#define __NR_getpriority (__NR_SYSCALL_BASE+ 96) -#define __NR_setpriority (__NR_SYSCALL_BASE+ 97) - /* 98 was sys_profil */ -#define __NR_statfs (__NR_SYSCALL_BASE+ 99) -#define __NR_fstatfs (__NR_SYSCALL_BASE+100) - /* 101 was sys_ioperm */ -#define __NR_socketcall (__NR_SYSCALL_BASE+102) -#define __NR_syslog (__NR_SYSCALL_BASE+103) -#define __NR_setitimer (__NR_SYSCALL_BASE+104) -#define __NR_getitimer (__NR_SYSCALL_BASE+105) -#define __NR_stat (__NR_SYSCALL_BASE+106) -#define __NR_lstat (__NR_SYSCALL_BASE+107) -#define __NR_fstat (__NR_SYSCALL_BASE+108) - /* 109 was sys_uname */ - /* 110 was sys_iopl */ -#define __NR_vhangup (__NR_SYSCALL_BASE+111) - /* 112 was sys_idle */ -#define __NR_syscall (__NR_SYSCALL_BASE+113) /* syscall to call a syscall! */ -#define __NR_wait4 (__NR_SYSCALL_BASE+114) -#define __NR_swapoff (__NR_SYSCALL_BASE+115) -#define __NR_sysinfo (__NR_SYSCALL_BASE+116) -#define __NR_ipc (__NR_SYSCALL_BASE+117) -#define __NR_fsync (__NR_SYSCALL_BASE+118) -#define __NR_sigreturn (__NR_SYSCALL_BASE+119) -#define __NR_clone (__NR_SYSCALL_BASE+120) -#define __NR_setdomainname (__NR_SYSCALL_BASE+121) -#define __NR_uname (__NR_SYSCALL_BASE+122) - /* 123 was sys_modify_ldt */ -#define __NR_adjtimex (__NR_SYSCALL_BASE+124) -#define __NR_mprotect (__NR_SYSCALL_BASE+125) -#define __NR_sigprocmask (__NR_SYSCALL_BASE+126) - /* 127 was sys_create_module */ -#define __NR_init_module (__NR_SYSCALL_BASE+128) -#define __NR_delete_module (__NR_SYSCALL_BASE+129) - /* 130 was sys_get_kernel_syms */ -#define __NR_quotactl (__NR_SYSCALL_BASE+131) -#define __NR_getpgid (__NR_SYSCALL_BASE+132) -#define __NR_fchdir (__NR_SYSCALL_BASE+133) -#define __NR_bdflush (__NR_SYSCALL_BASE+134) -#define __NR_sysfs (__NR_SYSCALL_BASE+135) -#define __NR_personality (__NR_SYSCALL_BASE+136) - /* 137 was sys_afs_syscall */ -#define __NR_setfsuid (__NR_SYSCALL_BASE+138) -#define __NR_setfsgid (__NR_SYSCALL_BASE+139) -#define __NR__llseek (__NR_SYSCALL_BASE+140) -#define __NR_getdents (__NR_SYSCALL_BASE+141) -#define __NR__newselect (__NR_SYSCALL_BASE+142) -#define __NR_flock (__NR_SYSCALL_BASE+143) -#define __NR_msync (__NR_SYSCALL_BASE+144) -#define __NR_readv (__NR_SYSCALL_BASE+145) -#define __NR_writev (__NR_SYSCALL_BASE+146) -#define __NR_getsid (__NR_SYSCALL_BASE+147) -#define __NR_fdatasync (__NR_SYSCALL_BASE+148) -#define __NR__sysctl (__NR_SYSCALL_BASE+149) -#define __NR_mlock (__NR_SYSCALL_BASE+150) -#define __NR_munlock (__NR_SYSCALL_BASE+151) -#define __NR_mlockall (__NR_SYSCALL_BASE+152) -#define __NR_munlockall (__NR_SYSCALL_BASE+153) -#define __NR_sched_setparam (__NR_SYSCALL_BASE+154) -#define __NR_sched_getparam (__NR_SYSCALL_BASE+155) -#define __NR_sched_setscheduler (__NR_SYSCALL_BASE+156) -#define __NR_sched_getscheduler (__NR_SYSCALL_BASE+157) -#define __NR_sched_yield (__NR_SYSCALL_BASE+158) -#define __NR_sched_get_priority_max (__NR_SYSCALL_BASE+159) -#define __NR_sched_get_priority_min (__NR_SYSCALL_BASE+160) -#define __NR_sched_rr_get_interval (__NR_SYSCALL_BASE+161) -#define 
__NR_nanosleep (__NR_SYSCALL_BASE+162) -#define __NR_mremap (__NR_SYSCALL_BASE+163) -#define __NR_setresuid (__NR_SYSCALL_BASE+164) -#define __NR_getresuid (__NR_SYSCALL_BASE+165) - /* 166 was sys_vm86 */ - /* 167 was sys_query_module */ -#define __NR_poll (__NR_SYSCALL_BASE+168) -#define __NR_nfsservctl (__NR_SYSCALL_BASE+169) -#define __NR_setresgid (__NR_SYSCALL_BASE+170) -#define __NR_getresgid (__NR_SYSCALL_BASE+171) -#define __NR_prctl (__NR_SYSCALL_BASE+172) -#define __NR_rt_sigreturn (__NR_SYSCALL_BASE+173) -#define __NR_rt_sigaction (__NR_SYSCALL_BASE+174) -#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE+175) -#define __NR_rt_sigpending (__NR_SYSCALL_BASE+176) -#define __NR_rt_sigtimedwait (__NR_SYSCALL_BASE+177) -#define __NR_rt_sigqueueinfo (__NR_SYSCALL_BASE+178) -#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE+179) -#define __NR_pread64 (__NR_SYSCALL_BASE+180) -#define __NR_pwrite64 (__NR_SYSCALL_BASE+181) -#define __NR_chown (__NR_SYSCALL_BASE+182) -#define __NR_getcwd (__NR_SYSCALL_BASE+183) -#define __NR_capget (__NR_SYSCALL_BASE+184) -#define __NR_capset (__NR_SYSCALL_BASE+185) -#define __NR_sigaltstack (__NR_SYSCALL_BASE+186) -#define __NR_sendfile (__NR_SYSCALL_BASE+187) - /* 188 reserved */ - /* 189 reserved */ -#define __NR_vfork (__NR_SYSCALL_BASE+190) -#define __NR_ugetrlimit (__NR_SYSCALL_BASE+191) /* SuS compliant getrlimit */ -#define __NR_mmap2 (__NR_SYSCALL_BASE+192) -#define __NR_truncate64 (__NR_SYSCALL_BASE+193) -#define __NR_ftruncate64 (__NR_SYSCALL_BASE+194) -#define __NR_stat64 (__NR_SYSCALL_BASE+195) -#define __NR_lstat64 (__NR_SYSCALL_BASE+196) -#define __NR_fstat64 (__NR_SYSCALL_BASE+197) -#define __NR_lchown32 (__NR_SYSCALL_BASE+198) -#define __NR_getuid32 (__NR_SYSCALL_BASE+199) -#define __NR_getgid32 (__NR_SYSCALL_BASE+200) -#define __NR_geteuid32 (__NR_SYSCALL_BASE+201) -#define __NR_getegid32 (__NR_SYSCALL_BASE+202) -#define __NR_setreuid32 (__NR_SYSCALL_BASE+203) -#define __NR_setregid32 (__NR_SYSCALL_BASE+204) -#define __NR_getgroups32 (__NR_SYSCALL_BASE+205) -#define __NR_setgroups32 (__NR_SYSCALL_BASE+206) -#define __NR_fchown32 (__NR_SYSCALL_BASE+207) -#define __NR_setresuid32 (__NR_SYSCALL_BASE+208) -#define __NR_getresuid32 (__NR_SYSCALL_BASE+209) -#define __NR_setresgid32 (__NR_SYSCALL_BASE+210) -#define __NR_getresgid32 (__NR_SYSCALL_BASE+211) -#define __NR_chown32 (__NR_SYSCALL_BASE+212) -#define __NR_setuid32 (__NR_SYSCALL_BASE+213) -#define __NR_setgid32 (__NR_SYSCALL_BASE+214) -#define __NR_setfsuid32 (__NR_SYSCALL_BASE+215) -#define __NR_setfsgid32 (__NR_SYSCALL_BASE+216) -#define __NR_getdents64 (__NR_SYSCALL_BASE+217) -#define __NR_pivot_root (__NR_SYSCALL_BASE+218) -#define __NR_mincore (__NR_SYSCALL_BASE+219) -#define __NR_madvise (__NR_SYSCALL_BASE+220) -#define __NR_fcntl64 (__NR_SYSCALL_BASE+221) - /* 222 for tux */ - /* 223 is unused */ -#define __NR_gettid (__NR_SYSCALL_BASE+224) -#define __NR_readahead (__NR_SYSCALL_BASE+225) -#define __NR_setxattr (__NR_SYSCALL_BASE+226) -#define __NR_lsetxattr (__NR_SYSCALL_BASE+227) -#define __NR_fsetxattr (__NR_SYSCALL_BASE+228) -#define __NR_getxattr (__NR_SYSCALL_BASE+229) -#define __NR_lgetxattr (__NR_SYSCALL_BASE+230) -#define __NR_fgetxattr (__NR_SYSCALL_BASE+231) -#define __NR_listxattr (__NR_SYSCALL_BASE+232) -#define __NR_llistxattr (__NR_SYSCALL_BASE+233) -#define __NR_flistxattr (__NR_SYSCALL_BASE+234) -#define __NR_removexattr (__NR_SYSCALL_BASE+235) -#define __NR_lremovexattr (__NR_SYSCALL_BASE+236) -#define __NR_fremovexattr (__NR_SYSCALL_BASE+237) -#define __NR_tkill 
(__NR_SYSCALL_BASE+238) -#define __NR_sendfile64 (__NR_SYSCALL_BASE+239) -#define __NR_futex (__NR_SYSCALL_BASE+240) -#define __NR_sched_setaffinity (__NR_SYSCALL_BASE+241) -#define __NR_sched_getaffinity (__NR_SYSCALL_BASE+242) -#define __NR_io_setup (__NR_SYSCALL_BASE+243) -#define __NR_io_destroy (__NR_SYSCALL_BASE+244) -#define __NR_io_getevents (__NR_SYSCALL_BASE+245) -#define __NR_io_submit (__NR_SYSCALL_BASE+246) -#define __NR_io_cancel (__NR_SYSCALL_BASE+247) -#define __NR_exit_group (__NR_SYSCALL_BASE+248) -#define __NR_lookup_dcookie (__NR_SYSCALL_BASE+249) -#define __NR_epoll_create (__NR_SYSCALL_BASE+250) -#define __NR_epoll_ctl (__NR_SYSCALL_BASE+251) -#define __NR_epoll_wait (__NR_SYSCALL_BASE+252) -#define __NR_remap_file_pages (__NR_SYSCALL_BASE+253) - /* 254 for set_thread_area */ - /* 255 for get_thread_area */ -#define __NR_set_tid_address (__NR_SYSCALL_BASE+256) -#define __NR_timer_create (__NR_SYSCALL_BASE+257) -#define __NR_timer_settime (__NR_SYSCALL_BASE+258) -#define __NR_timer_gettime (__NR_SYSCALL_BASE+259) -#define __NR_timer_getoverrun (__NR_SYSCALL_BASE+260) -#define __NR_timer_delete (__NR_SYSCALL_BASE+261) -#define __NR_clock_settime (__NR_SYSCALL_BASE+262) -#define __NR_clock_gettime (__NR_SYSCALL_BASE+263) -#define __NR_clock_getres (__NR_SYSCALL_BASE+264) -#define __NR_clock_nanosleep (__NR_SYSCALL_BASE+265) -#define __NR_statfs64 (__NR_SYSCALL_BASE+266) -#define __NR_fstatfs64 (__NR_SYSCALL_BASE+267) -#define __NR_tgkill (__NR_SYSCALL_BASE+268) -#define __NR_utimes (__NR_SYSCALL_BASE+269) -#define __NR_arm_fadvise64_64 (__NR_SYSCALL_BASE+270) -#define __NR_pciconfig_iobase (__NR_SYSCALL_BASE+271) -#define __NR_pciconfig_read (__NR_SYSCALL_BASE+272) -#define __NR_pciconfig_write (__NR_SYSCALL_BASE+273) -#define __NR_mq_open (__NR_SYSCALL_BASE+274) -#define __NR_mq_unlink (__NR_SYSCALL_BASE+275) -#define __NR_mq_timedsend (__NR_SYSCALL_BASE+276) -#define __NR_mq_timedreceive (__NR_SYSCALL_BASE+277) -#define __NR_mq_notify (__NR_SYSCALL_BASE+278) -#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279) -#define __NR_waitid (__NR_SYSCALL_BASE+280) -#define __NR_socket (__NR_SYSCALL_BASE+281) -#define __NR_bind (__NR_SYSCALL_BASE+282) -#define __NR_connect (__NR_SYSCALL_BASE+283) -#define __NR_listen (__NR_SYSCALL_BASE+284) -#define __NR_accept (__NR_SYSCALL_BASE+285) -#define __NR_getsockname (__NR_SYSCALL_BASE+286) -#define __NR_getpeername (__NR_SYSCALL_BASE+287) -#define __NR_socketpair (__NR_SYSCALL_BASE+288) -#define __NR_send (__NR_SYSCALL_BASE+289) -#define __NR_sendto (__NR_SYSCALL_BASE+290) -#define __NR_recv (__NR_SYSCALL_BASE+291) -#define __NR_recvfrom (__NR_SYSCALL_BASE+292) -#define __NR_shutdown (__NR_SYSCALL_BASE+293) -#define __NR_setsockopt (__NR_SYSCALL_BASE+294) -#define __NR_getsockopt (__NR_SYSCALL_BASE+295) -#define __NR_sendmsg (__NR_SYSCALL_BASE+296) -#define __NR_recvmsg (__NR_SYSCALL_BASE+297) -#define __NR_semop (__NR_SYSCALL_BASE+298) -#define __NR_semget (__NR_SYSCALL_BASE+299) -#define __NR_semctl (__NR_SYSCALL_BASE+300) -#define __NR_msgsnd (__NR_SYSCALL_BASE+301) -#define __NR_msgrcv (__NR_SYSCALL_BASE+302) -#define __NR_msgget (__NR_SYSCALL_BASE+303) -#define __NR_msgctl (__NR_SYSCALL_BASE+304) -#define __NR_shmat (__NR_SYSCALL_BASE+305) -#define __NR_shmdt (__NR_SYSCALL_BASE+306) -#define __NR_shmget (__NR_SYSCALL_BASE+307) -#define __NR_shmctl (__NR_SYSCALL_BASE+308) -#define __NR_add_key (__NR_SYSCALL_BASE+309) -#define __NR_request_key (__NR_SYSCALL_BASE+310) -#define __NR_keyctl (__NR_SYSCALL_BASE+311) -#define 
__NR_semtimedop (__NR_SYSCALL_BASE+312) -#define __NR_vserver (__NR_SYSCALL_BASE+313) -#define __NR_ioprio_set (__NR_SYSCALL_BASE+314) -#define __NR_ioprio_get (__NR_SYSCALL_BASE+315) -#define __NR_inotify_init (__NR_SYSCALL_BASE+316) -#define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) -#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) -#define __NR_mbind (__NR_SYSCALL_BASE+319) -#define __NR_get_mempolicy (__NR_SYSCALL_BASE+320) -#define __NR_set_mempolicy (__NR_SYSCALL_BASE+321) -#define __NR_openat (__NR_SYSCALL_BASE+322) -#define __NR_mkdirat (__NR_SYSCALL_BASE+323) -#define __NR_mknodat (__NR_SYSCALL_BASE+324) -#define __NR_fchownat (__NR_SYSCALL_BASE+325) -#define __NR_futimesat (__NR_SYSCALL_BASE+326) -#define __NR_fstatat64 (__NR_SYSCALL_BASE+327) -#define __NR_unlinkat (__NR_SYSCALL_BASE+328) -#define __NR_renameat (__NR_SYSCALL_BASE+329) -#define __NR_linkat (__NR_SYSCALL_BASE+330) -#define __NR_symlinkat (__NR_SYSCALL_BASE+331) -#define __NR_readlinkat (__NR_SYSCALL_BASE+332) -#define __NR_fchmodat (__NR_SYSCALL_BASE+333) -#define __NR_faccessat (__NR_SYSCALL_BASE+334) -#define __NR_pselect6 (__NR_SYSCALL_BASE+335) -#define __NR_ppoll (__NR_SYSCALL_BASE+336) -#define __NR_unshare (__NR_SYSCALL_BASE+337) -#define __NR_set_robust_list (__NR_SYSCALL_BASE+338) -#define __NR_get_robust_list (__NR_SYSCALL_BASE+339) -#define __NR_splice (__NR_SYSCALL_BASE+340) -#define __NR_arm_sync_file_range (__NR_SYSCALL_BASE+341) -#define __NR_sync_file_range2 __NR_arm_sync_file_range -#define __NR_tee (__NR_SYSCALL_BASE+342) -#define __NR_vmsplice (__NR_SYSCALL_BASE+343) -#define __NR_move_pages (__NR_SYSCALL_BASE+344) -#define __NR_getcpu (__NR_SYSCALL_BASE+345) -#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346) -#define __NR_kexec_load (__NR_SYSCALL_BASE+347) -#define __NR_utimensat (__NR_SYSCALL_BASE+348) -#define __NR_signalfd (__NR_SYSCALL_BASE+349) -#define __NR_timerfd_create (__NR_SYSCALL_BASE+350) -#define __NR_eventfd (__NR_SYSCALL_BASE+351) -#define __NR_fallocate (__NR_SYSCALL_BASE+352) -#define __NR_timerfd_settime (__NR_SYSCALL_BASE+353) -#define __NR_timerfd_gettime (__NR_SYSCALL_BASE+354) -#define __NR_signalfd4 (__NR_SYSCALL_BASE+355) -#define __NR_eventfd2 (__NR_SYSCALL_BASE+356) -#define __NR_epoll_create1 (__NR_SYSCALL_BASE+357) -#define __NR_dup3 (__NR_SYSCALL_BASE+358) -#define __NR_pipe2 (__NR_SYSCALL_BASE+359) -#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360) -#define __NR_preadv (__NR_SYSCALL_BASE+361) -#define __NR_pwritev (__NR_SYSCALL_BASE+362) -#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) -#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) -#define __NR_recvmmsg (__NR_SYSCALL_BASE+365) -#define __NR_accept4 (__NR_SYSCALL_BASE+366) -#define __NR_fanotify_init (__NR_SYSCALL_BASE+367) -#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) -#define __NR_prlimit64 (__NR_SYSCALL_BASE+369) -#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370) -#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371) -#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372) -#define __NR_syncfs (__NR_SYSCALL_BASE+373) -#define __NR_sendmmsg (__NR_SYSCALL_BASE+374) -#define __NR_setns (__NR_SYSCALL_BASE+375) -#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) -#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) - /* 378 for kcmp */ - -/* - * This may need to be greater than __NR_last_syscall+1 in order to - * account for the padding in the syscall table - */ -#ifdef __KERNEL__ #define __NR_syscalls (380) -#endif /* __KERNEL__ */ - -/* - * The following SWIs are ARM private. 
- */ -#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) -#define __ARM_NR_breakpoint (__ARM_NR_BASE+1) -#define __ARM_NR_cacheflush (__ARM_NR_BASE+2) -#define __ARM_NR_usr26 (__ARM_NR_BASE+3) -#define __ARM_NR_usr32 (__ARM_NR_BASE+4) -#define __ARM_NR_set_tls (__ARM_NR_BASE+5) - -/* - * *NOTE*: This is a ghost syscall private to the kernel. Only the - * __kuser_cmpxchg code in entry-armv.S should be aware of its - * existence. Don't ever use this from user code. - */ -#ifdef __KERNEL__ #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) -#endif - -/* - * The following syscalls are obsolete and no longer available for EABI. - */ -#if !defined(__KERNEL__) -#if defined(__ARM_EABI__) -#undef __NR_time -#undef __NR_umount -#undef __NR_stime -#undef __NR_alarm -#undef __NR_utime -#undef __NR_getrlimit -#undef __NR_select -#undef __NR_readdir -#undef __NR_mmap -#undef __NR_socketcall -#undef __NR_syscall -#undef __NR_ipc -#endif -#endif - -#ifdef __KERNEL__ #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME @@ -496,5 +59,4 @@ #define __IGNORE_migrate_pages #define __IGNORE_kcmp -#endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ -- cgit v1.2.3 From 9fff2fa0db911b0b75ec1f9bec72460c0a676ef5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 10 Oct 2012 22:23:29 -0400 Subject: arm: switch to saner kernel_execve() semantics Signed-off-by: Al Viro --- arch/arm/include/asm/unistd.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 6a70aa42deb..984ad42ed0a 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -470,7 +470,6 @@ #define __ARCH_WANT_SYS_SOCKETCALL #endif #define __ARCH_WANT_SYS_EXECVE -#define __ARCH_WANT_KERNEL_EXECVE /* * "Conditional" syscalls -- cgit v1.2.3 From c3545236e8740ab556022f87685d18503c86e187 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Mon, 1 Oct 2012 10:59:29 +0100 Subject: ARM: 7546/1: cache-l2x0: add an optional register to save/restore Tested-and-Reviewed-by: Yehuda Yitschak Tested-and-Reviewed-by: Lior Amsalem Signed-off-by: Gregory CLEMENT Signed-off-by: Russell King --- arch/arm/include/asm/hardware/cache-l2x0.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index c4c87bc1223..5f2c7b44fda 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -126,6 +126,7 @@ struct l2x0_regs { unsigned long filter_end; unsigned long prefetch_ctrl; unsigned long pwr_ctrl; + unsigned long ctrl; }; extern struct l2x0_regs l2x0_saved_regs; -- cgit v1.2.3 From 07c9249f1fa90cc8189bed44c0bcece664596a72 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 16 Oct 2012 18:50:00 +0100 Subject: ARM: 7554/1: VIC: use irq_domain_add_simple() Instead of allocating descriptors on-the-fly for the device tree initialization case, use irq_domain_add_simple() which will take care of this if you pass negative as the first_irq. Alter the signature of __vic_init() to pass the first_irq as signed so this works as expected. Switching the VIC to use irq_domain_add_simple() also has the upside of displaying the same WARNING when you boot with pre-allocated descriptors on systems using SPARSE_IRQ but yet not using device tree. 
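To make the descriptor-allocation behaviour concrete, here is a minimal sketch, not the actual vic.c code (the function name is invented and irq_domain_simple_ops stands in for the driver's own ops): a non-positive irq_start selects a linear domain so the core allocates descriptors as interrupts are mapped, while a positive value keeps a legacy, pre-allocated layout.

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    /* Sketch only: 32 interrupt sources, as on a single VIC. */
    static struct irq_domain *example_vic_domain_init(struct device_node *node,
                                                      void __iomem *base,
                                                      int irq_start)
    {
            /* irq_start < 0 (the DT probe case): let the core allocate descs */
            return irq_domain_add_simple(node, 32,
                                         irq_start < 0 ? 0 : irq_start,
                                         &irq_domain_simple_ops, base);
    }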
Cc: Grant Likely Acked-by: Rob Herring Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/hardware/vic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h index e14af1a1a32..2bebad36fc8 100644 --- a/arch/arm/include/asm/hardware/vic.h +++ b/arch/arm/include/asm/hardware/vic.h @@ -47,7 +47,7 @@ struct device_node; struct pt_regs; -void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, +void __vic_init(void __iomem *base, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); int vic_of_init(struct device_node *node, struct device_node *parent); -- cgit v1.2.3 From 37ea0fcb6a3f3318bf45888e624722a2945cec04 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 17 Oct 2012 09:39:10 +0100 Subject: xen: sysfs: fix build warning. Define PRI macros for xen_ulong_t and xen_pfn_t and use to fix: drivers/xen/sys-hypervisor.c:288:4: warning: format '%lx' expects argument of type 'long unsigned int', but argument 3 has type 'xen_ulong_t' [-Wformat] Ideally this would use PRIx64 on ARM but these (or equivalent) don't seem to be available in the kernel. Acked-by: Stefano Stabellini Signed-off-by: Ian Campbell Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/interface.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h index ae05e56dd17..62160f259b0 100644 --- a/arch/arm/include/asm/xen/interface.h +++ b/arch/arm/include/asm/xen/interface.h @@ -31,7 +31,9 @@ /* Explicitly size integers that represent pfns in the interface with * Xen so that we can have one ABI that works for 32 and 64 bit guests. */ typedef uint64_t xen_pfn_t; +#define PRI_xen_pfn "llx" typedef uint64_t xen_ulong_t; +#define PRI_xen_ulong "llx" /* Guest handles for primitive C types. */ __DEFINE_GUEST_HANDLE(uchar, unsigned char); __DEFINE_GUEST_HANDLE(uint, unsigned int); -- cgit v1.2.3 From ef32f89298c094b6ed76c0c4981b7a51e939cb71 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 17 Oct 2012 09:39:14 +0100 Subject: xen: grant: use xen_pfn_t type for frame_list. This correctly sizes it as 64 bit on ARM but leaves it as unsigned long on x86 (therefore no intended change on x86). The long and ulong guest handles are now unused (and a bit dangerous) so remove them. Acked-by: Stefano Stabellini Signed-off-by: Ian Campbell Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/interface.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h index 62160f259b0..1d6ef9c2d1d 100644 --- a/arch/arm/include/asm/xen/interface.h +++ b/arch/arm/include/asm/xen/interface.h @@ -37,10 +37,8 @@ typedef uint64_t xen_ulong_t; /* Guest handles for primitive C types. 
*/ __DEFINE_GUEST_HANDLE(uchar, unsigned char); __DEFINE_GUEST_HANDLE(uint, unsigned int); -__DEFINE_GUEST_HANDLE(ulong, unsigned long); DEFINE_GUEST_HANDLE(char); DEFINE_GUEST_HANDLE(int); -DEFINE_GUEST_HANDLE(long); DEFINE_GUEST_HANDLE(void); DEFINE_GUEST_HANDLE(uint64_t); DEFINE_GUEST_HANDLE(uint32_t); -- cgit v1.2.3 From ee7b5958e2494619ee3ff52de68580feed6906a2 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 17 Oct 2012 09:39:17 +0100 Subject: xen: arm: make p2m operations NOPs This makes common code less ifdef-y and is consistent with PVHVM on x86. Also note that phys_to_machine_mapping_valid should take a pfn argument and make it do so. Add __set_phys_to_machine, make set_phys_to_machine a simple wrapper (on systems with non-nop implementations the outer one can allocate new p2m pages). Make __set_phys_to_machine check for identity mapping or invalid only. Acked-by: Stefano Stabellini Signed-off-by: Ian Campbell Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/page.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 174202318df..c6b9096cef9 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -10,7 +10,7 @@ #include #define pfn_to_mfn(pfn) (pfn) -#define phys_to_machine_mapping_valid (1) +#define phys_to_machine_mapping_valid(pfn) (1) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) @@ -30,6 +30,8 @@ typedef struct xpaddr { #define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) #define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) +#define INVALID_P2M_ENTRY (~0UL) + static inline xmaddr_t phys_to_machine(xpaddr_t phys) { unsigned offset = phys.paddr & ~PAGE_MASK; @@ -74,9 +76,14 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte) return 0; } +static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); + return true; +} + static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { - BUG(); - return false; + return __set_phys_to_machine(pfn, mfn); } #endif /* _ASM_ARM_XEN_PAGE_H */ -- cgit v1.2.3 From 3ab0b83bf6a1e834f4b884150d8012990c75d25d Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Thu, 18 Oct 2012 08:26:17 +0100 Subject: xen: arm: comment on why 64-bit xen_pfn_t is safe even on 32 bit Signed-off-by: Ian Campbell Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/interface.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h index 1d6ef9c2d1d..5000397134b 100644 --- a/arch/arm/include/asm/xen/interface.h +++ b/arch/arm/include/asm/xen/interface.h @@ -29,7 +29,13 @@ #ifndef __ASSEMBLY__ /* Explicitly size integers that represent pfns in the interface with - * Xen so that we can have one ABI that works for 32 and 64 bit guests. */ + * Xen so that we can have one ABI that works for 32 and 64 bit guests. + * Note that this means that the xen_pfn_t type may be capable of + * representing pfn's which the guest cannot represent in its own pfn + * type. However since pfn space is controlled by the guest this is + * fine since it simply wouldn't be able to create any sure pfns in + * the first place. 
+ */ typedef uint64_t xen_pfn_t; #define PRI_xen_pfn "llx" typedef uint64_t xen_ulong_t; -- cgit v1.2.3 From 871ae57adc5ed092c1341f411514d0e8482e2611 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 22 Oct 2012 20:44:03 +0800 Subject: ARM: dma-mapping: support debug_dma_mapping_error Without the patch, kind of below warning will be dumped if DMA-API debug is enabled: [ 11.069763] ------------[ cut here ]------------ [ 11.074645] WARNING: at lib/dma-debug.c:948 check_unmap+0x770/0x860() [ 11.081420] ehci-omap ehci-omap.0: DMA-API: device driver failed to check map error[device address=0x0000000 0adb78e80] [size=8 bytes] [mapped as single] [ 11.095611] Modules linked in: Cc: Russell King Cc: Marek Szyprowski Signed-off-by: Ming Lei Signed-off-by: Marek Szyprowski --- arch/arm/include/asm/dma-mapping.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 23004847bb0..78d8e9b5544 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -91,6 +91,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { + debug_dma_mapping_error(dev, dma_addr); return dma_addr == DMA_ERROR_CODE; } -- cgit v1.2.3 From 7e6735c3578e76c270a2797225a4214176ba13ef Mon Sep 17 00:00:00 2001 From: Cyril Chemparathy Date: Wed, 12 Sep 2012 14:05:58 -0400 Subject: /dev/mem: use phys_addr_t for physical addresses This patch fixes the /dev/mem driver to use phys_addr_t for physical addresses. This is required on PAE systems, especially those that run entirely out of >4G physical memory space. Signed-off-by: Cyril Chemparathy Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 35c1ed89b93..f52a93d9866 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -374,7 +374,7 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); #ifdef CONFIG_MMU #define ARCH_HAS_VALID_PHYS_ADDR_RANGE -extern int valid_phys_addr_range(unsigned long addr, size_t size); +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); extern int devmem_is_allowed(unsigned long pfn); #endif -- cgit v1.2.3 From 697575896670ba9e76760ce8bbc1f5a3001967d6 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Fri, 26 Oct 2012 07:39:13 +0200 Subject: Revert "ARM: dma-mapping: support debug_dma_mapping_error" This reverts commit 871ae57adc5ed092c1341f411514d0e8482e2611, which is scheduled for v3.8 and accidently got into v3.7-rc series. 
Signed-off-by: Marek Szyprowski --- arch/arm/include/asm/dma-mapping.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 78d8e9b5544..23004847bb0 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -91,7 +91,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - debug_dma_mapping_error(dev, dma_addr); return dma_addr == DMA_ERROR_CODE; } -- cgit v1.2.3 From 6a4dae5e138a32b45ca5218cc2b81802f9d378c3 Mon Sep 17 00:00:00 2001 From: Felipe Balbi 2 Date: Tue, 23 Oct 2012 19:00:03 +0100 Subject: ARM: 7565/1: sched: stop sched_clock() during suspend The scheduler imposes a requirement to sched_clock() which is to stop the clock during suspend, if we don't do that any RT thread will be rescheduled in the future which might cause any sort of problems. This became an issue on OMAP when we converted omap-i2c.c to use threaded IRQs, it turned out that depending on how much time we spent on suspend, the I2C IRQ thread would end up being rescheduled so far in the future that I2C transfers would timeout and, because omap_hsmmc depends on an I2C-connected device to detect if an MMC card is inserted in the slot, our rootfs would just vanish. arch/arm/kernel/sched_clock.c already had an optional implementation (sched_clock_needs_suspend()) which would handle scheduler's requirement properly, what this patch does is simply to make that implementation non-optional. Note that this has the side-effect that printk timings won't reflect the actual time spent on suspend so other methods to measure that will have to be used. This has been tested with beagleboard XM (OMAP3630) and pandaboard rev A3 (OMAP4430). Suspend to RAM is now working after this patch. Thanks to Kevin Hilman for helping out with debugging. 
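For reference, a platform feeds sched_clock through the setup_sched_clock() hook kept by this header, and after this change the core stops and restarts that clock across suspend without the caller opting in. A minimal sketch (the 0x04 register offset and the 24 MHz rate are invented for illustration):

    #include <linux/init.h>
    #include <linux/io.h>
    #include <asm/sched_clock.h>

    static void __iomem *example_counter_base;      /* mapped during early init */

    static u32 notrace example_sched_clock_read(void)
    {
            /* free-running 32-bit up-counter */
            return readl_relaxed(example_counter_base + 0x04);
    }

    static void __init example_sched_clock_init(void)
    {
            setup_sched_clock(example_sched_clock_read, 32, 24000000);
    }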
Acked-by: Kevin Hilman Acked-by: Linus Walleij Signed-off-by: Felipe Balbi Signed-off-by: Russell King --- arch/arm/include/asm/sched_clock.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index 05b8e82ec9f..e3f75726343 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -10,7 +10,5 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); -extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, - unsigned long rate); #endif -- cgit v1.2.3 From 39141ddfb63a664f26d3f42f64ee386e879b492c Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 23 Oct 2012 20:32:59 +0100 Subject: ARM: 7566/1: vfp: fix save and restore when running on pre-VFPv3 and CONFIG_VFPv3 set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After commit 846a136881b8f73c1f74250bf6acfaa309cab1f2 ("ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels"), the OMAP 2430SDP board started crashing during boot with omap2plus_defconfig: [ 3.875122] mmcblk0: mmc0:e624 SD04G 3.69 GiB [ 3.915954] mmcblk0: p1 [ 4.086639] Internal error: Oops - undefined instruction: 0 [#1] SMP ARM [ 4.093719] Modules linked in: [ 4.096954] CPU: 0 Not tainted (3.6.0-02232-g759e00b #570) [ 4.103149] PC is at vfp_reload_hw+0x1c/0x44 [ 4.107666] LR is at __und_usr_fault_32+0x0/0x8 It turns out that the context save/restore fix unmasked a latent bug in commit 5aaf254409f8d58229107b59507a8235b715a960 ("ARM: 6203/1: Make VFPv3 usable on ARMv6"). When CONFIG_VFPv3 is set, but the kernel is booted on a pre-VFPv3 core, the code attempts to save and restore the d16-d31 VFP registers. These are only present on non-D16 VFPv3+, so this results in an undefined instruction exception. The code didn't crash before commit 846a136 because the save and restore code was only touching d0-d15, present on all VFP. Fix by implementing a request from Russell King to add a new HWCAP flag that affirmatively indicates the presence of the d16-d31 registers: http://marc.info/?l=linux-arm-kernel&m=135013547905283&w=2 and some feedback from Måns to clarify the name of the HWCAP flag. 
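The flag is also meant to reach user space through the ELF auxiliary vector, so code that needs d16-d31 can test for it instead of inferring it from the VFP version. A sketch of such a check, assuming the HWCAP_VFPD32 definition is available from the exported <asm/hwcap.h> and the libc provides getauxval() (glibc 2.16 or later):

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>          /* HWCAP_VFPD32 */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("VFP d16-d31: %s\n",
                   (hwcap & HWCAP_VFPD32) ? "present" : "absent");
            return 0;
    }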
Signed-off-by: Paul Walmsley Cc: Tony Lindgren Cc: Catalin Marinas Cc: Dave Martin Cc: Måns Rullgård Signed-off-by: Russell King --- arch/arm/include/asm/vfpmacros.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 6a6f1e485f4..301c1db3e99 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -27,9 +27,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field @@ -51,9 +51,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field -- cgit v1.2.3 From 7629a9f661f72eb383fb896e59efea1eac74e882 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 26 Oct 2012 18:42:08 +0100 Subject: ARM: 7567/1: io: avoid GCC's offsettable addressing modes for halfword accesses Using the 'o' memory constraint in inline assembly can result in GCC generating invalid immediate offsets for memory access instructions with reduced addressing capabilities (i.e. smaller than 12-bit immediate offsets): http://gcc.gnu.org/bugzilla/show_bug.cgi?id=54983 As there is no constraint to specify the exact addressing mode we need, fallback to using 'Q' exclusively for halfword I/O accesses. This may emit an additional add instruction (using an extra register) in order to construct the address but it will always be accepted by GAS. Reported-by: Bastian Hecht Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 35c1ed89b93..42f042ee4ad 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -64,7 +64,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" - : "+Qo" (*(volatile u16 __force *)addr) + : "+Q" (*(volatile u16 __force *)addr) : "r" (val)); } @@ -72,7 +72,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; asm volatile("ldrh %1, %0" - : "+Qo" (*(volatile u16 __force *)addr), + : "+Q" (*(volatile u16 __force *)addr), "=r" (val)); return val; } -- cgit v1.2.3 From bcd6f569e87471d7f104bd9497f0b516a3b12e32 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Tue, 18 Sep 2012 15:17:48 +0100 Subject: clk: Common clocks implementation for Versatile Express This patch adds a DT and non-DT based implementation of the common clock infrastructure for Versatile Express platform. 
It registers (statically or using DT) all required fixed clocks, initialises motherboard's SP810 cell (that provides clocks for SP804 timers) and explicitly registers VE "osc" driver, to make the clock generators available early. Signed-off-by: Pawel Moll Signed-off-by: Mike Turquette --- arch/arm/include/asm/hardware/sp810.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index 6b9b077d86b..afd7e916472 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -56,6 +56,8 @@ #define SCCTRL_TIMEREN1SEL_REFCLK (0 << 17) #define SCCTRL_TIMEREN1SEL_TIMCLK (1 << 17) +#define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2)) + static inline void sysctl_soft_reset(void __iomem *base) { /* switch to slow mode */ -- cgit v1.2.3 From b5466f8728527a05a493cc4abe9e6f034a1bbaab Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 Jun 2012 14:47:31 +0100 Subject: ARM: mm: remove IPI broadcasting on ASID rollover ASIDs are allocated to MMU contexts based on a rolling counter. This means that after 255 allocations we must invalidate all existing ASIDs via an expensive IPI mechanism to synchronise all of the online CPUs and ensure that all tasks execute with an ASID from the new generation. This patch changes the rollover behaviour so that we rely instead on the hardware broadcasting of the TLB invalidation to avoid the IPI calls. This works by keeping track of the active ASID on each core, which is then reserved in the case of a rollover so that currently scheduled tasks can continue to run. For cores without hardware TLB broadcasting, we keep track of pending flushes in a cpumask, so cores can flush their local TLB before scheduling a new mm. Reviewed-by: Catalin Marinas Tested-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm/include/asm/mmu.h | 11 ++--- arch/arm/include/asm/mmu_context.h | 82 ++------------------------------------ 2 files changed, 7 insertions(+), 86 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 14965658a92..5b53b53ab5c 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -5,18 +5,15 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID - unsigned int id; - raw_spinlock_t id_lock; + u64 id; #endif unsigned int kvm_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID -#define ASID(mm) ((mm)->context.id & 255) - -/* init_mm.context.id_lock should be initialized. */ -#define INIT_MM_CONTEXT(name) \ - .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), +#define ASID_BITS 8 +#define ASID_MASK ((~0ULL) << ASID_BITS) +#define ASID(mm) ((mm)->context.id & ~ASID_MASK) #else #define ASID(mm) (0) #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 0306bc642c0..a64f61cb23d 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -24,84 +24,8 @@ void __check_kvm_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID -/* - * On ARMv6, we have the following structure in the Context ID: - * - * 31 7 0 - * +-------------------------+-----------+ - * | process ID | ASID | - * +-------------------------+-----------+ - * | context ID | - * +-------------------------------------+ - * - * The ASID is used to tag entries in the CPU caches and TLBs. - * The context ID is used by debuggers and trace logic, and - * should be unique within all running processes. 
- */ -#define ASID_BITS 8 -#define ASID_MASK ((~0) << ASID_BITS) -#define ASID_FIRST_VERSION (1 << ASID_BITS) - -extern unsigned int cpu_last_asid; - -void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void __new_context(struct mm_struct *mm); -void cpu_set_reserved_ttbr0(void); - -static inline void switch_new_context(struct mm_struct *mm) -{ - unsigned long flags; - - __new_context(mm); - - local_irq_save(flags); - cpu_switch_mm(mm->pgd, mm); - local_irq_restore(flags); -} - -static inline void check_and_switch_context(struct mm_struct *mm, - struct task_struct *tsk) -{ - if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) - __check_kvm_seq(mm); - - /* - * Required during context switch to avoid speculative page table - * walking with the wrong TTBR. - */ - cpu_set_reserved_ttbr0(); - - if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) - /* - * The ASID is from the current generation, just switch to the - * new pgd. This condition is only true for calls from - * context_switch() and interrupts are already disabled. - */ - cpu_switch_mm(mm->pgd, mm); - else if (irqs_disabled()) - /* - * Defer the new ASID allocation until after the context - * switch critical region since __new_context() cannot be - * called with interrupts disabled (it sends IPIs). - */ - set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); - else - /* - * That is a direct call to switch_mm() or activate_mm() with - * interrupts enabled and a new context. - */ - switch_new_context(mm); -} - -#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) - -#define finish_arch_post_lock_switch \ - finish_arch_post_lock_switch -static inline void finish_arch_post_lock_switch(void) -{ - if (test_and_clear_thread_flag(TIF_SWITCH_MM)) - switch_new_context(current->mm); -} +void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); +#define init_new_context(tsk,mm) ({ mm->context.id = 0; }) #else /* !CONFIG_CPU_HAS_ASID */ @@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void) #endif /* CONFIG_CPU_HAS_ASID */ #define destroy_context(mm) do { } while(0) +#define activate_mm(prev,next) switch_mm(prev, next, NULL) /* * This is called when "tsk" is about to enter lazy TLB mode. @@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, } #define deactivate_mm(tsk,mm) do { } while (0) -#define activate_mm(prev,next) switch_mm(prev, next, NULL) #endif -- cgit v1.2.3 From 38669e045dbf8f62a008898a7fb1e93975b3817c Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Tue, 9 Oct 2012 12:56:36 +0100 Subject: ARM: vexpress: Start using new Versatile Express infrastructure This patch starts using all the configuration infrastructure. - generic GPIO library is forced now - sysreg GPIOs are used as MMC CD and WP information sources; thanks to this MMCI auxiliary data is not longer necessary - DVI muxer and mode control is removed from non-DT V2P-CA9 code as this is now handled by the vexpress-dvi driver - clock generators control is removed as is being handled by the common clock driver now - the sysreg and sysctl control is now delegated to the appropriate drivers and all related code was removed - NOR Flash set_vpp function has been removed as the control bit used does _not_ control its VPP line, but the #WP signal instead (which is de facto unusable in case of Linux MTD drivers); this also allowed the remove its DT auxiliary data The non-DT code defines only minimal required number of the config devices. 
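The sp810.h hunk below drops the fixed TIMEREN0SEL/TIMEREN1SEL definitions in favour of the parameterised SCCTRL_TIMERENnSEL_SHIFT(n) added by the earlier clk-vexpress patch. A rough sketch of how a caller might select TIMCLK for timer n with it (illustrative only, not the actual vexpress code; SCCTRL is assumed to be the control register offset from the same header):

    #include <linux/io.h>
    #include <asm/hardware/sp810.h>

    static void example_sp810_use_timclk(void __iomem *sysctl_base, int n)
    {
            u32 val = readl(sysctl_base + SCCTRL);

            val |= 1 << SCCTRL_TIMERENnSEL_SHIFT(n);   /* 1 = TIMCLK, 0 = REFCLK */
            writel(val, sysctl_base + SCCTRL);
    }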
Device Trees are updated to make use of all new features. Signed-off-by: Pawel Moll --- arch/arm/include/asm/hardware/sp810.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index afd7e916472..6636430dd0e 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -50,12 +50,6 @@ #define SCPCELLID2 0xFF8 #define SCPCELLID3 0xFFC -#define SCCTRL_TIMEREN0SEL_REFCLK (0 << 15) -#define SCCTRL_TIMEREN0SEL_TIMCLK (1 << 15) - -#define SCCTRL_TIMEREN1SEL_REFCLK (0 << 17) -#define SCCTRL_TIMEREN1SEL_TIMCLK (1 << 17) - #define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2)) static inline void sysctl_soft_reset(void __iomem *base) -- cgit v1.2.3 From e5c5f2adeb370559f4b221d57214db85858b786a Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 22 Oct 2012 11:42:54 -0600 Subject: ARM: implement debug_ll_io_init() When using DEBUG_LL, the UART's (or other HW's) registers are mapped into early page tables based on the results of assembly macro addruart. Later, when the page tables are replaced, the same virtual address must remain valid. Historically, this has been ensured by using defines from in both the implementation of addruart, and the machine's .map_io() function. However, with the move to single zImage, we wish to remove . To enable this, the macro addruart may be used when constructing the late page tables too; addruart is exposed as a C function debug_ll_addr(), and used to set up the required mapping in debug_ll_io_init(), which may called on an opt-in basis from a machine's .map_io() function. Signed-off-by: Rob Herring [swarren: Mask map.virtual with PAGE_MASK. Checked for NULL results from debug_ll_addr (e.g. when selected UART isn't valid). Fixed compile when either !CONFIG_DEBUG_LL or CONFIG_DEBUG_SEMIHOSTING.] Signed-off-by: Stephen Warren Signed-off-by: Olof Johansson --- arch/arm/include/asm/mach/map.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 195ac2f9d3d..2fe141fcc8d 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -40,6 +40,13 @@ extern void iotable_init(struct map_desc *, int); extern void vm_reserve_area_early(unsigned long addr, unsigned long size, void *caller); +#ifdef CONFIG_DEBUG_LL +extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr); +extern void debug_ll_io_init(void); +#else +static inline void debug_ll_io_init(void) {} +#endif + struct mem_type; extern const struct mem_type *get_mem_type(unsigned int type); /* -- cgit v1.2.3 From b8db6b886a1fecd6a5b1d13b190f3149247305ef Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Tue, 6 Nov 2012 01:58:07 +0100 Subject: ARM: 7547/4: cache-l2x0: add support for Aurora L2 cache ctrl Aurora Cache Controller was designed to be compatible with the ARM L2 Cache Controller. It comes with some difference or improvement such as: - no cache id part number available through hardware (need to get it by the DT). - always write through mode available. - two flavors of the controller outer cache and system cache (meaning maintenance operations on L1 are broadcasted to the L2 and L2 performs the same operation). - in outer cache mode, the cache maintenance operations are improved and can be done on a range inside a page and are not limited to a cache line. 
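Of the two constants this patch adds, L2X0_CTRL_EN is the enable bit for the control register and L2X0_WAY_SIZE_SHIFT captures how the way-size field of the auxiliary control register maps to kilobytes. A hedged sketch of the usual decoding (the AUX_CTRL mask and shift names already exist in cache-l2x0.h; the helper itself is invented):

    #include <linux/types.h>
    #include <asm/hardware/cache-l2x0.h>

    /* Field value f means 1 << (f + 3) KB per way, e.g. 2 -> 32 KB. */
    static unsigned int example_l2x0_way_size_kb(u32 aux)
    {
            u32 field = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK)
                            >> L2X0_AUX_CTRL_WAY_SIZE_SHIFT;

            return 1 << (field + L2X0_WAY_SIZE_SHIFT);
    }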
Tested-and-Reviewed-by: Lior Amsalem Signed-off-by: Gregory CLEMENT Signed-off-by: Yehuda Yitschak Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/hardware/cache-l2x0.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5f2c7b44fda..3b2c40b5bfa 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -102,6 +102,10 @@ #define L2X0_ADDR_FILTER_EN 1 +#define L2X0_CTRL_EN 1 + +#define L2X0_WAY_SIZE_SHIFT 3 + #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask); #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) -- cgit v1.2.3 From e50c54189f7c6211a99539156e3978474f0b1a0b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 13 Sep 2012 16:40:46 +0100 Subject: ARM: perf: add guest vs host discrimination Add minimal guest support to perf, so it can distinguish whether the PMU interrupt was in the host or the guest, as well as collecting some very basic information (guest PC, user vs kernel mode). This is not feature complete though, as it doesn't support backtracing in the guest. Based on the x86 implementation, tested with KVM/ARM. Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm/include/asm/perf_event.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 625cd621a43..00416edecea 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -21,4 +21,9 @@ #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xFFFF +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) + #endif /* __ARM_PERF_EVENT_H__ */ -- cgit v1.2.3 From ed6f2a522398c26559f4da23a80aa6195e6284c7 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Mon, 30 Jul 2012 12:00:02 +0100 Subject: ARM: perf: consistently use struct perf_event in arm_pmu functions The arm_pmu functions have wildly varied parameters which can often be derived from struct perf_event. This patch changes the arm_pmu function prototypes so that struct perf_event pointers are passed in preference to fields that can be derived from the event. 
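In practice a back-end callback now begins by pulling what it needs back out of the event itself; a schematic example of the resulting shape (the function is invented, only the derivations matter):

    #include <linux/perf_event.h>
    #include <asm/pmu.h>

    static void example_pmu_enable_event(struct perf_event *event)
    {
            struct arm_pmu *cpu_pmu = container_of(event->pmu, struct arm_pmu, pmu);
            struct hw_perf_event *hwc = &event->hw;
            int idx = hwc->idx;

            /* ...program hardware counter 'idx' via cpu_pmu using hwc... */
    }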
Signed-off-by: Sudeep KarkadaNagesha Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index a26170dce02..a209a384dbc 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -67,19 +67,19 @@ struct arm_pmu { cpumask_t active_irqs; char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); - void (*enable)(struct hw_perf_event *evt, int idx); - void (*disable)(struct hw_perf_event *evt, int idx); + void (*enable)(struct perf_event *event); + void (*disable)(struct perf_event *event); int (*get_event_idx)(struct pmu_hw_events *hw_events, - struct hw_perf_event *hwc); + struct perf_event *event); int (*set_event_filter)(struct hw_perf_event *evt, struct perf_event_attr *attr); - u32 (*read_counter)(int idx); - void (*write_counter)(int idx, u32 val); - void (*start)(void); - void (*stop)(void); + u32 (*read_counter)(struct perf_event *event); + void (*write_counter)(struct perf_event *event, u32 val); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); void (*reset)(void *); - int (*request_irq)(irq_handler_t handler); - void (*free_irq)(void); + int (*request_irq)(struct arm_pmu *, irq_handler_t handler); + void (*free_irq)(struct arm_pmu *); int (*map_event)(struct perf_event *event); int num_events; atomic_t active_events; @@ -95,13 +95,9 @@ extern const struct dev_pm_ops armpmu_dev_pm_ops; int armpmu_register(struct arm_pmu *armpmu, char *name, int type); -u64 armpmu_event_update(struct perf_event *event, - struct hw_perf_event *hwc, - int idx); +u64 armpmu_event_update(struct perf_event *event); -int armpmu_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, - int idx); +int armpmu_event_set_period(struct perf_event *event); int armpmu_map_event(struct perf_event *event, const unsigned (*event_map)[PERF_COUNT_HW_MAX], -- cgit v1.2.3 From 0305230a3d92d6829db89c9e0c096d4d8733f317 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 21 Sep 2012 14:23:47 +0100 Subject: ARM: perf: consistently use arm_pmu->name for PMU name Perf has three ways to name a PMU: either by passing an explicit char *, reading arm_pmu->name or accessing arm_pmu->pmu.name. Just use arm_pmu->name consistently in the ARM backend. Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index a209a384dbc..f24edad26c7 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -93,7 +93,7 @@ struct arm_pmu { extern const struct dev_pm_ops armpmu_dev_pm_ops; -int armpmu_register(struct arm_pmu *armpmu, char *name, int type); +int armpmu_register(struct arm_pmu *armpmu, int type); u64 armpmu_event_update(struct perf_event *event); -- cgit v1.2.3 From 9e962f76602dbd293a57030f4ce5a4b57853e2ea Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Wed, 26 Sep 2012 17:28:47 +0100 Subject: ARM: hw_breakpoint: use CRn as argument for debug reg accessor macros The coprocessor register CRn for accesses to the debug register can be a different one than C0. Take this into account for the ARM_DBG_READ and the ARM_DBG_WRITE macro. The inline assembler calls which used a coprocessor register CRn other than C0 are replaced by the ARM_DBG_READ or ARM_DBG_WRITE macro. 
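With CRn spelled out, debug registers outside c0 can be read through the same macro instead of one-off inline assembly. An illustrative helper (register encodings taken from the ARMv7 debug register map: DBGDIDR at c0/c0/0, DBGPRSR at c1/c5/4):

    #include <linux/types.h>
    #include <asm/hw_breakpoint.h>

    static void example_read_debug_regs(u32 *didr, u32 *prsr)
    {
            ARM_DBG_READ(c0, c0, 0, *didr);    /* DBGDIDR */
            ARM_DBG_READ(c1, c5, 4, *prsr);    /* DBGPRSR: CRn is c1, not c0 */
    }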
Tested-by: Stephen Boyd Signed-off-by: Dietmar Eggemann Signed-off-by: Will Deacon --- arch/arm/include/asm/hw_breakpoint.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index c190bc992f0..01169dd723f 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg, #define ARM_BASE_WCR 112 /* Accessor macros for the debug registers. */ -#define ARM_DBG_READ(M, OP2, VAL) do {\ - asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\ +#define ARM_DBG_READ(N, M, OP2, VAL) do {\ + asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\ } while (0) -#define ARM_DBG_WRITE(M, OP2, VAL) do {\ - asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\ +#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\ + asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\ } while (0) struct notifier_block; -- cgit v1.2.3 From dbf62d50067e55a782583fe53c3d2a3d98b1f6f3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Jul 2012 11:51:05 +0100 Subject: ARM: mm: introduce L_PTE_VALID for page table entries For long-descriptor translation table formats, the ARMv7 architecture defines the last two bits of the second- and third-level descriptors to be: x0b - Invalid 01b - Block (second-level), Reserved (third-level) 11b - Table (second-level), Page (third-level) This allows us to define L_PTE_PRESENT as (3 << 0) and use this value to create ptes directly. However, when determining whether a given pte value is present in the low-level page table accessors, we only need to check the least significant bit of the descriptor, allowing us to write faulting, present entries which are required for PROT_NONE mappings. This patch introduces L_PTE_VALID, which can be used to test whether a pte should fault, and updates the low-level page table accessors accordingly. Signed-off-by: Will Deacon --- arch/arm/include/asm/pgtable-2level.h | 1 + arch/arm/include/asm/pgtable-3level.h | 3 ++- arch/arm/include/asm/pgtable.h | 4 +--- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 2317a71c8f8..c44a1ecfc28 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -115,6 +115,7 @@ * The PTE table pointer refers to the hardware entries; the "Linux" * entries are stored 1024 bytes below. */ +#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */ #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index b24903549d1..e32311a9abc 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -67,7 +67,8 @@ * These bits overlap with the hardware bits but the naming is preserved for * consistency with the classic page table format. 
*/ -#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */ +#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */ +#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */ #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 08c12312a1f..ccf34b6e990 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) #define pte_special(pte) (0) -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) +#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER)) #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) -- cgit v1.2.3 From 26ffd0d43b186b0d5186354da8714a1c2d360df0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sat, 1 Sep 2012 05:22:12 +0100 Subject: ARM: mm: introduce present, faulting entries for PAGE_NONE PROT_NONE mappings apply the page protection attributes defined by _P000 which translate to PAGE_NONE for ARM. These attributes specify an XN, RDONLY pte that is inaccessible to userspace. However, on kernels configured without support for domains, such a pte *is* accessible to the kernel and can be read via get_user, allowing tasks to read PROT_NONE pages via syscalls such as read/write over a pipe. This patch introduces a new software pte flag, L_PTE_NONE, that is set to identify faulting, present entries. Signed-off-by: Will Deacon --- arch/arm/include/asm/pgtable-2level.h | 1 + arch/arm/include/asm/pgtable-3level.h | 1 + arch/arm/include/asm/pgtable.h | 6 +++--- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index c44a1ecfc28..f97ee02386e 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -124,6 +124,7 @@ #define L_PTE_USER (_AT(pteval_t, 1) << 8) #define L_PTE_XN (_AT(pteval_t, 1) << 9) #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ +#define L_PTE_NONE (_AT(pteval_t, 1) << 11) /* * These are the memory types, defined to be compatible with diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index e32311a9abc..a3f37929940 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -77,6 +77,7 @@ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ +#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ /* * To be used in assembly code with the upper page attributes. 
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ccf34b6e990..9c82f988c0e 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY) +#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE) #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) @@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel; #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) #define PAGE_KERNEL_EXEC pgprot_kernel -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) @@ -240,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER; + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } -- cgit v1.2.3 From 16cf8a80a8f0f4757427b17cdfb6c4897674db68 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 8 Nov 2012 08:46:23 +0100 Subject: ARM: dma-mapping: remove init_consistent_dma_size() stub Since commit e9da6e9905e639b0 ("ARM: dma-mapping: remove custom consistent dma region") setting consistent dma memory size is not longer required. All calls to this function has been already removed, so the init_consistent_dma_size() stub can also be gone. Signed-off-by: Marek Szyprowski --- arch/arm/include/asm/dma-mapping.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 23004847bb0..8ea02ac3ec1 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -210,13 +210,6 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, */ extern void __init init_dma_coherent_pool_size(unsigned long size); -/* - * This can be called during boot to increase the size of the consistent - * DMA region above it's default value of 2MB. It must be called before the - * memory allocator is initialised, i.e. before any core_initcall. - */ -static inline void init_consistent_dma_size(unsigned long size) { } - /* * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" * and utilize bounce buffers as needed to work around limited DMA windows. -- cgit v1.2.3 From b62655f4c6f3e4d21934eee14ac2ac5cd479c97c Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Tue, 6 Nov 2012 03:48:40 +0100 Subject: ARM: 7571/1: SMP: add function arch_send_wakeup_ipi_mask() Add function arch_send_wakeup_ipi_mask(), so that platform code can use it as an easy way to wake up cores that are in WFI. 
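[Illustration, not part of the patch] As a rough sketch of how platform code could use the new hook to kick a core parked in WFI; the wrapper function and its cpu argument are placeholders invented here:

	#include <linux/cpumask.h>
	#include <asm/smp.h>

	static void example_wake_core(unsigned int cpu)
	{
		/*
		 * The target core is assumed to be sitting in WFI; the IPI
		 * simply makes it fall through the WFI and re-evaluate its
		 * state in the platform's low-power loop.
		 */
		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	}
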
Signed-off-by: Shawn Guo Signed-off-by: Russell King --- arch/arm/include/asm/smp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2e3be16c676..d3a22bebe6c 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -79,6 +79,7 @@ extern void cpu_die(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); struct smp_operations { #ifdef CONFIG_SMP -- cgit v1.2.3 From 8cbd9cc6254065c97c4bac42daa55ba1abe73a8e Mon Sep 17 00:00:00 2001 From: David Sharp Date: Tue, 13 Nov 2012 12:18:21 -0800 Subject: tracing,x86: Add a TSC trace_clock In order to promote interoperability between userspace tracers and ftrace, add a trace_clock that reports raw TSC values which will then be recorded in the ring buffer. Userspace tracers that also record TSCs are then on exactly the same time base as the kernel and events can be unambiguously interlaced. Tested: Enabled a tracepoint and the "tsc" trace_clock and saw very large timestamp values. v2: Move arch-specific bits out of generic code. v3: Rename "x86-tsc", cleanups v7: Generic arch bits in Kbuild. Google-Bug-Id: 6980623 Link: http://lkml.kernel.org/r/1352837903-32191-1-git-send-email-dhsharp@google.com Acked-by: Ingo Molnar Cc: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Signed-off-by: David Sharp Signed-off-by: Steven Rostedt --- arch/arm/include/asm/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index f70ae175a3d..514e398f1a0 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -31,5 +31,6 @@ generic-y += sockios.h generic-y += termbits.h generic-y += termios.h generic-y += timex.h +generic-y += trace_clock.h generic-y += types.h generic-y += unaligned.h -- cgit v1.2.3 From 31982e52f0f5d6d51e69d5c4c4a7be5d52307c6e Mon Sep 17 00:00:00 2001 From: Pantelis Antoniou Date: Wed, 31 Oct 2012 17:57:49 +0200 Subject: arm-dt: Enable DT proc updates. This simple patch enables dynamic changes of the DT tree on runtime to be visible to the device-tree proc interface. Signed-off-by: Pantelis Antoniou Acked-by: Rob Herring Signed-off-by: Grant Likely --- arch/arm/include/asm/prom.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index aeae9c609df..6d65ba222db 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -11,6 +11,8 @@ #ifndef __ASMARM_PROM_H #define __ASMARM_PROM_H +#define HAVE_ARCH_DEVTREE_FIXUPS + #ifdef CONFIG_OF extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); -- cgit v1.2.3 From f600b9fcd2bcb8ee0adb235f54ccdd93c729c442 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 15 Nov 2012 21:28:43 +0000 Subject: ARM: cti: fix manipulation of debug lock registers The LOCKSTATUS register for memory-mapped coresight devices indicates whether or not the device in question implements hardware locking. If not, locking is not present (i.e. LSR.SLI == 0) and LAR is write-ignore, so software doesn't actually need to check the status register at all. This patch removes the broken LSR checks. 
Cc: Ming Lei Reported-by: Mike Williams Signed-off-by: Will Deacon --- arch/arm/include/asm/cti.h | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h index a0ada3ea435..f2e5cad3f30 100644 --- a/arch/arm/include/asm/cti.h +++ b/arch/arm/include/asm/cti.h @@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti) */ static inline void cti_unlock(struct cti *cti) { - void __iomem *base = cti->base; - unsigned long val; - - val = __raw_readl(base + LOCKSTATUS); - - if (val & 1) { - val = LOCKCODE; - __raw_writel(val, base + LOCKACCESS); - } + __raw_writel(LOCKCODE, cti->base + LOCKACCESS); } /** @@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti) */ static inline void cti_lock(struct cti *cti) { - void __iomem *base = cti->base; - unsigned long val; - - val = __raw_readl(base + LOCKSTATUS); - - if (!(val & 1)) { - val = ~LOCKCODE; - __raw_writel(val, base + LOCKACCESS); - } + __raw_writel(~LOCKCODE, cti->base + LOCKACCESS); } #endif -- cgit v1.2.3 From 6920b5a791bbb54810159fbf6acf2c6ae14cdd22 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 10:18:58 +0100 Subject: ARM: move serial_sa1100.h header file to linux/platform_data This is really driver platform data, so move it to the appropriate directory. Acked-by: Greg Kroah-Hartman Signed-off-by: Russell King --- arch/arm/include/asm/mach/serial_sa1100.h | 31 ------------------------------- 1 file changed, 31 deletions(-) delete mode 100644 arch/arm/include/asm/mach/serial_sa1100.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h deleted file mode 100644 index d09064bf95a..00000000000 --- a/arch/arm/include/asm/mach/serial_sa1100.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/arm/include/asm/mach/serial_sa1100.h - * - * Author: Nicolas Pitre - * - * Moved and changed lots, Russell King - * - * Low level machine dependent UART functions. - */ - -struct uart_port; -struct uart_info; - -/* - * This is a temporary structure for registering these - * functions; it is intended to be discarded after boot. - */ -struct sa1100_port_fns { - void (*set_mctrl)(struct uart_port *, u_int); - u_int (*get_mctrl)(struct uart_port *); - void (*pm)(struct uart_port *, u_int, u_int); - int (*set_wake)(struct uart_port *, u_int); -}; - -#ifdef CONFIG_SERIAL_SA1100 -void sa1100_register_uart_fns(struct sa1100_port_fns *fns); -void sa1100_register_uart(int idx, int port); -#else -#define sa1100_register_uart_fns(fns) do { } while (0) -#define sa1100_register_uart(idx,port) do { } while (0) -#endif -- cgit v1.2.3 From 46000065a631c6d21452d533baf086175c384ba4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 10:23:44 +0100 Subject: ARM: move udc_pxa2xx.h to linux/platform_data Move the PXA2xx/IXP4xx UDC header file into linux/platform_data as it only contains a driver platform data structure. 
Acked-by: Felipe Balbi Acked-by: Greg Kroah-Hartman Acked-by: Krzysztof Halasa Signed-off-by: Russell King --- arch/arm/include/asm/mach/udc_pxa2xx.h | 26 -------------------------- 1 file changed, 26 deletions(-) delete mode 100644 arch/arm/include/asm/mach/udc_pxa2xx.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h deleted file mode 100644 index ea297ac70bc..00000000000 --- a/arch/arm/include/asm/mach/udc_pxa2xx.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * arch/arm/include/asm/mach/udc_pxa2xx.h - * - * This supports machine-specific differences in how the PXA2xx - * USB Device Controller (UDC) is wired. - * - * It is set in linux/arch/arm/mach-pxa/.c or in - * linux/arch/mach-ixp4xx/.c and used in - * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c - */ - -struct pxa2xx_udc_mach_info { - int (*udc_is_connected)(void); /* do we see host? */ - void (*udc_command)(int cmd); -#define PXA2XX_UDC_CMD_CONNECT 0 /* let host see us */ -#define PXA2XX_UDC_CMD_DISCONNECT 1 /* so host won't see us */ - - /* Boards following the design guidelines in the developer's manual, - * with on-chip GPIOs not Lubbock's weird hardware, can have a sane - * VBUS IRQ and omit the methods above. Store the GPIO number - * here. Note that sometimes the signals go through inverters... - */ - bool gpio_pullup_inverted; - int gpio_pullup; /* high == pullup activated */ -}; - -- cgit v1.2.3 From 95e629b761ce36996d1befe2824d5346b5a220b9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 10:26:40 +0100 Subject: ARM/AVR32: get rid of serial_at91.h The definitions provided by serial_at91.h are only used by the atmel_serial driver, and the function that uses it is never called from anywhere in the kernel. Therefore, these definitions are unused and/or obsolete, and can be removed. Acked-by: Greg Kroah-Hartman Acked-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Nicolas Ferre Signed-off-by: Russell King --- arch/arm/include/asm/mach/serial_at91.h | 33 --------------------------------- 1 file changed, 33 deletions(-) delete mode 100644 arch/arm/include/asm/mach/serial_at91.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h deleted file mode 100644 index ea6d063923b..00000000000 --- a/arch/arm/include/asm/mach/serial_at91.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * arch/arm/include/asm/mach/serial_at91.h - * - * Based on serial_sa1100.h by Nicolas Pitre - * - * Copyright (C) 2002 ATMEL Rousset - * - * Low level machine dependent UART functions. - */ - -struct uart_port; - -/* - * This is a temporary structure for registering these - * functions; it is intended to be discarded after boot. - */ -struct atmel_port_fns { - void (*set_mctrl)(struct uart_port *, u_int); - u_int (*get_mctrl)(struct uart_port *); - void (*enable_ms)(struct uart_port *); - void (*pm)(struct uart_port *, u_int, u_int); - int (*set_wake)(struct uart_port *, u_int); - int (*open)(struct uart_port *); - void (*close)(struct uart_port *); -}; - -#if defined(CONFIG_SERIAL_ATMEL) -void atmel_register_uart_fns(struct atmel_port_fns *fns); -#else -#define atmel_register_uart_fns(fns) do { } while (0) -#endif - - -- cgit v1.2.3 From 1f59d13bee172945ccdfbc5018477ba94a0ac28e Mon Sep 17 00:00:00 2001 From: Will Drewry Date: Thu, 15 Nov 2012 22:11:29 +0100 Subject: ARM: 7577/1: arch/add syscall_get_arch Provide an ARM implementation of syscall_get_arch. 
This is a pre-requisite for CONFIG_HAVE_ARCH_SECCOMP_FILTER. Signed-off-by: Will Drewry Signed-off-by: Kees Cook Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/syscall.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 9fdded6b108..f1d96d4e809 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -7,6 +7,8 @@ #ifndef _ASM_ARM_SYSCALL_H #define _ASM_ARM_SYSCALL_H +#include /* for AUDIT_ARCH_* */ +#include /* for ELF_EM */ #include #include @@ -95,4 +97,11 @@ static inline void syscall_set_arguments(struct task_struct *task, memcpy(®s->ARM_r0 + i, args, n * sizeof(args[0])); } +static inline int syscall_get_arch(struct task_struct *task, + struct pt_regs *regs) +{ + /* ARM tasks don't change audit architectures on the fly. */ + return AUDIT_ARCH_ARM; +} + #endif /* _ASM_ARM_SYSCALL_H */ -- cgit v1.2.3 From 9b790d71d58be65f9508ab60920eb978af828412 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 15 Nov 2012 22:12:00 +0100 Subject: ARM: 7578/1: arch/move secure_computing into trace There is very little difference in the TIF_SECCOMP and TIF_SYSCALL_WORK path in entry-common.S, so merge TIF_SECCOMP into TIF_SYSCALL_WORK and move seccomp into the syscall_trace_enter() handler. Expanded some of the tracehook logic into the callers to make this code more readable. Since tracehook needs to do register changing, this portion is best left in its own function instead of copy/pasting into the callers. Additionally, the return value for secure_computing() is now checked and a -1 value will result in the system call being skipped. Signed-off-by: Kees Cook Acked-by: Will Drewry Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 8477b4c1d39..cddda1f41f0 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -151,10 +151,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 -#define TIF_SECCOMP 21 #define TIF_SWITCH_MM 22 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -163,11 +163,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) /* Checks for any syscall work in entry-common.S */ -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) /* * Change these and you break ASM code in entry-common.S -- cgit v1.2.3 From e8d432c9cf0a3d569b2d00877d12c9ffe9e55286 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Tue, 6 Nov 2012 11:57:43 +0000 Subject: ARM: kernel: add MIDR to per-CPU information 
data The advent of big.LITTLE ARM platforms requires the kernel to be able to identify the MIDRs of all online CPUs upon request. MIDRs are stashed at boot time so that kernel subsystems can detect the MIDR of online CPUs by simply retrieving per-CPU data updated by all booted CPUs. Signed-off-by: Lorenzo Pieralisi Acked-by: Nicolas Pitre --- arch/arm/include/asm/cpu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index d797223b39d..2744f060255 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -15,6 +15,7 @@ struct cpuinfo_arm { struct cpu cpu; + u32 cpuid; #ifdef CONFIG_SMP unsigned int loops_per_jiffy; #endif -- cgit v1.2.3 From dca463daa0151c5bbbd8ec8fd42882a3966d3c44 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 15 Nov 2012 17:30:32 +0000 Subject: ARM: kernel: enhance MPIDR macro definitions Kernel subsystems other than the topology layer need the MPIDR mask definitions to access the MPIDR without relying on hardcoded masks. This patch moves the MPIDR register masks definition to a header file and defines a macro to simplify access to MPIDR bit fields representing affinity levels. Signed-off-by: Lorenzo Pieralisi Acked-by: Will Deacon Acked-by: Nicolas Pitre --- arch/arm/include/asm/cputype.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index cb47d28cbe1..a59dcb5ab5f 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -25,6 +25,19 @@ #define CPUID_EXT_ISAR4 "c2, 4" #define CPUID_EXT_ISAR5 "c2, 5" +#define MPIDR_SMP_BITMASK (0x3 << 30) +#define MPIDR_SMP_VALUE (0x2 << 30) + +#define MPIDR_MT_BITMASK (0x1 << 24) + +#define MPIDR_HWID_BITMASK 0xFFFFFF + +#define MPIDR_LEVEL_BITS 8 +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ + ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) + extern unsigned int processor_id; #ifdef CONFIG_CPU_CP15 -- cgit v1.2.3 From a0ae02405076ac32bd17ece976e914b5b6075bb0 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Nov 2011 17:31:51 +0000 Subject: ARM: kernel: add device tree init map function When booting through a device tree, the kernel cpu logical id map can be initialized using device tree data passed by FW or through an embedded blob. This patch adds a function that parses device tree "cpu" nodes and retrieves the corresponding CPUs hardware identifiers (MPIDR). It sets the possible cpus and the cpu logical map values according to the number of CPUs defined in the device tree and respective properties. The device tree HW identifiers are considered valid if all CPU nodes contain a "reg" property, there are no duplicate "reg" entries and the DT defines a CPU node whose "reg" property matches the MPIDR[23:0] of the boot CPU. The primary CPU is assigned cpu logical number 0 to keep the current convention valid. 
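[Illustration, not part of either patch] A minimal sketch of how the MPIDR_AFFINITY_LEVEL accessor introduced above combines with the MPIDR[23:0]-based logical map described here; the dump function and its printout are invented for this sketch and assume the usual cpu_logical_map()/pr_info() helpers:

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <asm/cputype.h>
	#include <asm/smp_plat.h>

	static void example_dump_affinity(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu) & MPIDR_HWID_BITMASK;

			/* aff0 is typically the core, aff1 the cluster */
			pr_info("cpu%d: aff0=%u aff1=%u aff2=%u\n", cpu,
				MPIDR_AFFINITY_LEVEL(mpidr, 0),
				MPIDR_AFFINITY_LEVEL(mpidr, 1),
				MPIDR_AFFINITY_LEVEL(mpidr, 2));
		}
	}
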
Current bindings documentation is included in the patch: Documentation/devicetree/bindings/arm/cpus.txt Signed-off-by: Lorenzo Pieralisi Acked-by: Nicolas Pitre --- arch/arm/include/asm/prom.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index aeae9c609df..8dd51dc1a36 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -15,6 +15,7 @@ extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); extern void arm_dt_memblock_reserve(void); +extern void __init arm_dt_init_cpu_maps(void); #else /* CONFIG_OF */ @@ -24,6 +25,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys) } static inline void arm_dt_memblock_reserve(void) { } +static inline void arm_dt_init_cpu_maps(void) { } #endif /* CONFIG_OF */ #endif /* ASMARM_PROM_H */ -- cgit v1.2.3 From 7f124aaf01439d2fa54283f3c375ce3b9fc776d4 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Nov 2011 17:36:24 +0000 Subject: ARM: kernel: add logical mappings look-up In ARM SMP systems the MPIDR register ([23:0] bits) is used to uniquely identify CPUs. In order to retrieve the logical CPU index corresponding to a given MPIDR value and guarantee a consistent translation throughout the kernel, this patch adds a look-up based on the MPIDR[23:0] so that kernel subsystems can use it whenever the logical cpu index corresponding to a given MPIDR value is needed. Signed-off-by: Lorenzo Pieralisi Acked-by: Will Deacon Acked-by: Nicolas Pitre --- arch/arm/include/asm/smp_plat.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 558d6c80aca..aaa61b6f50f 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -5,6 +5,9 @@ #ifndef __ASMARM_SMP_PLAT_H #define __ASMARM_SMP_PLAT_H +#include +#include + #include /* @@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void) */ extern int __cpu_logical_map[]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] +/* + * Retrieve logical cpu index corresponding to a given MPIDR[23:0] + * - mpidr: MPIDR[23:0] to be used for the look-up + * + * Returns the cpu logical index or -EINVAL on look-up error + */ +static inline int get_logical_index(u32 mpidr) +{ + int cpu; + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + if (cpu_logical_map(cpu) == mpidr) + return cpu; + return -EINVAL; +} #endif -- cgit v1.2.3 From 87b54e786afda828984645a8364a228ae8ac71f4 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Wed, 21 Nov 2012 09:39:19 +0100 Subject: arm: dma mapping: Export a dma ops function arm_dma_set_mask Expose another DMA operations function: arm_dma_set_mask. This function will be added to a custom DMA ops for Armada 370/XP. Depending of its configuration Armada 370/XP can be set as a "nearly" coherent architecture. In this case the DMA ops is made of: - specific functions for this architecture - already exposed arm DMA related functions - the arm_dma_set_mask which was not exposed yet. 
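[Illustration, not part of the patch] A minimal sketch of the kind of wiring the Armada 370/XP code could do with the newly exported helper; the struct field names reflect the dma_map_ops layout of this era as recalled, so treat them as assumptions rather than a definitive implementation:

	#include <linux/dma-mapping.h>
	#include <asm/dma-mapping.h>

	static struct dma_map_ops my_plat_dma_ops = {
		/* platform-specific map/unmap/sync callbacks would go here; */
		/* many of them can simply reuse the generic arm_dma_* ones  */
		.set_dma_mask	= arm_dma_set_mask,	/* the newly exported helper */
	};

	/* a device would then be switched over with set_dma_ops(dev, &my_plat_dma_ops) */
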
Signed-off-by: Gregory CLEMENT Acked-by: Marek Szyprowski --- arch/arm/include/asm/dma-mapping.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 23004847bb0..98d4dabb2c1 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -111,6 +111,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, extern int dma_supported(struct device *dev, u64 mask); +extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); + /** * arm_dma_alloc - allocate consistent memory for DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices -- cgit v1.2.3 From c7cc504bc351e41e871e317ca7f032f4562f34ad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 22 Nov 2012 13:05:55 +0100 Subject: ARM: 7584/1: perf: fix link error when CONFIG_HW_PERF_EVENTS is not selected Commit e50c541 (ARM: perf: add guest vs host discrimination) broken the link as perf_instruction_pointer and perf_misc_flags are not defined when CONFIG_HW_PERF_EVENTS is not selected. As it make little sense to try and profile a guest without any HW event, just fallback to the original code when this config option is not selected. Reported-by: Russell King Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Russell King --- arch/arm/include/asm/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 00416edecea..755877527cf 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -21,9 +21,11 @@ #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xFFFF +#ifdef CONFIG_HW_PERF_EVENTS struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) +#endif #endif /* __ARM_PERF_EVENT_H__ */ -- cgit v1.2.3 From 3e99675af1b25a191c467700499b1cbe5585a778 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sun, 25 Nov 2012 03:24:32 +0100 Subject: ARM: 7582/2: rename kvm_seq to vmalloc_seq so to avoid confusion with KVM The kvm_seq value has nothing to do what so ever with this other KVM. Given that KVM support on ARM is imminent, it's best to rename kvm_seq into something else to clearly identify what it is about i.e. a sequence number for vmalloc section mappings. 
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/mmu.h | 2 +- arch/arm/include/asm/mmu_context.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 5b53b53ab5c..9f77e7804f3 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -7,7 +7,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID u64 id; #endif - unsigned int kvm_seq; + unsigned int vmalloc_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index a64f61cb23d..e1f644bc7cc 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -20,7 +20,7 @@ #include #include -void __check_kvm_seq(struct mm_struct *mm); +void __check_vmalloc_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID @@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); static inline void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) { - if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) - __check_kvm_seq(mm); + if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) + __check_vmalloc_seq(mm); if (irqs_disabled()) /* -- cgit v1.2.3 From 38a61b6b4a45ec8c82c75403848e1c579113c3c5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 21 Oct 2012 15:54:27 -0400 Subject: arm: switch to generic fork/vfork/clone Signed-off-by: Al Viro --- arch/arm/include/asm/unistd.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 8f60b6e6bd4..7cd13cc6262 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -42,6 +42,9 @@ #define __ARCH_WANT_SYS_SOCKETCALL #endif #define __ARCH_WANT_SYS_EXECVE +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE /* * "Conditional" syscalls -- cgit v1.2.3 From 4f4202fe5ae9a43e59303f20d700571f695d7b1b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 5 Nov 2012 12:59:15 -0500 Subject: unify default ptrace_signal_deliver Signed-off-by: Al Viro --- arch/arm/include/asm/signal.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h index 5a7963dbd3f..9a0ea6ab988 100644 --- a/arch/arm/include/asm/signal.h +++ b/arch/arm/include/asm/signal.h @@ -35,5 +35,4 @@ struct k_sigaction { }; #include -#define ptrace_signal_deliver(regs, cookie) do { } while (0) #endif -- cgit v1.2.3 From f832da068b0aadb15f747f6427b6bf945f525ba4 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 3 Oct 2012 16:37:09 +0100 Subject: xen: arm: implement remap interfaces needed for privcmd mappings. We use XENMEM_add_to_physmap_range which is the preferred interface for foreign mappings. 
Acked-by: Mukesh Rathor Acked-by: Stefano Stabellini Signed-off-by: Ian Campbell Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/include/asm/xen/interface.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h index 5000397134b..1151188bcd8 100644 --- a/arch/arm/include/asm/xen/interface.h +++ b/arch/arm/include/asm/xen/interface.h @@ -49,6 +49,7 @@ DEFINE_GUEST_HANDLE(void); DEFINE_GUEST_HANDLE(uint64_t); DEFINE_GUEST_HANDLE(uint32_t); DEFINE_GUEST_HANDLE(xen_pfn_t); +DEFINE_GUEST_HANDLE(xen_ulong_t); /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 1 -- cgit v1.2.3 From 14318efb322e2fe1a034c69463d725209eb9d548 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 29 Nov 2012 20:39:54 +0100 Subject: ARM: 7587/1: implement optimized percpu variable access Use the previously unused TPIDRPRW register to store percpu offsets. TPIDRPRW is only accessible in PL1, so it can only be used in the kernel. This replaces 2 loads with a mrc instruction for each percpu variable access. With hackbench, the performance improvement is 1.4% on Cortex-A9 (highbank). Taking an average of 30 runs of "hackbench -l 1000" yields: Before: 6.2191 After: 6.1348 Will Deacon reported similar delta on v6 with 11MPCore. The asm "memory clobber" are needed here to ensure the percpu offset gets reloaded. Testing by Will found that this would not happen in __schedule() which is a bit of a special case as preemption is disabled but the execution can move cores. Signed-off-by: Rob Herring Acked-by: Will Deacon Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 1 - arch/arm/include/asm/percpu.h | 45 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 arch/arm/include/asm/percpu.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index f70ae175a3d..2ffdaacd461 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += local64.h generic-y += msgbuf.h generic-y += param.h generic-y += parport.h -generic-y += percpu.h generic-y += poll.h generic-y += resource.h generic-y += sections.h diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h new file mode 100644 index 00000000000..968c0a14e0a --- /dev/null +++ b/arch/arm/include/asm/percpu.h @@ -0,0 +1,45 @@ +/* + * Copyright 2012 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#ifndef _ASM_ARM_PERCPU_H_ +#define _ASM_ARM_PERCPU_H_ + +/* + * Same as asm-generic/percpu.h, except that we store the per cpu offset + * in the TPIDRPRW. 
TPIDRPRW only exists on V6K and V7 + */ +#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) +static inline void set_my_cpu_offset(unsigned long off) +{ + /* Set TPIDRPRW */ + asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory"); +} + +static inline unsigned long __my_cpu_offset(void) +{ + unsigned long off; + /* Read TPIDRPRW */ + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory"); + return off; +} +#define __my_cpu_offset __my_cpu_offset() +#else +#define set_my_cpu_offset(x) do {} while(0) + +#endif /* CONFIG_SMP */ + +#include + +#endif /* _ASM_ARM_PERCPU_H_ */ -- cgit v1.2.3 From a0157573041403e7507a6f3f32279fc14ff5c02e Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 22 Oct 2012 20:44:03 +0800 Subject: ARM: dma-mapping: support debug_dma_mapping_error Without the patch, kind of below warning will be dumped if DMA-API debug is enabled: [ 11.069763] ------------[ cut here ]------------ [ 11.074645] WARNING: at lib/dma-debug.c:948 check_unmap+0x770/0x860() [ 11.081420] ehci-omap ehci-omap.0: DMA-API: device driver failed to check map error[device address=0x0000000 0adb78e80] [size=8 bytes] [mapped as single] [ 11.095611] Modules linked in: Cc: Russell King Cc: Marek Szyprowski Signed-off-by: Ming Lei Signed-off-by: Joerg Roedel --- arch/arm/include/asm/dma-mapping.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 23004847bb0..78d8e9b5544 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -91,6 +91,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { + debug_dma_mapping_error(dev, dma_addr); return dma_addr == DMA_ERROR_CODE; } -- cgit v1.2.3 From 1ecec696c8bb9b4cefb09495d81d081d1c81b578 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 10 Dec 2012 18:35:22 +0100 Subject: ARM: 7599/1: head: Remove boot-time HYP mode check for v5 and below The kernel can only be entered on HYP mode on CPUs which actually support it, i.e. >= ARMv7. pre-v6 platform support cannot coexist in the same kernel as support for v7 and higher, so there is no advantage in having the HYP mode check on pre-v6 hardware. At least one pre-v6 board is known to fail when the HYP mode check code is present, although the exact cause remains unknown and may be unrelated. [1] This patch restores the old behaviour for pre-v6 platforms, whereby the CPSR is forced directly to SVC mode with IRQs and FIQs masked. All kernels capable of booting on v7 hardware will retain the check, so this should not impair functionality. [1] http://lists.arm.linux.org.uk/lurker/message/20121130.013814.19218413.en.html ([ARM] head.S change broke platform device registration?) Signed-off-by: Dave Martin Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 2ef95813fce..eb87200aa4b 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -250,6 +250,7 @@ * Beware, it also clobers LR. 
*/ .macro safe_svcmode_maskall reg:req +#if __LINUX_ARM_ARCH__ >= 6 mrs \reg , cpsr mov lr , \reg and lr , lr , #MODE_MASK @@ -266,6 +267,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) __ERET 1: msr cpsr_c, \reg 2: +#else +/* + * workaround for possibly broken pre-v6 hardware + * (akita, Sharp Zaurus C-1000, PXA270-based) + */ + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg +#endif .endm /* -- cgit v1.2.3 From ae903caae267154de7cf8576b130ff474630596b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 14 Dec 2012 12:44:11 -0500 Subject: Bury the conditionals from kernel_thread/kernel_execve series All architectures have CONFIG_GENERIC_KERNEL_THREAD CONFIG_GENERIC_KERNEL_EXECVE __ARCH_WANT_SYS_EXECVE None of them have __ARCH_WANT_KERNEL_EXECVE and there are only two callers of kernel_execve() (which is a trivial wrapper for do_execve() now) left. Kill the conditionals and make both callers use do_execve(). Signed-off-by: Al Viro --- arch/arm/include/asm/unistd.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 7cd13cc6262..21a2700d295 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -41,7 +41,6 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_SOCKETCALL #endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE -- cgit v1.2.3 From eed8812387e201e158059fe6fb908f8a662084dd Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 31 Jan 2013 09:26:06 -0600 Subject: ARM: scu: add empty scu_enable for !CONFIG_SMP Add an empty version of scu_enable for !SMP builds. This fixes compile error for highbank suspend code on !SMP builds. Signed-off-by: Rob Herring --- arch/arm/include/asm/smp_scu.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 4eb6d005ffa..86dff32a073 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -7,8 +7,14 @@ #ifndef __ASSEMBLER__ unsigned int scu_get_core_count(void __iomem *); -void scu_enable(void __iomem *); int scu_power_mode(void __iomem *, unsigned int); + +#ifdef CONFIG_SMP +void scu_enable(void __iomem *scu_base); +#else +static inline void scu_enable(void __iomem *scu_base) {} +#endif + #endif #endif -- cgit v1.2.3 From 79d1f5c9acf9fc8d06e5537083b19114ce87159f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 8 Feb 2013 12:52:29 +0100 Subject: ARM: 7641/1: memory: fix broken mmap by ensuring TASK_UNMAPPED_BASE is aligned We have received multiple reports of mmap failures when running with a 2:2 vm split. These manifest as either -EINVAL with a non page-aligned address (ending 0xaaa) or a SEGV, depending on the application. The issue is commonly observed in children of make, which appears to use bottom-up mmap (assumedly because it changes the stack rlimit). Further investigation reveals that this regression was triggered by 394ef6403abc ("mm: use vm_unmapped_area() on arm architecture"), whereby TASK_UNMAPPED_BASE is no longer page-aligned for bottom-up mmap, causing get_unmapped_area to choke on misaligned addressed. This patch fixes the problem by defining TASK_UNMAPPED_BASE in terms of TASK_SIZE and explicitly aligns the result to 16M, matching the other end of the heap. 
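[Illustration, not part of the patch] To make the arithmetic concrete, a small standalone sketch reproducing the numbers for a 2:2 split, i.e. assuming CONFIG_PAGE_OFFSET = 0x80000000; ALIGN and SZ_16M are re-defined locally to mirror the kernel's definitions:

	#include <stdio.h>

	#define SZ_16M		0x01000000UL
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long page_offset = 0x80000000UL;			/* 2:2 split */
		unsigned long task_size   = page_offset - 0x01000000UL;	/* 0x7f000000 */

		/* old: PAGE_OFFSET / 3 -> 0x2aaaaaaa, not page aligned */
		printf("old TASK_UNMAPPED_BASE = %#lx\n", page_offset / 3);

		/* new: ALIGN(TASK_SIZE / 3, SZ_16M) -> 0x2b000000 */
		printf("new TASK_UNMAPPED_BASE = %#lx\n", ALIGN(task_size / 3, SZ_16M));

		return 0;
	}

The old value ends in ...aaa, matching the non-page-aligned addresses quoted in the report, while the new value is 16M aligned.
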
Acked-by: Nicolas Pitre Reported-by: Steve Capper Reported-by: Jean-Francois Moine Reported-by: Christoffer Dall Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03aa981..1c4df27f933 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -37,7 +37,7 @@ */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) +#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) /* * The maximum size of a 26-bit user space task. -- cgit v1.2.3