author    Catalin Marinas <catalin.marinas@arm.com>      2008-08-28 11:22:32 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>     2008-09-01 12:06:34 +0100
commit    93ed3970114983543bbebd195bef65db84444ea2 (patch)
tree      9df88b61a2a7b3cc493c6cfc5f4848448250f6b5 /arch/arm/kernel
parent    8d5796d2ec6b5a4e7a52861144e63af438d6f8f7 (diff)
[ARM] 5227/1: Add the ENDPROC declarations to the .S files
This declaration specifies the "function" type and size for various
assembly functions, mainly needed for generating the correct branch
instructions in Thumb-2.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
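For reference, ENTRY() and ENDPROC() are the linkage macros from
include/linux/linkage.h. Below is a simplified sketch of how they expanded
around this kernel release; the exact definitions vary by version, and
arch/arm overrides ENDPROC to use %function, since '@' begins a comment in
ARM gas where the generic header uses @function:

	/* include/linux/linkage.h, simplified */
	#define ENTRY(name) \
		.globl name;		/* export the symbol */ \
		ALIGN;			/* arch-specific alignment */ \
		name:

	#define END(name) \
		.size name, .-name	/* record the symbol's size */

	/* ARM override (arch/arm/include/asm/linkage.h) */
	#define ENDPROC(name) \
		.type name, %function;	/* mark the symbol as STT_FUNC */ \
		END(name)

So each ENDPROC(foo) added by this patch marks foo as a function symbol and
records its size, giving the toolchain the information it needs for Thumb-2
branch generation (and, incidentally, helping tools such as debuggers and
stack analyzers delimit the routine).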
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/debug.S         |  5
-rw-r--r--  arch/arm/kernel/entry-armv.S    | 16
-rw-r--r--  arch/arm/kernel/entry-common.S  | 25
-rw-r--r--  arch/arm/kernel/head-common.S   | 19
-rw-r--r--  arch/arm/kernel/head-nommu.S    |  4
-rw-r--r--  arch/arm/kernel/head.S          | 12
6 files changed, 62 insertions(+), 19 deletions(-)
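Roughly speaking, the Thumb-2 benefit is interworking: the linker uses a
function symbol's type (together with its Thumb bit) to decide whether a
plain bl suffices or whether the branch must become blx or go through a
veneer. A hypothetical Thumb-2 caller sketches the idea (the caller is
illustrative and not part of this patch):

	.syntax unified
	.thumb
	caller:
		bl	printhex8	@ printhex8 typed as %function lets the
					@ linker determine the target's
					@ instruction-set state and fix up the
					@ branch for Thumb<->ARM interworking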
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 9550ff0ddde..f53c5829054 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -89,10 +89,12 @@
ENTRY(printhex8)
mov r1, #8
b printhex
+ENDPROC(printhex8)
ENTRY(printhex4)
mov r1, #4
b printhex
+ENDPROC(printhex4)
ENTRY(printhex2)
mov r1, #2
@@ -110,6 +112,7 @@ printhex: adr r2, hexbuf
bne 1b
mov r0, r2
b printascii
+ENDPROC(printhex2)
.ltorg
@@ -127,11 +130,13 @@ ENTRY(printascii)
teqne r1, #0
bne 1b
mov pc, lr
+ENDPROC(printascii)
ENTRY(printch)
addruart r3
mov r1, r0
mov r0, #0
b 1b
+ENDPROC(printch)
hexbuf: .space 16
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 617e509d60d..77b04747553 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -76,14 +76,17 @@
__pabt_invalid:
inv_entry BAD_PREFETCH
b common_invalid
+ENDPROC(__pabt_invalid)
__dabt_invalid:
inv_entry BAD_DATA
b common_invalid
+ENDPROC(__dabt_invalid)
__irq_invalid:
inv_entry BAD_IRQ
b common_invalid
+ENDPROC(__irq_invalid)
__und_invalid:
inv_entry BAD_UNDEFINSTR
@@ -107,6 +110,7 @@ common_invalid:
mov r0, sp
b bad_mode
+ENDPROC(__und_invalid)
/*
* SVC mode handlers
@@ -192,6 +196,7 @@ __dabt_svc:
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__dabt_svc)
.align 5
__irq_svc:
@@ -223,6 +228,7 @@ __irq_svc:
bleq trace_hardirqs_on
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__irq_svc)
.ltorg
@@ -272,6 +278,7 @@ __und_svc:
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
msr spsr_cxsf, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ENDPROC(__und_svc)
.align 5
__pabt_svc:
@@ -313,6 +320,7 @@ __pabt_svc:
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__pabt_svc)
.align 5
.LCcralign:
@@ -412,6 +420,7 @@ __dabt_usr:
mov r2, sp
adr lr, ret_from_exception
b do_DataAbort
+ENDPROC(__dabt_usr)
.align 5
__irq_usr:
@@ -441,6 +450,7 @@ __irq_usr:
mov why, #0
b ret_to_user
+ENDPROC(__irq_usr)
.ltorg
@@ -474,6 +484,7 @@ __und_usr:
#else
b __und_usr_unknown
#endif
+ENDPROC(__und_usr)
@
@ fallthrough to call_fpe
@@ -642,6 +653,7 @@ __und_usr_unknown:
mov r0, sp
adr lr, ret_from_exception
b do_undefinstr
+ENDPROC(__und_usr_unknown)
.align 5
__pabt_usr:
@@ -666,6 +678,8 @@ ENTRY(ret_from_exception)
get_thread_info tsk
mov why, #0
b ret_to_user
+ENDPROC(__pabt_usr)
+ENDPROC(ret_from_exception)
/*
* Register switch for ARMv3 and ARMv4 processors
@@ -702,6 +716,7 @@ ENTRY(__switch_to)
bl atomic_notifier_call_chain
mov r0, r5
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ENDPROC(__switch_to)
__INIT
@@ -1029,6 +1044,7 @@ vector_\name:
mov r0, sp
ldr lr, [pc, lr, lsl #2]
movs pc, lr @ branch to handler in SVC mode
+ENDPROC(vector_\name)
.endm
.globl __stubs_start
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 060d7e2e9f6..3aa14dcc5ba 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -77,6 +77,7 @@ no_work_pending:
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
movs pc, lr @ return & move spsr_svc into cpsr
+ENDPROC(ret_to_user)
/*
* This is how we return from a fork.
@@ -92,7 +93,7 @@ ENTRY(ret_from_fork)
mov r0, #1 @ trace exit [IP = 1]
bl syscall_trace
b ret_slow_syscall
-
+ENDPROC(ret_from_fork)
.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
@@ -269,6 +270,7 @@ ENTRY(vector_swi)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
b sys_ni_syscall @ not private func
+ENDPROC(vector_swi)
/*
* This is the really slow path. We're going to be doing
@@ -326,7 +328,6 @@ ENTRY(sys_call_table)
*/
@ r0 = syscall number
@ r8 = syscall table
- .type sys_syscall, #function
sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
@@ -338,53 +339,65 @@ sys_syscall:
movlo r3, r4
ldrlo pc, [tbl, scno, lsl #2]
b sys_ni_syscall
+ENDPROC(sys_syscall)
sys_fork_wrapper:
add r0, sp, #S_OFF
b sys_fork
+ENDPROC(sys_fork_wrapper)
sys_vfork_wrapper:
add r0, sp, #S_OFF
b sys_vfork
+ENDPROC(sys_vfork_wrapper)
sys_execve_wrapper:
add r3, sp, #S_OFF
b sys_execve
+ENDPROC(sys_execve_wrapper)
sys_clone_wrapper:
add ip, sp, #S_OFF
str ip, [sp, #4]
b sys_clone
+ENDPROC(sys_clone_wrapper)
sys_sigsuspend_wrapper:
add r3, sp, #S_OFF
b sys_sigsuspend
+ENDPROC(sys_sigsuspend_wrapper)
sys_rt_sigsuspend_wrapper:
add r2, sp, #S_OFF
b sys_rt_sigsuspend
+ENDPROC(sys_rt_sigsuspend_wrapper)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
b sys_sigreturn
+ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
b sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
sys_sigaltstack_wrapper:
ldr r2, [sp, #S_OFF + S_SP]
b do_sigaltstack
+ENDPROC(sys_sigaltstack_wrapper)
sys_statfs64_wrapper:
teq r1, #88
moveq r1, #84
b sys_statfs64
+ENDPROC(sys_statfs64_wrapper)
sys_fstatfs64_wrapper:
teq r1, #88
moveq r1, #84
b sys_fstatfs64
+ENDPROC(sys_fstatfs64_wrapper)
/*
* Note: off_4k (r5) is always units of 4K. If we can't do the requested
@@ -402,11 +415,14 @@ sys_mmap2:
str r5, [sp, #4]
b do_mmap2
#endif
+ENDPROC(sys_mmap2)
ENTRY(pabort_ifar)
mrc p15, 0, r0, cr6, cr0, 2
ENTRY(pabort_noifar)
mov pc, lr
+ENDPROC(pabort_ifar)
+ENDPROC(pabort_noifar)
#ifdef CONFIG_OABI_COMPAT
@@ -417,26 +433,31 @@ ENTRY(pabort_noifar)
sys_oabi_pread64:
stmia sp, {r3, r4}
b sys_pread64
+ENDPROC(sys_oabi_pread64)
sys_oabi_pwrite64:
stmia sp, {r3, r4}
b sys_pwrite64
+ENDPROC(sys_oabi_pwrite64)
sys_oabi_truncate64:
mov r3, r2
mov r2, r1
b sys_truncate64
+ENDPROC(sys_oabi_truncate64)
sys_oabi_ftruncate64:
mov r3, r2
mov r2, r1
b sys_ftruncate64
+ENDPROC(sys_oabi_ftruncate64)
sys_oabi_readahead:
str r3, [sp]
mov r3, r2
mov r2, r1
b sys_readahead
+ENDPROC(sys_oabi_readahead)
/*
* Let's declare a second syscall table for old ABI binaries
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 1c3c6ea5f9e..bde52df1c66 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -36,7 +36,6 @@ __switch_data:
* r2 = atags pointer
* r9 = processor ID
*/
- .type __mmap_switched, %function
__mmap_switched:
adr r3, __switch_data + 4
@@ -59,6 +58,7 @@ __mmap_switched:
bic r4, r0, #CR_A @ Clear 'A' bit
stmia r7, {r0, r4} @ Save control register values
b start_kernel
+ENDPROC(__mmap_switched)
/*
* Exception handling. Something went wrong and we can't proceed. We
@@ -69,8 +69,6 @@ __mmap_switched:
* and hope for the best (useful if bootloader fails to pass a proper
* machine ID for example).
*/
-
- .type __error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
@@ -84,8 +82,8 @@ str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
str_p2: .asciz ").\n"
.align
#endif
+ENDPROC(__error_p)
- .type __error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
@@ -115,13 +113,14 @@ __error_a:
adr r0, str_a3
bl printascii
b __error
+ENDPROC(__error_a)
+
str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#endif
- .type __error, %function
__error:
#ifdef CONFIG_ARCH_RPC
/*
@@ -138,6 +137,7 @@ __error:
#endif
1: mov r0, r0
b 1b
+ENDPROC(__error)
/*
@@ -153,7 +153,6 @@ __error:
* r5 = proc_info pointer in physical address space
* r9 = cpuid (preserved)
*/
- .type __lookup_processor_type, %function
__lookup_processor_type:
adr r3, 3f
ldmda r3, {r5 - r7}
@@ -169,6 +168,7 @@ __lookup_processor_type:
blo 1b
mov r5, #0 @ unknown processor
2: mov pc, lr
+ENDPROC(__lookup_processor_type)
/*
* This provides a C-API version of the above function.
@@ -179,6 +179,7 @@ ENTRY(lookup_processor_type)
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r7, r9, pc}
+ENDPROC(lookup_processor_type)
/*
* Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
@@ -201,7 +202,6 @@ ENTRY(lookup_processor_type)
* r3, r4, r6 corrupted
* r5 = mach_info pointer in physical address space
*/
- .type __lookup_machine_type, %function
__lookup_machine_type:
adr r3, 3b
ldmia r3, {r4, r5, r6}
@@ -216,6 +216,7 @@ __lookup_machine_type:
blo 1b
mov r5, #0 @ unknown machine
2: mov pc, lr
+ENDPROC(__lookup_machine_type)
/*
* This provides a C-API version of the above function.
@@ -226,6 +227,7 @@ ENTRY(lookup_machine_type)
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}
+ENDPROC(lookup_machine_type)
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
@@ -239,8 +241,6 @@ ENTRY(lookup_machine_type)
* r2 either valid atags pointer, or zero
* r5, r6 corrupted
*/
-
- .type __vet_atags, %function
__vet_atags:
tst r2, #0x3 @ aligned?
bne 1f
@@ -257,3 +257,4 @@ __vet_atags:
1: mov r2, #0
mov pc, lr
+ENDPROC(__vet_atags)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 27329bd3203..cc87e1765ed 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -33,7 +33,6 @@
*
*/
.section ".text.head", "ax"
- .type stext, %function
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
@@ -53,11 +52,11 @@ ENTRY(stext)
@ the initialization is done
adr lr, __after_proc_init @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
/*
* Set the Control Register and Read the process ID.
*/
- .type __after_proc_init, %function
__after_proc_init:
#ifdef CONFIG_CPU_CP15
mrc p15, 0, r0, c1, c0, 0 @ read control reg
@@ -85,6 +84,7 @@ __after_proc_init:
mov pc, r13 @ clear the BSS and jump
@ to start_kernel
+ENDPROC(__after_proc_init)
.ltorg
#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bff4c6e90dd..21e17dc94cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -75,7 +75,6 @@
* circumstances, zImage) is for.
*/
.section ".text.head", "ax"
- .type stext, %function
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
@@ -100,9 +99,9 @@ ENTRY(stext)
@ mmu has been enabled
adr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
#if defined(CONFIG_SMP)
- .type secondary_startup, #function
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
@@ -128,6 +127,7 @@ ENTRY(secondary_startup)
adr lr, __enable_mmu @ return address
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
@ (return control reg)
+ENDPROC(secondary_startup)
/*
* r6 = &secondary_data
@@ -136,6 +136,7 @@ ENTRY(__secondary_switched)
ldr sp, [r7, #4] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
+ENDPROC(__secondary_switched)
.type __secondary_data, %object
__secondary_data:
@@ -151,7 +152,6 @@ __secondary_data:
* this is just loading the page table pointer and domain access
* registers.
*/
- .type __enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
@@ -174,6 +174,7 @@ __enable_mmu:
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
b __turn_mmu_on
+ENDPROC(__enable_mmu)
/*
* Enable the MMU. This completely changes the structure of the visible
@@ -187,7 +188,6 @@ __enable_mmu:
* other registers depend on the function called upon completion
*/
.align 5
- .type __turn_mmu_on, %function
__turn_mmu_on:
mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
@@ -195,7 +195,7 @@ __turn_mmu_on:
mov r3, r3
mov r3, r3
mov pc, r13
-
+ENDPROC(__turn_mmu_on)
/*
@@ -211,7 +211,6 @@ __turn_mmu_on:
* r0, r3, r6, r7 corrupted
* r4 = physical page table address
*/
- .type __create_page_tables, %function
__create_page_tables:
pgtbl r4 @ page table address
@@ -325,6 +324,7 @@ __create_page_tables:
#endif
#endif
mov pc, lr
+ENDPROC(__create_page_tables)
.ltorg
#include "head-common.S"