From a4a12e008e292a81d312659529b71be2026ab355 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Fri, 30 Nov 2012 11:56:05 +0000
Subject: ARM: virt: Avoid bx instruction for compatibility with <=ARMv4

Non-T variants of ARMv4 do not support the bx instruction.

However, __hyp_stub_install is always called from the same instruction
set used to build the bulk of the kernel, so bx should not be necessary.

This patch uses the traditional "mov pc" instead of bx.

Cc:
Signed-off-by: Dave Martin
[will: fixed up remaining bx instruction]
Signed-off-by: Will Deacon
---
 arch/arm/kernel/hyp-stub.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebc..3c60256d392 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
	 * immediately.
	 */
	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr

	/*
	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
	 */

	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode

 /*
  * Configure HSCTLR to set correct exception endianness/instruction set
@@ -200,7 +200,7 @@ ENDPROC(__hyp_get_vectors)
	@ fall through
 ENTRY(__hyp_set_vectors)
	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)

 #ifndef ZIMAGE
--
cgit v1.2.3

From 6e484be1ccca3ea495db45900fd42aac8d49d754 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 4 Jan 2013 17:44:14 +0000
Subject: ARM: virt: boot secondary CPUs through the right entry point

Secondary CPUs should use the __hyp_stub_install_secondary entry point,
so boot mode inconsistencies can be detected.

Cc:
Acked-by: Dave Martin
Reported-by: Ian Molton
Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 arch/arm/kernel/head.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f466..16abc8322f7 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -331,7 +331,7 @@ ENTRY(secondary_startup)
	 * as it has already been validated by the primary processor.
	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
	safe_svcmode_maskall r9

--
cgit v1.2.3

From d01723479e6a6c70c83295f7847477a016d5e14a Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 4 Jan 2013 17:44:15 +0000
Subject: ARM: virt: simplify __hyp_stub_install epilog

__hyp_stub_install duplicates quite a bit of safe_svcmode_maskall by
forcing the CPU back to SVC. This is unnecessary, as safe_svcmode_maskall
is called just after.

Furthermore, the way we build SPSR_hyp is buggy as we fail to mask the
interrupts, leading to interesting behaviours on TC2 + UEFI.

The fix is to simply remove this code and rely on safe_svcmode_maskall
to do the right thing.
Cc:
Reviewed-by: Dave Martin
Reported-by: Harry Liebel
Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 arch/arm/kernel/hyp-stub.S | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 3c60256d392..1315c4ccfa5 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
	 * Eventually, CPU-specific code might be needed -- assume not for now
	 *
	 * This code relies on the "eret" instruction to synchronize the
-	 * various coprocessor accesses.
+	 * various coprocessor accesses. This is done when we switch to SVC
+	 * (see safe_svcmode_maskall).
	 */
	@ Now install the hypervisor stub:
	adr	r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
 1:
 #endif

-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-	@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)

 __hyp_stub_do_trap:
--
cgit v1.2.3

From 568dca15aa2a0f4ddee255894ec393a159f13147 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Mon, 14 Jan 2013 19:50:42 +0100
Subject: ARM: 7627/1: Predicate preempt logic on PREEMPT_COUNT not PREEMPT alone

Patrik Kluba reports that the preempt count becomes invalid due to the
preempt_enable() call being unbalanced with a preempt_disable() call in
the vfp assembly routines. This happens because preempt_enable() and
preempt_disable() update preempt counts under PREEMPT_COUNT=y but the
vfp assembly routines do so under PREEMPT=y. In a configuration where
PREEMPT=n and DEBUG_ATOMIC_SLEEP=y, PREEMPT_COUNT=y and so the
preempt_enable() call in VFP_bounce() keeps subtracting from the preempt
count until it goes negative.

Fix this by always using PREEMPT_COUNT to decide when to update preempt
counts in the ARM assembly code.

Signed-off-by: Stephen Boyd
Reported-by: Patrik Kluba
Tested-by: Patrik Kluba
Cc: # 2.6.30
Signed-off-by: Russell King
---
 arch/arm/vfp/entry.S | 6 +++---
 arch/arm/vfp/vfphw.S | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index cc926c98598..323ce1a62bb 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	add	r11, r4, #1		@ increment it
	str	r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)

 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
	__INIT

 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index ea0349f6358..dd5e56f95f3 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
					@ else it's one 32-bit instruction, so
					@ always subtract 4 from the following
					@ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
	@ not recognised by VFP

	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
--
cgit v1.2.3
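The imbalance described in the patch above is easiest to see with the two
config symbols spelled out. What follows is a minimal user-space sketch,
not kernel code: preempt_count, vfp_asm_entry() and vfp_c_exit() are
invented stand-ins for thread_info->preempt_count, the increment in
do_vfp and the preempt_enable() in VFP_bounce(), and the #ifdefs model
the old, mismatched guards.

/* Sketch of the PREEMPT vs PREEMPT_COUNT mismatch; build with: gcc -std=c99 sketch.c */
#include <stdio.h>

/* Model the problematic configuration: PREEMPT=n, but DEBUG_ATOMIC_SLEEP
 * selects PREEMPT_COUNT, so the C side still maintains the counter. */
#define CONFIG_PREEMPT_COUNT 1
/* CONFIG_PREEMPT intentionally not defined */

static int preempt_count;		/* stand-in for thread_info->preempt_count */

static void vfp_asm_entry(void)		/* stand-in for the increment in do_vfp */
{
#ifdef CONFIG_PREEMPT			/* old guard: compiled out in this config */
	preempt_count++;
#endif
}

static void vfp_c_exit(void)		/* stand-in for preempt_enable() in VFP_bounce() */
{
#ifdef CONFIG_PREEMPT_COUNT		/* active whenever PREEMPT_COUNT=y */
	preempt_count--;
#endif
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		vfp_asm_entry();
		vfp_c_exit();
	}
	/* Prints -3: every bounce leaks one decrement because the two sides
	 * disagree about when the counter is maintained. Guarding the asm
	 * side on CONFIG_PREEMPT_COUNT as well keeps it balanced. */
	printf("preempt_count = %d\n", preempt_count);
	return 0;
}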
From 6f16f4998f98e42e3f2dedf663cfb691ff0324af Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 15 Jan 2013 18:51:32 +0100
Subject: ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area

We currently use a temporary 1MB section aligned to a 1MB boundary for
mapping the provided device tree until the final page table is created.

However, if the device tree happens to cross that 1MB boundary, the end
of it remains unmapped and the kernel crashes when it attempts to access
it. Given no restriction on the location of that DTB, it could end up
with only a few bytes mapped at the end of a section.

Solve this issue by mapping two consecutive sections.

Signed-off-by: Nicolas Pitre
Tested-by: Sascha Hauer
Tested-by: Tomasz Figa
Cc: stable@vger.kernel.org
Signed-off-by: Russell King
---
 arch/arm/kernel/head.S | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f466..61fcb18c7e5 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:

	/*
	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

 #ifdef CONFIG_DEBUG_LL
--
cgit v1.2.3
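The failure mode fixed above is purely arithmetic: a blob smaller than a
section can still straddle a section boundary, so a single 1MB mapping of
its base covers only part of it. The stand-alone sketch below is plain C
with an invented dtb_sections() helper; the 1MB SECTION_SHIFT matches the
sections used in head.S but is hard-coded only for illustration.

/* Sketch: count how many 1MB sections an ATAG/DTB blob touches. */
#include <stdio.h>

#define SECTION_SHIFT	20			/* 1MB sections, as in head.S */
#define SECTION_SIZE	(1UL << SECTION_SHIFT)

/* Number of sections touched by the range [base, base + size). */
static unsigned long dtb_sections(unsigned long base, unsigned long size)
{
	unsigned long first = base >> SECTION_SHIFT;
	unsigned long last = (base + size - 1) >> SECTION_SHIFT;

	return last - first + 1;
}

int main(void)
{
	/* A 48KB DTB placed 8KB before a 1MB boundary: a single section
	 * mapped at its base covers only the first 8KB of it. */
	unsigned long base = 5 * SECTION_SIZE - 0x2000;
	unsigned long size = 0xc000;

	printf("sections touched: %lu\n", dtb_sections(base, size));	/* prints 2 */

	/* A blob of at most 1MB can never touch more than two consecutive
	 * sections, which is why mapping two of them is always enough. */
	return 0;
}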
From 15653371c67c3fbe359ae37b720639dd4c7b42c5 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 19 Jan 2013 11:05:57 +0000
Subject: ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem

Subhash Jadavani reported this partial backtrace:

Now consider this call stack from MMC block driver (this is on the ARMv7
based board):
[] (v7_dma_inv_range+0x30/0x48) from [] (dma_cache_maint_page+0x1c4/0x24c)
[] (dma_cache_maint_page+0x1c4/0x24c) from [] (___dma_page_cpu_to_dev+0x14/0x1c)
[] (___dma_page_cpu_to_dev+0x14/0x1c) from [] (dma_map_sg+0x3c/0x114)

This is caused by incrementing the struct page pointer, and running off
the end of the sparsemem page array. Fix this by incrementing by pfn
instead, and convert the pfn to a struct page.

Cc:
Suggested-by: James Bottomley
Tested-by: Subhash Jadavani
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b2fb87c869..076c26d4386 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
-	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

+		page = pfn_to_page(pfn);
+
		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
-			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
			op(vaddr, len, dir);
		}
		offset = 0;
-		page++;
+		pfn++;
		left -= len;
	} while (left);
 }
--
cgit v1.2.3

From 93d5bf073a1e01035be66dc41860b9ae9aa9ccfa Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Thu, 17 Jan 2013 07:18:04 +0100
Subject: ARM: 7629/1: mm: Fix missing XN flag for MT_MEMORY_SO

Commit 8fb54284ba6a {ARM: mm: Add strongly ordered descriptor support}
added the XN flag at section level but missed it at PTE level. Fix it by
adding L_PTE_XN to the MT_MEMORY_SO PTE descriptor.

Reported-by: Richard Woodruff
Signed-off-by: Santosh Shilimkar
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd..ce328c7f5c9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED,
+				L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
--
cgit v1.2.3
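Returning to the dma_cache_maint_page() fix above: the reason page++ is
unsafe is that with SPARSEMEM the struct page arrays are allocated per
memory section, so they are not contiguous across section boundaries.
The toy user-space model below (every name is invented; toy_pfn_to_page()
only mimics the shape of the real pfn_to_page()) contrasts the two
iteration styles.

/* Toy model of SPARSEMEM-style page lookup; not kernel code. */
#include <stdio.h>

#define PAGES_PER_SECTION	4
#define NR_SECTIONS		3

struct toy_page { unsigned long pfn; };

/* Each "memory section" owns its own, separately allocated page array. */
static struct toy_page section_a[PAGES_PER_SECTION];
static struct toy_page section_b[PAGES_PER_SECTION];
static struct toy_page section_c[PAGES_PER_SECTION];
static struct toy_page *sections[NR_SECTIONS] = { section_a, section_b, section_c };

static struct toy_page *toy_pfn_to_page(unsigned long pfn)
{
	return &sections[pfn / PAGES_PER_SECTION][pfn % PAGES_PER_SECTION];
}

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < NR_SECTIONS * PAGES_PER_SECTION; pfn++)
		toy_pfn_to_page(pfn)->pfn = pfn;

	/* Broken pattern (what the old loop did): pointer arithmetic walks
	 * straight off the end of section_a instead of into section_b. */
	struct toy_page *page = toy_pfn_to_page(0);
	page += PAGES_PER_SECTION;	/* one past section_a: dereferencing
					 * this would be undefined behaviour */
	(void)page;

	/* Fixed pattern (what the patch does): keep a pfn and look the page
	 * up on every iteration, which respects section boundaries. */
	for (pfn = 0; pfn < NR_SECTIONS * PAGES_PER_SECTION; pfn++)
		printf("pfn %lu -> struct page %p\n",
		       pfn, (void *)toy_pfn_to_page(pfn));

	return 0;
}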