
sh: kdump support.

This adds support for kexec based crash dumps.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Author: Paul Mundt <lethal@linux-sh.org>
Date:   2007-04-27 11:25:57 +0900
commit 4d5ade5b29, parent db62e5bd29
6 changed files with 129 additions and 9 deletions

arch/sh/Kconfig

@@ -551,6 +551,20 @@ config KEXEC
 	  support. As of this writing the exact hardware interface is
 	  strongly in flux, so no good recommendation can be made.
 
+config CRASH_DUMP
+	bool "kernel crash dumps (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Generate crash dump after being started by kexec.
+	  This should be normally only set in special crash dump kernels
+	  which are loaded in the main kernel with kexec-tools into
+	  a specially reserved region and then later executed after
+	  a crash by kdump/kexec. The crash dump kernel must be compiled
+	  to a memory address not used by the main kernel using
+	  MEMORY_START.
+
+	  For more details see Documentation/kdump/kdump.txt
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
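The help text above is the whole kdump flow in miniature: a secondary kernel, linked at a different MEMORY_START, is preloaded into the reserved region with kexec-tools and booted after a panic, at which point it "generates the crash dump" by reading the old kernel's memory. That memory is exposed to the crash kernel as an ELF core image at /proc/vmcore, served by the copy_oldmem_page() hook added below. A minimal userspace sketch of taking the dump; the output path is a made-up example and real tools do considerably more:

/* Illustrative only: copy the previous kernel's memory image out of
 * /proc/vmcore. The output path is an example, error handling trimmed. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("/proc/vmcore", O_RDONLY);
	int out = open("/var/crash/vmcore", O_WRONLY | O_CREAT | O_TRUNC, 0600);

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	close(out);
	close(in);
	return 0;
}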

arch/sh/kernel/Makefile

@@ -20,5 +20,6 @@ obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o

arch/sh/kernel/crash_dump.c

@@ -0,0 +1,46 @@
+/*
+ * crash_dump.c - Memory preserving reboot related code.
+ *
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
+ */
+
+#include <linux/errno.h>
+#include <linux/crash_dump.h>
+#include <linux/io.h>
+#include <asm/uaccess.h>
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ *	space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ *	otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+			 size_t csize, unsigned long offset, int userbuf)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+
+	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+
+	if (userbuf) {
+		if (copy_to_user(buf, (vaddr + offset), csize)) {
+			iounmap(vaddr);
+			return -EFAULT;
+		}
+	} else
+		memcpy(buf, (vaddr + offset), csize);
+
+	iounmap(vaddr);
+	return csize;
+}
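A note on how this hook is consumed: copy_oldmem_page() is the only architecture-specific piece the generic /proc/vmcore code needs. Simplified from fs/proc/vmcore.c of the same era (the saved_max_pfn bounds check and some error paths trimmed), the generic reader walks the requested range one page at a time and lets the architecture do the actual mapping:

/* Simplified sketch of the generic caller in fs/proc/vmcore.c;
 * not part of this commit. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* hand one page to the arch hook added above */
		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}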

arch/sh/kernel/machine_kexec.c

@@ -59,13 +59,13 @@ static void kexec_info(struct kimage *image)
 		printk("  segment[%d]: 0x%08x - 0x%08x (0x%08x)\n",
 		       i,
 		       (unsigned int)image->segment[i].mem,
-		       (unsigned int)image->segment[i].mem + image->segment[i].memsz,
+		       (unsigned int)image->segment[i].mem +
+				     image->segment[i].memsz,
 		       (unsigned int)image->segment[i].memsz);
 	}
 	printk("  start     : 0x%08x\n\n", (unsigned int)image->start);
 }
 
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
@@ -101,6 +101,27 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
-	(*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg);
+	(*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg);
 }
+
+/* crashkernel=size@addr specifies the location to reserve for
+ * a crash kernel. By reserving this memory we guarantee
+ * that linux never sets it up as a DMA target.
+ * Useful for holding code to do something appropriate
+ * after a kernel panic.
+ */
+static int __init parse_crashkernel(char *arg)
+{
+	unsigned long size, base;
+	size = memparse(arg, &arg);
+	if (*arg == '@') {
+		base = memparse(arg+1, &arg);
+		/* FIXME: Do I want a sanity check
+		 * to validate the memory range?
+		 */
+		crashk_res.start = base;
+		crashk_res.end   = base + size - 1;
+	}
+	return 0;
+}
+early_param("crashkernel", parse_crashkernel);
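To make the parse concrete: memparse() (from lib/cmdline.c) reads a number with an optional K/M/G suffix, so booting with crashkernel=16M@0x10000000 sets crashk_res.start = 0x10000000 and crashk_res.end = 0x10ffffff. A rough userspace stand-in for experimenting with the syntax; parse_mem() here only approximates memparse() (uppercase suffixes only):

/* Hypothetical stand-in for the kernel's memparse()-based parsing,
 * runnable in userspace. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_mem(const char *s, char **end)
{
	unsigned long v = strtoul(s, end, 0);

	switch (**end) {	/* scale by the size suffix, if any */
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10;
		(*end)++;
	}
	return v;
}

int main(void)
{
	char *arg = "16M@0x10000000";
	char *p;
	unsigned long size = parse_mem(arg, &p);

	if (*p == '@') {
		unsigned long base = parse_mem(p + 1, &p);
		/* prints start=0x10000000 end=0x10ffffff */
		printf("start=0x%08lx end=0x%08lx\n",
		       base, base + size - 1);
	}
	return 0;
}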

arch/sh/kernel/setup.c

@@ -20,6 +20,7 @@
 #include <linux/pfn.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/kexec.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/sections.h>
@@ -287,6 +288,11 @@ void __init setup_bootmem_allocator(unsigned long start_pfn)
 		}
 	}
 #endif
+#ifdef CONFIG_KEXEC
+	if (crashk_res.start != crashk_res.end)
+		reserve_bootmem(crashk_res.start,
+			crashk_res.end - crashk_res.start + 1);
+#endif
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
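crashk_res itself is not defined anywhere in this patch; it comes from the generic kexec code. For reference, kernel/kexec.c declares it roughly as below, and because start and end both default to zero, the start != end test above is enough to detect that a crashkernel= region was actually parsed:

/* For reference: generic definition in kernel/kexec.c (not in this diff). */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};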

include/asm-sh/kexec.h

@@ -1,5 +1,8 @@
-#ifndef _SH_KEXEC_H
-#define _SH_KEXEC_H
+#ifndef __ASM_SH_KEXEC_H
+#define __ASM_SH_KEXEC_H
+
+#include <asm/ptrace.h>
+#include <asm/string.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -25,8 +28,37 @@
 #define MAX_NOTE_BYTES 1024
 
 /* Provide a dummy definition to avoid build failures. */
 static inline void crash_setup_regs(struct pt_regs *newregs,
-				    struct pt_regs *oldregs) { }
+				    struct pt_regs *oldregs)
+{
+	if (oldregs)
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	else {
+		__asm__ __volatile__ ("mov r0, %0" : "=r" (newregs->regs[0]));
+		__asm__ __volatile__ ("mov r1, %0" : "=r" (newregs->regs[1]));
+		__asm__ __volatile__ ("mov r2, %0" : "=r" (newregs->regs[2]));
+		__asm__ __volatile__ ("mov r3, %0" : "=r" (newregs->regs[3]));
+		__asm__ __volatile__ ("mov r4, %0" : "=r" (newregs->regs[4]));
+		__asm__ __volatile__ ("mov r5, %0" : "=r" (newregs->regs[5]));
+		__asm__ __volatile__ ("mov r6, %0" : "=r" (newregs->regs[6]));
+		__asm__ __volatile__ ("mov r7, %0" : "=r" (newregs->regs[7]));
+		__asm__ __volatile__ ("mov r8, %0" : "=r" (newregs->regs[8]));
+		__asm__ __volatile__ ("mov r9, %0" : "=r" (newregs->regs[9]));
+		__asm__ __volatile__ ("mov r10, %0" : "=r" (newregs->regs[10]));
+		__asm__ __volatile__ ("mov r11, %0" : "=r" (newregs->regs[11]));
+		__asm__ __volatile__ ("mov r12, %0" : "=r" (newregs->regs[12]));
+		__asm__ __volatile__ ("mov r13, %0" : "=r" (newregs->regs[13]));
+		__asm__ __volatile__ ("mov r14, %0" : "=r" (newregs->regs[14]));
+		__asm__ __volatile__ ("mov r15, %0" : "=r" (newregs->regs[15]));
 
-#endif /* _SH_KEXEC_H */
+		__asm__ __volatile__ ("sts pr, %0" : "=r" (newregs->pr));
+		__asm__ __volatile__ ("sts macl, %0" : "=r" (newregs->macl));
+		__asm__ __volatile__ ("sts mach, %0" : "=r" (newregs->mach));
+		__asm__ __volatile__ ("stc gbr, %0" : "=r" (newregs->gbr));
+		__asm__ __volatile__ ("stc sr, %0" : "=r" (newregs->sr));
+		newregs->pc = (unsigned long)current_text_addr();
+	}
+}
+
+#endif /* __ASM_SH_KEXEC_H */
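For context on the caller: crash_setup_regs() is invoked from the generic crash path in kernel/kexec.c, which snapshots the registers and then hands control to the architecture. A simplified sketch of that path (the real function also serializes on the kexec lock):

/* Simplified sketch of the generic caller in kernel/kexec.c;
 * not part of this commit, kexec_lock handling elided. */
void crash_kexec(struct pt_regs *regs)
{
	if (kexec_crash_image) {
		struct pt_regs fixed_regs;

		/* regs is the trap frame if we faulted; otherwise the
		 * inline asm above captures the live registers */
		crash_setup_regs(&fixed_regs, regs);
		machine_crash_shutdown(&fixed_regs);
		machine_kexec(kexec_crash_image);
	}
}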