Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug cleanups from Thomas Gleixner:
 "This series is merily a cleanup of code copied around in arch/* and
  not changing any of the real cpu hotplug horrors yet.  I wish I'd had
  something more substantial for 3.5, but I underestimated the lurking
  horror..."

Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
  um: Remove leftover declaration of alloc_task_struct_node()
  task_allocator: Use config switches instead of magic defines
  sparc: Use common threadinfo allocator
  score: Use common threadinfo allocator
  sh-use-common-threadinfo-allocator
  mn10300: Use common threadinfo allocator
  powerpc: Use common threadinfo allocator
  mips: Use common threadinfo allocator
  hexagon: Use common threadinfo allocator
  m32r: Use common threadinfo allocator
  frv: Use common threadinfo allocator
  cris: Use common threadinfo allocator
  x86: Use common threadinfo allocator
  c6x: Use common threadinfo allocator
  fork: Provide kmemcache based thread_info allocator
  tile: Use common threadinfo allocator
  fork: Provide weak arch_release_[task_struct|thread_info] functions
  fork: Move thread info gfp flags to header
  fork: Remove the weak insanity
  sh: Remove cpu_idle_wait()
  ...
Linus Torvalds 2012-05-21 19:43:57 -07:00
commit bf67f3a5c4
157 changed files with 415 additions and 1934 deletions
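
The change that nearly every per-architecture hunk below repeats is the new two-argument secondary-CPU bringup hook: __cpu_up() now receives the idle task from generic code, instead of each architecture calling fork_idle() (often via a workqueue) and caching the result itself. The following is a minimal sketch of what a generic caller looks like under that contract; the names idle_threads, get_idle_for() and bring_up_cpu() are illustrative, not the actual generic implementation.

/* Illustrative sketch only; the real generic idle-thread code may differ. */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

static struct task_struct *idle_threads[NR_CPUS];      /* hypothetical per-CPU cache */

static struct task_struct *get_idle_for(unsigned int cpu)
{
        struct task_struct *tsk = idle_threads[cpu];

        if (!tsk) {
                tsk = fork_idle(cpu);           /* first bring-up of this CPU */
                if (!IS_ERR(tsk))
                        idle_threads[cpu] = tsk;
        } else {
                init_idle(tsk, cpu);            /* reinitialize a cached idle task */
        }
        return tsk;
}

static int bring_up_cpu(unsigned int cpu)
{
        struct task_struct *tidle = get_idle_for(cpu);

        if (IS_ERR(tidle))
                return PTR_ERR(tidle);
        return __cpu_up(cpu, tidle);            /* arch hook, new two-argument signature */
}

Keeping the cache in one place is what lets every arch drop its private fork_idle()/init_idle() dance and its NR_CPUS-sized idle_thread_array copies.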

View File

@ -145,6 +145,21 @@ config HAVE_DMA_ATTRS
config USE_GENERIC_SMP_HELPERS
bool
config GENERIC_SMP_IDLE_THREAD
bool
# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
bool
# Select if arch has its private alloc_task_struct() function
config ARCH_TASK_STRUCT_ALLOCATOR
bool
# Select if arch has its private alloc_thread_info() function
config ARCH_THREAD_INFO_ALLOCATOR
bool
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
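
These four new switches give the core code a compile-time way to ask whether an architecture brings its own init_task, task_struct allocator or thread_info allocator, replacing the old __HAVE_ARCH_* magic defines that the later hunks delete from the per-arch headers. Below is a hedged sketch of how the core side can consume one of them; the exact kernel/fork.c wording is assumed, not quoted.

/* Sketch, assuming roughly this shape in kernel/fork.c. */
#include <linux/sched.h>
#include <linux/slab.h>

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
        return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
        kmem_cache_free(task_struct_cachep, tsk);
}
#endif  /* the arch did not select ARCH_TASK_STRUCT_ALLOCATOR */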

View File

@ -15,6 +15,7 @@ config ALPHA
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,

View File

@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
asflags-y := $(KBUILD_CFLAGS)
ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
obj-y := entry.o traps.o process.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
alpha_ksyms.o systbls.o err_common.o io.o

View File

@ -357,24 +357,10 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
* Bring one cpu online.
*/
static int __cpuinit
smp_boot_one_cpu(int cpuid)
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
struct task_struct *idle;
unsigned long timeout;
/* Cook up an idler for this guy. Note that the address we
give to kernel_thread is irrelevant -- it's going to start
where HWRPB.CPU_restart says to start. But this gets all
the other task-y sort of data structures set up like we
wish. We can't use kernel_thread since we must avoid
rescheduling the child. */
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpuid);
DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
cpuid, idle->state, idle->flags));
/* Signal the secondary to wait a moment. */
smp_secondary_alive = -1;
@ -487,9 +473,9 @@ smp_prepare_boot_cpu(void)
}
int __cpuinit
__cpu_up(unsigned int cpu)
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu);
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}

View File

@ -37,6 +37,7 @@ config ARM
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP
select HAVE_BPF_JIT
select GENERIC_SMP_IDLE_THREAD
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
@ -154,9 +155,6 @@ config ARCH_HAS_CPUFREQ
and that the relevant menu configurations are displayed for
it.
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
config GENERIC_HWEIGHT
bool
default y

View File

@ -117,7 +117,7 @@ KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/
CHECKFLAGS += -D__arm__
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
head-y := arch/arm/kernel/head$(MMUEXT).o
textofs-y := 0x00008000
textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
# We don't want the htc bootloader to corrupt kernel during resume

View File

@ -16,7 +16,6 @@
struct cpuinfo_arm {
struct cpu cpu;
#ifdef CONFIG_SMP
struct task_struct *idle;
unsigned int loops_per_jiffy;
#endif
};

View File

@ -88,8 +88,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
void cpu_idle_wait(void);
/*
* Create a new kernel thread
*/

View File

@ -82,4 +82,4 @@ head-y := head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
extra-y := $(head-y) init_task.o vmlinux.lds
extra-y := $(head-y) vmlinux.lds

View File

@ -1,37 +0,0 @@
/*
* linux/arch/arm/kernel/init_task.c
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*
* The things we do for performance..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -157,26 +157,6 @@ EXPORT_SYMBOL(pm_power_off);
void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
static void do_nothing(void *unused)
{
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
* handler on SMP systems.
*
* Caller must have changed pm_idle to the new value before the call. Old
* pm_idle value will not be used by any CPU after the return of this function.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
* This is our default idle handler.
*/
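
The block deleted above is one of several near-identical cpu_idle_wait() copies (the ia64 and powerpc versions appear further down): a memory barrier followed by an empty cross-call that kicks every CPU out of the old pm_idle routine. The series hoists that pattern into one shared helper; the sketch below simply restates the deleted logic under an assumed generic name (mainline grew kick_all_cpus_sync() for this, but treat the name and location as assumptions).

#include <linux/smp.h>

static void do_nothing(void *unused)
{
}

/* Make every other online CPU run an empty IPI handler, so each one
 * falls out of whatever idle routine it was executing and re-reads
 * the (already updated) idle handler pointer. */
void kick_all_cpus_sync(void)
{
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}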

View File

@ -60,31 +60,10 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
int ret;
/*
* Spawn a new process manually, if not already done.
* Grab a pointer to its task struct so we can mess with it
*/
if (!idle) {
idle = fork_idle(cpu);
if (IS_ERR(idle)) {
printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
return PTR_ERR(idle);
}
ci->idle = idle;
} else {
/*
* Since this idle thread is being re-used, call
* init_idle() to reinitialize the thread structure.
*/
init_idle(idle, cpu);
}
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
@ -318,9 +297,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
per_cpu(cpu_data, cpu).idle = current;
}
void __init smp_prepare_cpus(unsigned int max_cpus)

View File

@ -8,7 +8,7 @@ obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
obj-y += syscall_table.o syscall-stubs.o irq.o
obj-y += setup.o traps.o ocd.o ptrace.o
obj-y += signal.o sys_avr32.o process.o time.o
obj-y += init_task.o switch_to.o cpu.o
obj-y += switch_to.o cpu.o
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o

View File

@ -1,31 +0,0 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure. Must be aligned on an 8192-byte boundary.
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -37,6 +37,7 @@ config BLACKFIN
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
config GENERIC_CSUM
def_bool y

View File

@ -109,8 +109,6 @@ KBUILD_AFLAGS += -mcpu=$(CPU_REV)
CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
CHECKFLAGS += -D__SILICON_REVISION__=$(CHECKFLAGS_SILICON) -D__bfin__
head-y := arch/$(ARCH)/kernel/init_task.o
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ arch/$(ARCH)/mach-common/
# If we have a machine-specific directory, then include it in the build.

View File

@ -2,7 +2,7 @@
# arch/blackfin/kernel/Makefile
#
extra-y := init_task.o vmlinux.lds
extra-y := vmlinux.lds
obj-y := \
entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \

View File

@ -1,32 +0,0 @@
/*
* Copyright 2004-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry.
*/
union thread_union init_thread_union
__init_task_data = {
INIT_THREAD_INFO(init_task)};

View File

@ -340,27 +340,10 @@ void smp_send_stop(void)
return;
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
if (idle) {
free_task(idle);
idle = NULL;
}
if (!idle) {
idle = fork_idle(cpu);
if (IS_ERR(idle)) {
printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
return PTR_ERR(idle);
}
ci->idle = idle;
} else {
init_idle(idle, cpu);
}
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
ret = platform_boot_secondary(cpu, idle);

View File

@ -20,11 +20,11 @@
#ifdef CONFIG_4KSTACKS
#define THREAD_SIZE 4096
#define THREAD_SHIFT 12
#define THREAD_ORDER 0
#define THREAD_SIZE_ORDER 0
#else
#define THREAD_SIZE 8192
#define THREAD_SHIFT 13
#define THREAD_ORDER 1
#define THREAD_SIZE_ORDER 1
#endif
#define THREAD_START_SP (THREAD_SIZE - 8)
@ -80,19 +80,6 @@ struct thread_info *current_thread_info(void)
return ti;
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif
#define alloc_thread_info_node(tsk, node) \
((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
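
Renaming THREAD_ORDER to THREAD_SIZE_ORDER and dropping the arch-private alloc/free macros is what lets the common code do the stack allocation for every architecture whose thread_info is at least a page. A hedged sketch of that default path follows, with THREADINFO_GFP standing in for the gfp flags that the "fork: Move thread info gfp flags to header" commit centralizes; the exact fork.c text is assumed.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Default path for THREAD_SIZE >= PAGE_SIZE; arches that select
 * ARCH_THREAD_INFO_ALLOCATOR keep their own implementation. */
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                                                  int node)
{
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);

        return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
        free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}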

View File

@ -26,22 +26,6 @@ void (*c6x_halt)(void);
extern asmlinkage void ret_from_fork(void);
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*/
union thread_union init_thread_union __init_task_data = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* power off function, if any
*/

View File

@ -49,6 +49,7 @@ config CRIS
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
config HZ
int

View File

@ -108,17 +108,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
/* Bring one cpu online.*/
static int __init
smp_boot_one_cpu(int cpuid)
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
unsigned timeout;
struct task_struct *idle;
cpumask_t cpu_mask;
cpumask_clear(&cpu_mask);
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
task_thread_info(idle)->cpu = cpuid;
/* Information to the CPU that is about to boot */
@ -142,9 +137,6 @@ smp_boot_one_cpu(int cpuid)
barrier();
}
put_task_struct(idle);
idle = NULL;
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
}
@ -207,9 +199,9 @@ int setup_profiling_timer(unsigned int multiplier)
*/
unsigned long cache_decay_ticks = 1;
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu);
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}

View File

@ -25,13 +25,12 @@ struct task_struct;
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
* normally, the stack is found by doing something like p + THREAD_SIZE
* in CRIS, a page is 8192 bytes, which seems like a sane size
*/
#define THREAD_SIZE PAGE_SIZE
#define KERNEL_STACK_SIZE PAGE_SIZE
#define THREAD_SIZE_ORDER (0)
/*
* At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.

View File

@ -65,12 +65,6 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#define alloc_thread_info_node(tsk, node) \
((struct thread_info *) __get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#endif /* !__ASSEMBLY__ */
/*

View File

@ -28,34 +28,6 @@
//#define DEBUG
/*
* Initial task structure. Make this a per-architecture thing,
* because different architectures tend to have different
* alignment requirements and potentially different initial
* setup.
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
* there would ever be a halt sequence (for power save when idle) with

View File

@ -81,7 +81,7 @@ ifdef CONFIG_DEBUG_INFO
KBUILD_AFLAGS += -Wa,--gdwarf2
endif
head-y := arch/frv/kernel/head.o arch/frv/kernel/init_task.o
head-y := arch/frv/kernel/head.o
core-y += arch/frv/kernel/ arch/frv/mm/
libs-y += arch/frv/lib/

View File

@ -21,8 +21,6 @@
#define THREAD_SIZE 8192
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
@ -82,19 +80,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#define current_thread_info() ({ __current_thread_info; })
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#define free_thread_info(info) kfree(info)
#endif /* __ASSEMBLY__ */
/*

View File

@ -5,7 +5,7 @@
heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o
heads-$(CONFIG_MMU) := head-mmu-fr451.o
extra-y:= head.o init_task.o vmlinux.lds
extra-y:= head.o vmlinux.lds
obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \

View File

@ -1,32 +0,0 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -43,21 +43,6 @@ asmlinkage void ret_from_fork(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
struct task_struct *alloc_task_struct_node(int node)
{
struct task_struct *p = kmalloc_node(THREAD_SIZE, GFP_KERNEL, node);
if (p)
atomic_set((atomic_t *)(p+1), 1);
return p;
}
void free_task_struct(struct task_struct *p)
{
if (atomic_dec_and_test((atomic_t *)(p+1)))
kfree(p);
}
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP

View File

@ -6,7 +6,7 @@ extra-y := vmlinux.lds
obj-y := process.o traps.o ptrace.o irq.o \
sys_h8300.o time.o signal.o \
setup.o gpio.o init_task.o syscalls.o \
setup.o gpio.o syscalls.o \
entry.o timer/
obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o

View File

@ -1,36 +0,0 @@
/*
* linux/arch/h8300/kernel/init_task.c
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
__asm__(".align 4");
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };

View File

@ -27,6 +27,7 @@ config HEXAGON
select HAVE_ARCH_TRACEHOOK
select NO_IOPORT
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
# mostly generic routines, with some accelerated ones
---help---
Qualcomm Hexagon is a processor architecture designed for high

View File

@ -45,8 +45,7 @@ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
libs-y += $(LIBGCC)
head-y := arch/hexagon/kernel/head.o \
arch/hexagon/kernel/init_task.o
head-y := arch/hexagon/kernel/head.o
core-y += arch/hexagon/kernel/ \
arch/hexagon/mm/ \

View File

@ -31,15 +31,7 @@
#define THREAD_SHIFT 12
#define THREAD_SIZE (1<<THREAD_SHIFT)
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#else /* don't use standard allocator */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
#endif
#ifndef __ASSEMBLY__

View File

@ -1,4 +1,4 @@
extra-y := head.o vmlinux.lds init_task.o
extra-y := head.o vmlinux.lds
obj-$(CONFIG_SMP) += smp.o topology.o

View File

@ -1,54 +0,0 @@
/*
* Init task definition
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*/
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"),
__aligned__(THREAD_SIZE))) = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -233,43 +233,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
/*
* Borrowed from PowerPC -- basically allow smaller kernel stacks if we
* go crazy with the page sizes.
*/
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
if (unlikely(ti == NULL))
return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
memset(ti, 0, THREAD_SIZE);
#endif
return ti;
}
void free_thread_info(struct thread_info *ti)
{
kmem_cache_free(thread_info_cache, ti);
}
/* Weak symbol; called by init/main.c */
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
/*
* Required placeholder.
*/
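
The block removed here (powerpc carries an almost verbatim twin, deleted further down) is what the "fork: Provide kmemcache based thread_info allocator" commit turns into common code for the THREAD_SIZE < PAGE_SIZE case. A sketch of the generic version, mirroring the deleted arch copies rather than quoting fork.c:

#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct kmem_cache *thread_info_cache;

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                                                  int node)
{
        return kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
}

static void free_thread_info(struct thread_info *ti)
{
        kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
        BUG_ON(thread_info_cache == NULL);
}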

View File

@ -196,18 +196,11 @@ void __cpuinit start_secondary(void)
* maintains control until "cpu_online(cpu)" is set.
*/
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct task_struct *idle;
struct thread_info *thread;
struct thread_info *thread = (struct thread_info *)idle->stack;
void *stack_start;
/* Create new init task for the CPU */
idle = fork_idle(cpu);
if (IS_ERR(idle))
panic(KERN_ERR "fork_idle failed\n");
thread = (struct thread_info *)idle->stack;
thread->cpu = cpu;
/* Boot to the head. */

View File

@ -33,6 +33,10 @@ config IA64
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_INFO_ALLOCATOR
default y
help
The Itanium Processor Family is Intel's 64-bit successor to

View File

@ -723,7 +723,6 @@ extern unsigned long boot_option_idle_override;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
IDLE_NOMWAIT, IDLE_POLL};
void cpu_idle_wait(void);
void default_idle(void);
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)

View File

@ -54,8 +54,6 @@ struct thread_info {
}, \
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
@ -84,7 +82,6 @@ struct thread_info {
#endif
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct_node(node) \
({ \
struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \

View File

@ -273,26 +273,6 @@ static inline void play_dead(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
static void do_nothing(void *unused)
{
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
* handler on SMP systems.
*
* Caller must have changed pm_idle to the new value before the call. Old
* pm_idle value will not be used by any CPU after the return of this function.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
void __attribute__((noreturn))
cpu_idle (void)
{

View File

@ -74,13 +74,6 @@
#define bsp_remove_ok 0
#endif
/*
* Store all idle threads, this can be reused instead of creating
* a new thread. Also avoids complicated thread destroy functionality
* for idle threads.
*/
struct task_struct *idle_thread_array[NR_CPUS];
/*
* Global array allocated for NR_CPUS at boot time
*/
@ -94,13 +87,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
#else
#define get_idle_for_cpu(x) (NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif
@ -480,54 +467,12 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
return NULL;
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void __cpuinit
do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
{
int timeout;
struct create_idle c_idle = {
.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER(c_idle.done),
};
/*
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
init_idle(c_idle.idle, cpu);
goto do_rest;
}
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
panic("failed fork for CPU %d", cpu);
set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
task_for_booting_cpu = c_idle.idle;
task_for_booting_cpu = idle;
Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
set_brendez_area(cpu);
@ -793,7 +738,7 @@ set_cpu_sibling_map(int cpu)
}
int __cpuinit
__cpu_up (unsigned int cpu)
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret;
int sapicid;
@ -811,7 +756,7 @@ __cpu_up (unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Processor goes to start_secondary(), sets online flag */
ret = do_boot_cpu(sapicid, cpu);
ret = do_boot_cpu(sapicid, cpu, tidle);
if (ret < 0)
return ret;

View File

@ -31,7 +31,7 @@ KBUILD_AFLAGS += $(aflags-y)
CHECKFLAGS += -D__m32r__ -D__BIG_ENDIAN__=1
head-y := arch/m32r/kernel/head.o arch/m32r/kernel/init_task.o
head-y := arch/m32r/kernel/head.o
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)

View File

@ -55,8 +55,8 @@ struct thread_info {
#define PREEMPT_ACTIVE 0x10000000
#define THREAD_SIZE (PAGE_SIZE << 1)
#define THREAD_SIZE (PAGE_SIZE << 1)
#define THREAD_SIZE_ORDER 1
/*
* macros/functions for gaining access to the thread information structure
*/
@ -92,19 +92,6 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#define free_thread_info(info) kfree(info)
#define TI_FLAG_FAULT_CODE_SHIFT 28
static inline void set_thread_fault_code(unsigned int val)

View File

@ -2,7 +2,7 @@
# Makefile for the Linux/M32R kernel.
#
extra-y := head.o init_task.o vmlinux.lds
extra-y := head.o vmlinux.lds
obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
m32r_ksyms.o sys_m32r.o signal.o ptrace.o

View File

@ -1,34 +0,0 @@
/* orig : i386 init_task.c */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -109,12 +109,8 @@ static unsigned int calibration_result;
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
void smp_prepare_boot_cpu(void);
void smp_prepare_cpus(unsigned int);
static void init_ipi_lock(void);
static void do_boot_cpu(int);
int __cpu_up(unsigned int);
void smp_cpus_done(unsigned int);
int start_secondary(void *);
static void smp_callin(void);
@ -347,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
}
}
int __cpuinit __cpu_up(unsigned int cpu_id)
int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
{
int timeout;

View File

@ -13,7 +13,7 @@ extra-$(CONFIG_SUN3X) := head.o
extra-$(CONFIG_SUN3) := sun3-head.o
extra-y += vmlinux.lds
obj-y := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o
obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o
obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o

View File

@ -1,35 +0,0 @@
/*
* linux/arch/m68knommu/kernel/init_task.c
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD size aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };

View File

@ -16,7 +16,7 @@ endif
extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o \
hw_exception_handler.o intc.o irq.o \
process.o prom.o prom_parse.o ptrace.o \
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o

View File

@ -1,26 +0,0 @@
/*
* Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -29,6 +29,7 @@ config MIPS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD
menu "Machine selection"

View File

@ -235,7 +235,7 @@ endif
OBJCOPYFLAGS += --remove-section=.reginfo
head-y := arch/mips/kernel/head.o arch/mips/kernel/init_task.o
head-y := arch/mips/kernel/head.o
libs-y += arch/mips/lib/

View File

@ -85,18 +85,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define STACK_WARN (THREAD_SIZE / 8)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#define free_thread_info(info) kfree(info)
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000

View File

@ -2,7 +2,7 @@
# Makefile for the Linux/MIPS kernel.
#
extra-y := head.o init_task.o vmlinux.lds
extra-y := head.o vmlinux.lds
obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
ptrace.o reset.o setup.o signal.o syscall.o \

View File

@ -1,35 +0,0 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*
* The things we do for performance..
*/
union thread_union init_thread_union __init_task_data
__attribute__((__aligned__(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -186,61 +186,9 @@ void __devinit smp_prepare_boot_cpu(void)
cpu_set(0, cpu_callin_map);
}
/*
* Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
* and keep control until "cpu_online(cpu)" is set. Note: cpu is
* physical, not logical.
*/
static struct task_struct *cpu_idle_thread[NR_CPUS];
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit do_fork_idle(struct work_struct *work)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
/*
* Processor goes to start_secondary(), sets online flag
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
if (!cpu_idle_thread[cpu]) {
/*
* Schedule work item to avoid forking user task
* Ported from arch/x86/kernel/smpboot.c
*/
struct create_idle c_idle = {
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
idle = cpu_idle_thread[cpu] = c_idle.idle;
if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu);
} else {
idle = cpu_idle_thread[cpu];
init_idle(idle, cpu);
}
mp_ops->boot_secondary(cpu, idle);
mp_ops->boot_secondary(cpu, tidle);
/*
* Trust is futile. We should really have timeouts ...

View File

@ -51,7 +51,7 @@ UNIT := asb2364
endif
head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
head-y := arch/mn10300/kernel/head.o
core-y += arch/mn10300/kernel/ arch/mn10300/mm/

View File

@ -20,8 +20,10 @@
#ifdef CONFIG_4KSTACKS
#define THREAD_SIZE (4096)
#define THREAD_SIZE_ORDER (0)
#else
#define THREAD_SIZE (8192)
#define THREAD_SIZE_ORDER (1)
#endif
#define STACK_WARN (THREAD_SIZE / 8)
@ -120,21 +122,8 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#ifndef CONFIG_KGDB
#define free_thread_info(ti) kfree((ti))
#else
extern void free_thread_info(struct thread_info *);
void arch_release_thread_info(struct thread_info *ti)
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)

View File

@ -1,7 +1,7 @@
#
# Makefile for the MN10300-specific core kernel code
#
extra-y := head.o init_task.o vmlinux.lds
extra-y := head.o vmlinux.lds
fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o

View File

@ -1,39 +0,0 @@
/* MN10300 Initial task definitions
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -397,7 +397,7 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
void free_thread_info(struct thread_info *ti)
void arch_release_thread_info(struct thread_info *ti)
{
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
@ -407,7 +407,6 @@ void free_thread_info(struct thread_info *ti)
* so force immediate reentry */
kgdb_breakpoint();
}
kfree(ti);
}
/*

View File

@ -924,7 +924,7 @@ void initialize_secondary(void)
* __cpu_up - Set smp_commenced_mask for the nominated CPU
* @cpu: The target CPU.
*/
int __devinit __cpu_up(unsigned int cpu)
int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int timeout;

View File

@ -38,7 +38,7 @@ else
KBUILD_CFLAGS += $(call cc-option,-msoft-div)
endif
head-y := arch/openrisc/kernel/head.o arch/openrisc/kernel/init_task.o
head-y := arch/openrisc/kernel/head.o
core-y += arch/openrisc/lib/ \
arch/openrisc/kernel/ \

View File

@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
extra-y := head.o vmlinux.lds init_task.o
extra-y := head.o vmlinux.lds
obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \
traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \

View File

@ -1,42 +0,0 @@
/*
* OpenRISC init_task.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/export.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -17,6 +17,7 @@ config PARISC
select GENERIC_PCI_IOMAP
select IRQ_PER_CPU
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used

View File

@ -75,7 +75,7 @@ head-y := arch/parisc/kernel/head.o
KBUILD_CFLAGS += $(cflags-y)
kernel-y := mm/ kernel/ math-emu/ kernel/init_task.o
kernel-y := mm/ kernel/ math-emu/
kernel-$(CONFIG_HPUX) += hpux/
core-y += $(addprefix arch/parisc/, $(kernel-y))

View File

@ -2,7 +2,7 @@
# Makefile for arch/parisc/kernel
#
extra-y := init_task.o head.o vmlinux.lds
extra-y := head.o vmlinux.lds
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \

View File

@ -1,70 +0,0 @@
/*
* Static declaration of "init" task data structure.
*
* Copyright (C) 2000 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org>
* Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data
__attribute__((aligned(128))) =
{ INIT_THREAD_INFO(init_task) };
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
EXPORT_SYMBOL(init_task);
__asm__(".data");
struct task_struct init_task = INIT_TASK(init_task);

View File

@ -340,26 +340,11 @@ void __init smp_callin(void)
/*
* Bring one cpu online.
*/
int __cpuinit smp_boot_one_cpu(int cpuid)
int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
struct task_struct *idle;
long timeout;
/*
* Create an idle task for this CPU. Note the address we give
* to kernel_thread is irrelevant -- it's going to start
* where OS_BOOT_RENDEVZ vector in SAL says to start. But
* this gets all the other task-y sort of data structures set
* up like we wish. We need to pull the just created idle task
* off the run queue and stuff it into the init_tasks[] array.
* Sheesh . . .
*/
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
task_thread_info(idle)->cpu = cpuid;
/* Let _start know what logical CPU we're booting
@ -403,10 +388,6 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
udelay(100);
barrier();
}
put_task_struct(idle);
idle = NULL;
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
@ -455,10 +436,10 @@ void smp_cpus_done(unsigned int cpu_max)
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
if (cpu != 0 && cpu < parisc_max_cpus)
smp_boot_one_cpu(cpu);
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}

View File

@ -33,6 +33,18 @@
extern int data_start;
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;

View File

@ -87,10 +87,6 @@ config ARCH_HAS_ILOG2_U64
bool
default y if 64BIT
config ARCH_HAS_CPU_IDLE_WAIT
bool
default y
config GENERIC_HWEIGHT
bool
default y
@ -144,6 +140,7 @@ config PPC
select HAVE_BPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
config EARLY_PRINTK
bool

View File

@ -386,7 +386,6 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
void cpu_idle_wait(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);

View File

@ -62,21 +62,8 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* thread information allocation */
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#else /* THREAD_SHIFT < PAGE_SHIFT */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
#endif /* THREAD_SHIFT < PAGE_SHIFT */
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{

View File

@ -28,7 +28,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o systbl.o idle.o \
process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \

View File

@ -113,29 +113,6 @@ void cpu_idle(void)
}
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs come out of the old
* idle loop and start using the new idle loop.
* Required while changing idle handler on SMP systems.
* Caller must have changed idle handler to the new value before the call.
* This window may be larger on shared systems.
*/
void cpu_idle_wait(void)
{
int cpu;
smp_mb();
/* kick all the CPUs so that they exit out of old idle routine */
get_online_cpus();
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id())
smp_send_reschedule(cpu);
}
put_online_cpus();
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
int powersave_nap;
#ifdef CONFIG_SYSCTL

View File

@ -1,29 +0,0 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -1252,37 +1252,6 @@ void __ppc64_runlatch_off(void)
}
#endif /* CONFIG_PPC64 */
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
if (unlikely(ti == NULL))
return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
memset(ti, 0, THREAD_SIZE);
#endif
return ti;
}
void free_thread_info(struct thread_info *ti)
{
kmem_cache_free(thread_info_cache, ti);
}
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)

View File

@ -57,27 +57,9 @@
#define DBG(fmt...)
#endif
/* Store all idle threads, this can be reused instead of creating
* a new thread. Also avoids complicated thread destroy functionality
* for idle threads.
*/
#ifdef CONFIG_HOTPLUG_CPU
/*
* Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
* removed after init for !CONFIG_HOTPLUG_CPU.
*/
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
struct thread_info *secondary_ti;
@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu)
}
#endif
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit do_fork_idle(struct work_struct *work)
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
static int __cpuinit create_idle(unsigned int cpu)
{
struct thread_info *ti;
struct create_idle c_idle = {
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
c_idle.idle = get_idle_for_cpu(cpu);
/* We can't use kernel_thread since we must avoid to
* reschedule the child. We use a workqueue because
* we want to fork from a kernel thread, not whatever
* userspace process happens to be trying to online us.
*/
if (!c_idle.idle) {
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
} else
init_idle(c_idle.idle, cpu);
if (IS_ERR(c_idle.idle)) {
pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
return PTR_ERR(c_idle.idle);
}
ti = task_thread_info(c_idle.idle);
struct thread_info *ti = task_thread_info(idle);
#ifdef CONFIG_PPC64
paca[cpu].__current = c_idle.idle;
paca[cpu].__current = idle;
paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
ti->cpu = cpu;
current_set[cpu] = ti;
return 0;
secondary_ti = current_set[cpu] = ti;
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
/* Make sure we have an idle thread */
rc = create_idle(cpu);
if (rc)
return rc;
secondary_ti = current_set[cpu];
cpu_idle_thread_init(cpu, tidle);
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug

View File

@ -122,6 +122,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select GENERIC_SMP_IDLE_THREAD
config SCHED_OMIT_FRAME_POINTER
def_bool y

View File

@ -91,7 +91,6 @@ OBJCOPYFLAGS := -O binary
head-y := arch/s390/kernel/head.o
head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
head-y += arch/s390/kernel/init_task.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/

View File

@ -16,7 +16,7 @@
extern struct mutex smp_cpu_state_mutex;
extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu);
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

View File

@ -28,7 +28,7 @@ obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
extra-y += head.o init_task.o vmlinux.lds
extra-y += head.o vmlinux.lds
extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o

View File

@ -1,38 +0,0 @@
/*
* arch/s390/kernel/init_task.c
*
* S390 version
*
* Derived from "arch/i386/kernel/init_task.c"
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -85,7 +85,6 @@ enum {
struct pcpu {
struct cpu cpu;
struct task_struct *idle; /* idle process for the cpu */
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long async_stack; /* async stack for the cpu */
unsigned long panic_stack; /* panic stack for the cpu */
@ -725,26 +724,9 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
cpu_idle();
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit smp_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle;
c_idle = container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct create_idle c_idle;
struct pcpu *pcpu;
int rc;
@ -754,22 +736,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
sigp_order_code_accepted)
return -EIO;
if (!pcpu->idle) {
c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
c_idle.cpu = cpu;
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
return PTR_ERR(c_idle.idle);
pcpu->idle = c_idle.idle;
}
init_idle(pcpu->idle, cpu);
rc = pcpu_alloc_lowcore(pcpu, cpu);
if (rc)
return rc;
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, pcpu->idle);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
while (!cpu_online(cpu))
cpu_relax();
@ -856,7 +828,6 @@ void __init smp_prepare_boot_cpu(void)
struct pcpu *pcpu = pcpu_devices;
boot_cpu_address = stap();
pcpu->idle = current;
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();

View File

@ -11,10 +11,9 @@
#include <linux/const.h>
/* thread information allocation */
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - _AC(1,UL))
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - _AC(1,UL))
#ifndef __ASSEMBLY__
@ -71,9 +70,6 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
#define alloc_thread_info_node(tsk, node) kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#define free_thread_info(info) kfree(info)
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000

View File

@ -4,7 +4,7 @@
extra-y := head.o vmlinux.lds
obj-y += entry.o init_task.o irq.o process.o ptrace.o \
obj-y += entry.o irq.o process.o ptrace.o \
setup.o signal.o sys_score.o time.o traps.o \
sys_call_table.o

View File

@ -1,46 +0,0 @@
/*
* arch/score/kernel/init_task.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init_task.h>
#include <linux/mqueue.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -28,6 +28,7 @@ config SUPERH
select RTC_LIB
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
@ -152,9 +153,6 @@ config ARCH_NO_VIRT_TO_BUS
config ARCH_HAS_DEFAULT_IDLE
def_bool y
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
config NO_IOPORT
def_bool !PCI
depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN

View File

@ -124,7 +124,7 @@ endif
export ld-bfd BITS
head-y := arch/sh/kernel/init_task.o arch/sh/kernel/head_$(BITS).o
head-y := arch/sh/kernel/head_$(BITS).o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/

View File

@ -85,10 +85,6 @@ struct sh_cpuinfo {
struct tlb_info itlb;
struct tlb_info dtlb;
#ifdef CONFIG_SMP
struct task_struct *idle;
#endif
unsigned int phys_bits;
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
@ -102,7 +98,6 @@ extern struct sh_cpuinfo cpu_data[];
#define cpu_relax() barrier()
void default_idle(void);
void cpu_idle_wait(void);
void stop_this_cpu(void *);
/* Forward decl */

View File

@ -88,22 +88,13 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
/* thread information allocation */
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#endif
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
extern void arch_task_cache_init(void);
#define arch_task_cache_init arch_task_cache_init
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#endif /* __ASSEMBLY__ */
/*

View File

@ -2,7 +2,7 @@
# Makefile for the Linux/SuperH kernel.
#
extra-y := head_$(BITS).o init_task.o vmlinux.lds
extra-y := head_$(BITS).o vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities

View File

@ -132,10 +132,6 @@ void __init select_idle_routine(void)
pm_idle = poll_idle;
}
static void do_nothing(void *unused)
{
}
void stop_this_cpu(void *unused)
{
local_irq_disable();
@ -144,19 +140,3 @@ void stop_this_cpu(void *unused)
for (;;)
cpu_sleep();
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
* handler on SMP systems.
*
* Caller must have changed pm_idle to the new value before the call. Old
* pm_idle value will not be used by any CPU after the return of this function.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

View File

@ -1,30 +0,0 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct pt_regs fake_swapper_regs;
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@ -29,53 +29,11 @@ void free_thread_xstate(struct task_struct *tsk)
}
}
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
void arch_release_task_struct(struct task_struct *tsk)
{
struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
return ti;
free_thread_xstate(tsk);
}
void free_thread_info(struct thread_info *ti)
{
free_thread_xstate(ti->task);
kmem_cache_free(thread_info_cache, ti);
}
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
return page ? page_address(page) : NULL;
}
void free_thread_info(struct thread_info *ti)
{
free_thread_xstate(ti->task);
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
void arch_task_cache_init(void)
{
if (!xstate_size)

Some files were not shown because too many files have changed in this diff.