From 043df59eb3798c094e6ba47136f3d3b34a6791a7 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 25 Feb 2006 12:15:31 -0800
Subject: [SPARC64]: Implement futex_atomic_op_inuser().

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/futex.h | 88 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 84 insertions(+), 4 deletions(-)

diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 6a332a9f099..0caf60147e9 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,6 +1,86 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _SPARC64_FUTEX_H
+#define _SPARC64_FUTEX_H
 
-#include <asm-generic/futex.h>
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
 
-#endif
+#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
+	__asm__ __volatile__( \
+	"\n1:	lduwa	[%3] %%asi, %2\n" \
+	"	" insn "\n" \
+	"2:	casa	[%3] %%asi, %2, %1\n" \
+	"	cmp	%2, %1\n" \
+	"	bne,pn	%%icc, 1b\n" \
+	"	 mov	0, %0\n" \
+	"3:\n" \
+	"	.section .fixup,#alloc,#execinstr\n" \
+	"	.align	4\n" \
+	"4:	ba	3b\n" \
+	"	 mov	%5, %0\n" \
+	"	.previous\n" \
+	"	.section __ex_table,#alloc\n" \
+	"	.align	4\n" \
+	"	.word	1b, 4b\n" \
+	"	.word	2b, 4b\n" \
+	"	.previous\n" \
+	: "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
+	: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
+	: "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret, tem;
+
+	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+		return -EFAULT;
+	if (unlikely((((unsigned long) uaddr) & 0x3UL)))
+		return -EINVAL;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	dec_preempt_count();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+#endif /* !(_SPARC64_FUTEX_H) */
--
cgit v1.2.3
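For readers tracing the inline assembly above, here is a minimal user-space sketch in plain C of the same pattern: decode encoded_op, apply the operation, and retry the compare-and-swap until the word is unchanged between the load (lduwa in the patch) and the swap (casa). It is illustrative only: futex_op_demo is a hypothetical name, GCC's __sync_val_compare_and_swap builtin stands in for the casa instruction, and the cmp/cmparg post-check of the real futex_atomic_op_inuser() is omitted.

	/*
	 * Illustrative sketch, not kernel code.  The FUTEX_OP_* values
	 * match <linux/futex.h> of that era; everything else here is an
	 * invented stand-in for the patch's inline assembly.
	 */
	#include <stdio.h>

	#define FUTEX_OP_SET		0
	#define FUTEX_OP_ADD		1
	#define FUTEX_OP_OR		2
	#define FUTEX_OP_ANDN		3
	#define FUTEX_OP_XOR		4
	#define FUTEX_OP_OPARG_SHIFT	8

	static int futex_op_demo(int encoded_op, int *uaddr)
	{
		int op = (encoded_op >> 28) & 7;
		int oparg = (encoded_op << 8) >> 20; /* sign-extending decode, as in the patch */
		int oldval, newval;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		/* Retry until the word is unchanged between load and CAS;
		 * this is what the "bne,pn %%icc, 1b" branch does in the
		 * patch's assembly. */
		do {
			oldval = *uaddr; /* lduwa in the patch */
			switch (op) {
			case FUTEX_OP_SET:  newval = oparg;           break;
			case FUTEX_OP_ADD:  newval = oldval + oparg;  break;
			case FUTEX_OP_OR:   newval = oldval | oparg;  break;
			case FUTEX_OP_ANDN: newval = oldval & ~oparg; break; /* generic ANDN semantics */
			case FUTEX_OP_XOR:  newval = oldval ^ oparg;  break;
			default:            return -1;
			}
		} while (__sync_val_compare_and_swap(uaddr, oldval, newval) != oldval);

		return oldval;
	}

	int main(void)
	{
		int word = 5;
		/* op = ADD, oparg = 3 (bits 12..23), cmp/cmparg = 0 */
		int encoded = (FUTEX_OP_ADD << 28) | ((3 & 0xfff) << 12);

		printf("old=%d new=%d\n", futex_op_demo(encoded, &word), word);
		return 0; /* prints old=5 new=8 */
	}

If another thread modifies the word between the load and the CAS, the swap fails, the old value is reloaded, and the operation is recomputed; the patch gets the same effect with its branch back to label 1.
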
From 7abea9214585823f7f19d91872d7c6f8874bef9a Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 25 Feb 2006 13:39:56 -0800
Subject: [SPARC64]: Make cpu_present_map available earlier.

The change to kernel/sched.c's init code to use for_each_cpu()
requires that the cpu_possible_map be set up much earlier.

Set it up via setup_arch(), constrained to NR_CPUS, and constrain
it to max_cpus later in smp_prepare_cpus().

This fixes SMP booting on sparc64.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/setup.c |  2 ++
 arch/sparc64/kernel/smp.c   | 28 +++++++++++++++++++---------
 include/asm-sparc64/smp.h   |  6 ++++++
 3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 054461e6946..158bd31e15b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -542,6 +542,8 @@ void __init setup_arch(char **cmdline_p)
 	}
 #endif
 
+	smp_setup_cpu_possible_map();
+
 	paging_init();
 }
 
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 1fb6323e65a..1f7ad8a6905 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1079,18 +1079,12 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+/* Constrain the number of cpus to max_cpus. */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int instance, mid;
-
-	instance = 0;
-	while (!cpu_find_by_instance(instance, NULL, &mid)) {
-		if (mid < max_cpus)
-			cpu_set(mid, phys_cpu_present_map);
-		instance++;
-	}
-
 	if (num_possible_cpus() > max_cpus) {
+		int instance, mid;
+
 		instance = 0;
 		while (!cpu_find_by_instance(instance, NULL, &mid)) {
 			if (mid != boot_cpu_id) {
@@ -1105,6 +1099,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	smp_store_cpu_info(boot_cpu_id);
 }
 
+/* Set this up early so that things like the scheduler can init
+ * properly.  We use the same cpu mask for both the present and
+ * possible cpu map.
+ */
+void __init smp_setup_cpu_possible_map(void)
+{
+	int instance, mid;
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, NULL, &mid)) {
+		if (mid < NR_CPUS)
+			cpu_set(mid, phys_cpu_present_map);
+		instance++;
+	}
+}
+
 void __devinit smp_prepare_boot_cpu(void)
 {
 	if (hard_smp_processor_id() >= NR_CPUS) {
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 110a2de8912..473edb2603e 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -66,8 +66,14 @@ static __inline__ int hard_smp_processor_id(void)
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
+extern void smp_setup_cpu_possible_map(void);
+
 #endif /* !(__ASSEMBLY__) */
 
+#else
+
+#define smp_setup_cpu_possible_map() do { } while (0)
+
 #endif /* !(CONFIG_SMP) */
 
 #define NO_PROC_ID 0xFF
--
cgit v1.2.3
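The ordering constraint this second patch fixes is easy to model: a cpumask is just a bitmap, and a for_each_cpu()-style walker sees only the bits already set when it runs, so the map must be populated (in setup_arch()) before the scheduler's init code iterates it. The sketch below reproduces that contract in user space; mask_t, mask_set and for_each_set are invented stand-ins, not the kernel's cpumask API.

	/*
	 * Illustrative sketch only: why the possible map must be
	 * populated before anything walks it.
	 */
	#include <stdio.h>

	#define NR_CPUS 64

	typedef struct { unsigned long long bits; } mask_t; /* toy cpumask, NR_CPUS <= 64 */

	static void mask_set(int cpu, mask_t *m)
	{
		m->bits |= 1ULL << cpu;
	}

	/* Analogous to for_each_cpu(): visit each set bit in ascending
	 * order.  A consumer running before the bits are set simply
	 * sees an empty map. */
	#define for_each_set(cpu, m) \
		for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
			if ((m)->bits & (1ULL << (cpu)))

	int main(void)
	{
		mask_t possible = { 0 };
		int cpu;

		/* setup_arch() analogue: populate first, bounded by NR_CPUS. */
		mask_set(0, &possible);
		mask_set(1, &possible);
		mask_set(5, &possible); /* sparse cpu numbers are normal on sparc64 */

		/* scheduler-init analogue: iteration is only meaningful now. */
		for_each_set(cpu, &possible)
			printf("init per-cpu state for cpu %d\n", cpu);

		return 0;
	}

The patch's split mirrors this: smp_setup_cpu_possible_map() populates the mask early from setup_arch(), and smp_prepare_cpus() only narrows it to max_cpus afterwards.
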