-rw-r--r--  arch/sparc64/kernel/setup.c |  2
-rw-r--r--  arch/sparc64/kernel/smp.c   | 28
-rw-r--r--  include/asm-sparc64/futex.h | 88
-rw-r--r--  include/asm-sparc64/smp.h   |  6
4 files changed, 111 insertions, 13 deletions
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 054461e6946..158bd31e15b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -542,6 +542,8 @@ void __init setup_arch(char **cmdline_p)
}
#endif
+ smp_setup_cpu_possible_map();
+
paging_init();
}
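For reference, the tail of setup_arch() after this hunk is applied looks roughly like the sketch below (a reconstruction from the hunk above, heavily abbreviated, not the full function): the possible/present cpu map is populated before paging_init() runs, and on !CONFIG_SMP kernels the call compiles away via the stub macro added to asm-sparc64/smp.h further down.

/* Abbreviated sketch of the patched setup_arch() tail. */
void __init setup_arch(char **cmdline_p)
{
	/* ... earlier command-line parsing and device probing elided ... */

	/* Record every probed cpu as possible/present before anything
	 * that sizes per-cpu state (scheduler, allocators) runs.  On UP
	 * builds this is the empty macro from <asm-sparc64/smp.h>. */
	smp_setup_cpu_possible_map();

	paging_init();
}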
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 1fb6323e65a..1f7ad8a6905 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1079,18 +1079,12 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
+/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int instance, mid;
-
- instance = 0;
- while (!cpu_find_by_instance(instance, NULL, &mid)) {
- if (mid < max_cpus)
- cpu_set(mid, phys_cpu_present_map);
- instance++;
- }
-
if (num_possible_cpus() > max_cpus) {
+ int instance, mid;
+
instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid != boot_cpu_id) {
@@ -1105,6 +1099,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpu_id);
}
+/* Set this up early so that things like the scheduler can init
+ * properly. We use the same cpu mask for both the present and
+ * possible cpu map.
+ */
+void __init smp_setup_cpu_possible_map(void)
+{
+ int instance, mid;
+
+ instance = 0;
+ while (!cpu_find_by_instance(instance, NULL, &mid)) {
+ if (mid < NR_CPUS)
+ cpu_set(mid, phys_cpu_present_map);
+ instance++;
+ }
+}
+
void __devinit smp_prepare_boot_cpu(void)
{
if (hard_smp_processor_id() >= NR_CPUS) {
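The net effect of the smp.c change is to split cpu-map setup into two phases: smp_setup_cpu_possible_map() runs early and records every probed cpu below NR_CPUS, while smp_prepare_cpus() now only trims the map down to the max_cpus boot limit, never growing it. The sketch below shows that two-phase pattern in plain user-space C; probe_cpu(), the probed cpu list, MAX_CPUS and possible_map are made-up stand-ins for cpu_find_by_instance(), the firmware cpu list, the max_cpus parameter and phys_cpu_present_map.

#include <stdio.h>

#define NR_CPUS      64
#define MAX_CPUS     2          /* stand-in for the max_cpus= boot limit */
#define BOOT_CPU_ID  0

static unsigned long possible_map;   /* stand-in for phys_cpu_present_map */

/* Hypothetical probe: pretend the firmware reported cpus 0, 1, 2 and 5. */
static int probe_cpu(int instance, int *mid)
{
	static const int probed[] = { 0, 1, 2, 5 };

	if (instance >= 4)
		return -1;
	*mid = probed[instance];
	return 0;
}

/* Phase 1: run early, before anything sizes per-cpu state. */
static void setup_cpu_possible_map(void)
{
	int instance = 0, mid;

	while (!probe_cpu(instance, &mid)) {
		if (mid < NR_CPUS)
			possible_map |= 1UL << mid;
		instance++;
	}
}

/* Phase 2: run later; only shrinks the map down to the requested limit. */
static void prepare_cpus(unsigned int max_cpus)
{
	int instance = 0, mid;

	if (__builtin_popcountl(possible_map) <= max_cpus)
		return;

	while (!probe_cpu(instance, &mid)) {
		if (mid != BOOT_CPU_ID &&
		    __builtin_popcountl(possible_map) > max_cpus)
			possible_map &= ~(1UL << mid);
		instance++;
	}
}

int main(void)
{
	setup_cpu_possible_map();
	printf("possible after probe: %#lx\n", possible_map);

	prepare_cpus(MAX_CPUS);
	printf("possible after trim:  %#lx\n", possible_map);
	return 0;
}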
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 6a332a9f099..0caf60147e9 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,6 +1,86 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _SPARC64_FUTEX_H
+#define _SPARC64_FUTEX_H
-#include <asm-generic/futex.h>
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
-#endif
+#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile__( \
+ "\n1: lduwa [%3] %%asi, %2\n" \
+ " " insn "\n" \
+ "2: casa [%3] %%asi, %2, %1\n" \
+ " cmp %2, %1\n" \
+ " bne,pn %%icc, 1b\n" \
+ " mov 0, %0\n" \
+ "3:\n" \
+ " .section .fixup,#alloc,#execinstr\n" \
+ " .align 4\n" \
+ "4: ba 3b\n" \
+ " mov %5, %0\n" \
+ " .previous\n" \
+ " .section __ex_table,#alloc\n" \
+ " .align 4\n" \
+ " .word 1b, 4b\n" \
+ " .word 2b, 4b\n" \
+ " .previous\n" \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
+ : "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tem;
+
+ if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+ return -EFAULT;
+ if (unlikely((((unsigned long) uaddr) & 0x3UL)))
+ return -EINVAL;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ dec_preempt_count();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+#endif /* !(_SPARC64_FUTEX_H) */
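The heart of the new futex_atomic_op_inuser() is the decode of the packed 32-bit encoded_op (operation in bits 31-28, comparison in bits 27-24, and two sign-extended 12-bit arguments), followed by a compare-and-swap retry loop and a final comparison against the old value. The sketch below is a host-side C illustration of just that decode/compare logic, not the kernel routine: do_futex_op is a made-up name, the FUTEX_OP_* values are copied from <linux/futex.h>, and a GCC __sync compare-and-swap stands in for the lduwa/casa retry loop in the sparc64 asm.

#include <stdio.h>

/* Values copied from <linux/futex.h>. */
#define FUTEX_OP_SET         0
#define FUTEX_OP_ADD         1
#define FUTEX_OP_OR          2
#define FUTEX_OP_ANDN        3
#define FUTEX_OP_XOR         4
#define FUTEX_OP_OPARG_SHIFT 8
#define FUTEX_OP_CMP_EQ      0
#define FUTEX_OP_CMP_NE      1
#define FUTEX_OP_CMP_LT      2
#define FUTEX_OP_CMP_GE      3
#define FUTEX_OP_CMP_LE      4
#define FUTEX_OP_CMP_GT      5

/* Host-side stand-in for the kernel routine: the __sync CAS loop plays
 * the role of the lduwa/casa retry loop in the sparc64 asm. */
static int do_futex_op(int encoded_op, int *uaddr)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;   /* sign-extended 12-bit field */
	int cmparg = (encoded_op << 20) >> 20;  /* sign-extended 12-bit field */
	int oldval, newval;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	do {
		oldval = *uaddr;
		switch (op) {
		case FUTEX_OP_SET:  newval = oparg;           break;
		case FUTEX_OP_ADD:  newval = oldval + oparg;  break;
		case FUTEX_OP_OR:   newval = oldval | oparg;  break;
		case FUTEX_OP_ANDN: newval = oldval & ~oparg; break;
		case FUTEX_OP_XOR:  newval = oldval ^ oparg;  break;
		default: return -1;
		}
	} while (__sync_val_compare_and_swap(uaddr, oldval, newval) != oldval);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default: return -1;
	}
}

int main(void)
{
	int word = 5;
	/* op=ADD, cmp=GT, oparg=3, cmparg=0: add 3, report whether old > 0. */
	int encoded = (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_GT << 24) | (3 << 12);

	printf("ret=%d word=%d\n", do_futex_op(encoded, &word), word);
	return 0;
}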
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 110a2de8912..473edb2603e 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -66,8 +66,14 @@ static __inline__ int hard_smp_processor_id(void)
#define raw_smp_processor_id() (current_thread_info()->cpu)
+extern void smp_setup_cpu_possible_map(void);
+
#endif /* !(__ASSEMBLY__) */
+#else
+
+#define smp_setup_cpu_possible_map() do { } while (0)
+
#endif /* !(CONFIG_SMP) */
#define NO_PROC_ID 0xFF
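On uniprocessor builds the header supplies smp_setup_cpu_possible_map() as an empty do { } while (0) macro, so callers such as setup_arch() need no #ifdef CONFIG_SMP. Wrapping the empty body in do/while keeps the macro a single statement that is safe wherever a function call would be; a minimal illustration follows, where my_hook() is a made-up name for the same pattern.

#include <stdio.h>

/* Empty statement-like macro, the same shape as the UP stub above. */
#define my_hook() do { } while (0)

int main(void)
{
	int smp = 0;

	/* The macro expands to one statement, so it behaves correctly in
	 * an unbraced if/else just like a real function call would. */
	if (smp)
		my_hook();
	else
		printf("running uniprocessor, hook skipped\n");

	return 0;
}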