Diffstat (limited to 'packages/qemu/qemu-0.8.0/arm_nptl.patch')
-rw-r--r-- | packages/qemu/qemu-0.8.0/arm_nptl.patch | 854 |
1 files changed, 0 insertions, 854 deletions
diff --git a/packages/qemu/qemu-0.8.0/arm_nptl.patch b/packages/qemu/qemu-0.8.0/arm_nptl.patch
deleted file mode 100644
index daf105071c..0000000000
--- a/packages/qemu/qemu-0.8.0/arm_nptl.patch
+++ /dev/null
@@ -1,854 +0,0 @@
-diff -ur qemu-0.8.0.orig/configure qemu-0.8.0/configure
---- qemu-0.8.0.orig/configure	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/configure	2006-07-12 15:26:40.000000000 +0300
-@@ -89,6 +89,7 @@
- cocoa="no"
- check_gfx="yes"
- check_gcc="yes"
-+nptl=yes
-
- # OS specific
- targetos=`uname -s`
-@@ -205,6 +206,8 @@
-   ;;
-   --disable-gcc-check) check_gcc="no"
-   ;;
-+  --disable-nptl) nptl="no"
-+  ;;
-   esac
- done
-
-@@ -299,6 +302,23 @@
-   fi
- fi
-
-+# check NPTL support
-+cat > $TMPC <<EOF
-+#include <sched.h>
-+void foo()
-+{
-+#ifndef CLONE_SETTLS
-+#error bork
-+#endif
-+}
-+EOF
-+
-+if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
-+  :
-+else
-+  nptl="no"
-+fi
-+
- ##########################################
- # SDL probe
-
-@@ -489,6 +509,7 @@
- fi
- echo ""
- echo "kqemu support $kqemu"
-+echo "NPTL support $nptl"
- if test $kqemu = "yes" -a $linux = "yes" ; then
-   echo ""
-   echo "KQEMU Linux module configuration:"
-@@ -793,6 +814,14 @@
-     fi
-     echo "" >> $config_mak
-   fi
-+else
-+  if test "$nptl" = "yes" ; then
-+    case "$target_cpu" in
-+      arm | armeb)
-+        echo "#define USE_NPTL 1" >> $config_h
-+      ;;
-+    esac
-+  fi
- fi
-
- if test "$cocoa" = "yes" ; then
-Only in qemu-0.8.0: configure.rej~
-Only in qemu-0.8.0: configure~
-diff -ur qemu-0.8.0.orig/exec-all.h qemu-0.8.0/exec-all.h
---- qemu-0.8.0.orig/exec-all.h	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/exec-all.h	2006-07-12 15:23:46.000000000 +0300
-@@ -347,163 +347,7 @@
- extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
- extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
-
--#ifdef __powerpc__
--static inline int testandset (int *p)
--{
--    int ret;
--    __asm__ __volatile__ (
--        "0: lwarx %0,0,%1\n"
--        " xor. %0,%3,%0\n"
--        " bne 1f\n"
--        " stwcx. %2,0,%1\n"
--        " bne- 0b\n"
--        "1: "
--        : "=&r" (ret)
--        : "r" (p), "r" (1), "r" (0)
--        : "cr0", "memory");
--    return ret;
--}
--#endif
--
--#ifdef __i386__
--static inline int testandset (int *p)
--{
--    long int readval = 0;
--
--    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
--                          : "+m" (*p), "+a" (readval)
--                          : "r" (1)
--                          : "cc");
--    return readval;
--}
--#endif
--
--#ifdef __x86_64__
--static inline int testandset (int *p)
--{
--    long int readval = 0;
--
--    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
--                          : "+m" (*p), "+a" (readval)
--                          : "r" (1)
--                          : "cc");
--    return readval;
--}
--#endif
--
--#ifdef __s390__
--static inline int testandset (int *p)
--{
--    int ret;
--
--    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
--                          " jl 0b"
--                          : "=&d" (ret)
--                          : "r" (1), "a" (p), "0" (*p)
--                          : "cc", "memory" );
--    return ret;
--}
--#endif
--
--#ifdef __alpha__
--static inline int testandset (int *p)
--{
--    int ret;
--    unsigned long one;
--
--    __asm__ __volatile__ ("0: mov 1,%2\n"
--                          " ldl_l %0,%1\n"
--                          " stl_c %2,%1\n"
--                          " beq %2,1f\n"
--                          ".subsection 2\n"
--                          "1: br 0b\n"
--                          ".previous"
--                          : "=r" (ret), "=m" (*p), "=r" (one)
--                          : "m" (*p));
--    return ret;
--}
--#endif
--
--#ifdef __sparc__
--static inline int testandset (int *p)
--{
--    int ret;
--
--    __asm__ __volatile__("ldstub [%1], %0"
--                         : "=r" (ret)
--                         : "r" (p)
--                         : "memory");
--
--    return (ret ? 1 : 0);
--}
--#endif
--
--#ifdef __arm__
--static inline int testandset (int *spinlock)
--{
--    register unsigned int ret;
--    __asm__ __volatile__("swp %0, %1, [%2]"
--                         : "=r"(ret)
--                         : "0"(1), "r"(spinlock));
--
--    return ret;
--}
--#endif
--
--#ifdef __mc68000
--static inline int testandset (int *p)
--{
--    char ret;
--    __asm__ __volatile__("tas %1; sne %0"
--                         : "=r" (ret)
--                         : "m" (p)
--                         : "cc","memory");
--    return ret;
--}
--#endif
--
--#ifdef __ia64
--#include <ia64intrin.h>
--
--static inline int testandset (int *p)
--{
--    return __sync_lock_test_and_set (p, 1);
--}
--#endif
--
--typedef int spinlock_t;
--
--#define SPIN_LOCK_UNLOCKED 0
--
--#if defined(CONFIG_USER_ONLY)
--static inline void spin_lock(spinlock_t *lock)
--{
--    while (testandset(lock));
--}
--
--static inline void spin_unlock(spinlock_t *lock)
--{
--    *lock = 0;
--}
--
--static inline int spin_trylock(spinlock_t *lock)
--{
--    return !testandset(lock);
--}
--#else
--static inline void spin_lock(spinlock_t *lock)
--{
--}
--
--static inline void spin_unlock(spinlock_t *lock)
--{
--}
--
--static inline int spin_trylock(spinlock_t *lock)
--{
--    return 1;
--}
--#endif
-+#include "qemu_spinlock.h"
-
- extern spinlock_t tb_lock;
-
-diff -ur qemu-0.8.0.orig/linux-user/arm/syscall.h qemu-0.8.0/linux-user/arm/syscall.h
---- qemu-0.8.0.orig/linux-user/arm/syscall.h	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/linux-user/arm/syscall.h	2006-07-12 15:23:46.000000000 +0300
-@@ -28,7 +28,9 @@
- #define ARM_SYSCALL_BASE 0x900000
- #define ARM_THUMB_SYSCALL 0
-
--#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
-+#define ARM_NR_BASE 0xf0000
-+#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
-+#define ARM_NR_set_tls (ARM_NR_BASE + 5)
-
- #define ARM_NR_semihosting 0x123456
- #define ARM_NR_thumb_semihosting 0xAB
-diff -ur qemu-0.8.0.orig/linux-user/main.c qemu-0.8.0/linux-user/main.c
---- qemu-0.8.0.orig/linux-user/main.c	2006-07-12 15:20:37.000000000 +0300
-+++ qemu-0.8.0/linux-user/main.c	2006-07-12 15:23:46.000000000 +0300
-@@ -326,6 +326,50 @@
-     }
- }
-
-+/* Handle a jump to the kernel code page.  */
-+static int
-+do_kernel_trap(CPUARMState *env)
-+{
-+    uint32_t addr;
-+    uint32_t *ptr;
-+    uint32_t cpsr;
-+
-+    switch (env->regs[15]) {
-+    case 0xffff0fc0: /* __kernel_cmpxchg */
-+        /* XXX: This only works between threads, not between processes.
-+           Use native atomic operations.  */
-+        /* ??? This probably breaks horribly if the access segfaults.  */
-+        cpu_lock();
-+        ptr = (uint32_t *)env->regs[2];
-+        cpsr = cpsr_read(env);
-+        if (*ptr == env->regs[0]) {
-+            *ptr = env->regs[1];
-+            env->regs[0] = 0;
-+            cpsr |= CPSR_C;
-+        } else {
-+            env->regs[0] = -1;
-+            cpsr &= ~CPSR_C;
-+        }
-+        cpsr_write(env, cpsr, CPSR_C);
-+        cpu_unlock();
-+        break;
-+    case 0xffff0fe0: /* __kernel_get_tls */
-+        env->regs[0] = env->cp15.c13_tls;
-+        break;
-+    default:
-+        return 1;
-+    }
-+    /* Jump back to the caller.  */
-+    addr = env->regs[14];
-+    if (addr & 1) {
-+        env->thumb = 1;
-+        addr &= ~1;
-+    }
-+    env->regs[15] = addr;
-+
-+    return 0;
-+}
-+
- void cpu_loop(CPUARMState *env)
- {
-     int trapnr;
-@@ -368,10 +412,8 @@
-                 n = insn & 0xffffff;
-             }
-
--            if (n == ARM_NR_cacheflush) {
--                arm_cache_flush(env->regs[0], env->regs[1]);
--            } else if (n == ARM_NR_semihosting
--                       || n == ARM_NR_thumb_semihosting) {
-+            if (n == ARM_NR_semihosting
-+                || n == ARM_NR_thumb_semihosting) {
-                 env->regs[0] = do_arm_semihosting (env);
-             } else if (n >= ARM_SYSCALL_BASE
-                        || (env->thumb && n == ARM_THUMB_SYSCALL)) {
-@@ -381,14 +423,34 @@
-                 } else {
-                     n -= ARM_SYSCALL_BASE;
-                 }
--                env->regs[0] = do_syscall(env,
--                                          n,
--                                          env->regs[0],
--                                          env->regs[1],
--                                          env->regs[2],
--                                          env->regs[3],
--                                          env->regs[4],
--                                          env->regs[5]);
-+                if ( n > ARM_NR_BASE) {
-+                    switch (n)
-+                    {
-+                    case ARM_NR_cacheflush:
-+                        arm_cache_flush(env->regs[0], env->regs[1]);
-+                        break;
-+#ifdef USE_NPTL
-+                    case ARM_NR_set_tls:
-+                        cpu_set_tls(env, env->regs[0]);
-+                        env->regs[0] = 0;
-+                        break;
-+#endif
-+                    default:
-+                        printf ("Error: Bad syscall: %x\n", n);
-+                        goto error;
-+                    }
-+                }
-+                else
-+                {
-+                    env->regs[0] = do_syscall(env,
-+                                              n,
-+                                              env->regs[0],
-+                                              env->regs[1],
-+                                              env->regs[2],
-+                                              env->regs[3],
-+                                              env->regs[4],
-+                                              env->regs[5]);
-+                }
-             } else {
-                 goto error;
-             }
-@@ -427,6 +489,10 @@
-                 }
-             }
-             break;
-+        case EXCP_KERNEL_TRAP:
-+            if (do_kernel_trap(env))
-+                goto error;
-+            break;
-         default:
-         error:
-             fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
-@@ -1602,6 +1668,10 @@
-         ts->heap_base = info->brk;
-         /* This will be filled in on the first SYS_HEAPINFO call.  */
-         ts->heap_limit = 0;
-+        /* Register the magic kernel code page.  The cpu will generate a
-+           special exception when it tries to execute code here.  We can't
-+           put real code here because it may be in use by the host kernel.  */
-+        page_set_flags(0xffff0000, 0xffff0fff, 0);
-     }
- #elif defined(TARGET_SPARC)
-     {
-diff -ur qemu-0.8.0.orig/linux-user/qemu.h qemu-0.8.0/linux-user/qemu.h
---- qemu-0.8.0.orig/linux-user/qemu.h	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/linux-user/qemu.h	2006-07-12 15:23:46.000000000 +0300
-@@ -76,6 +76,9 @@
-     uint32_t v86mask;
- #endif
-     int used; /* non zero if used */
-+#ifdef USE_NPTL
-+    uint32_t *child_tidptr;
-+#endif
-     uint8_t stack[0];
- } __attribute__((aligned(16))) TaskState;
-
-Only in qemu-0.8.0.orig/linux-user: signal.c.orig
-diff -ur qemu-0.8.0.orig/linux-user/syscall.c qemu-0.8.0/linux-user/syscall.c
---- qemu-0.8.0.orig/linux-user/syscall.c	2006-07-12 15:20:37.000000000 +0300
-+++ qemu-0.8.0/linux-user/syscall.c	2006-07-12 15:36:51.000000000 +0300
-@@ -72,9 +72,18 @@
- #include <linux/kd.h>
-
- #include "qemu.h"
-+#include "qemu_spinlock.h"
-
- //#define DEBUG
-
-+#ifdef USE_NPTL
-+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
-+    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
-+#else
-+/* XXX: Hardcode the above values.  */
-+#define CLONE_NPTL_FLAGS2 0
-+#endif
-+
- #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC)
- /* 16 bit uid wrappers emulation */
- #define USE_UID16
-@@ -1527,20 +1536,38 @@
-    thread/process */
- #define NEW_STACK_SIZE 8192
-
-+#ifdef USE_NPTL
-+static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
-+#endif
-+
- static int clone_func(void *arg)
- {
-     CPUState *env = arg;
-+#ifdef HAVE_NPTL
-+    /* Wait until the parent has finshed initializing the tls state.  */
-+    while (!spin_trylock(&nptl_lock))
-+        usleep(1);
-+    spin_unlock(&nptl_lock);
-+#endif
-     cpu_loop(env);
-     /* never exits */
-     return 0;
- }
-
--int do_fork(CPUState *env, unsigned int flags, unsigned long newsp)
-+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
-+            uint32_t *parent_tidptr, void *newtls,
-+            uint32_t *child_tidptr)
- {
-     int ret;
-     TaskState *ts;
-     uint8_t *new_stack;
-     CPUState *new_env;
-+#ifdef USE_NPTL
-+    unsigned int nptl_flags;
-+
-+    if (flags & CLONE_PARENT_SETTID)
-+        *parent_tidptr = gettid();
-+#endif
-
-     if (flags & CLONE_VM) {
-         ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
-@@ -1576,16 +1603,62 @@
- #error unsupported target CPU
- #endif
-         new_env->opaque = ts;
-+#ifdef USE_NPTL
-+        nptl_flags = flags;
-+        flags &= ~CLONE_NPTL_FLAGS2;
-+
-+        if (nptl_flags & CLONE_CHILD_CLEARTID) {
-+            ts->child_tidptr = child_tidptr;
-+        }
-+
-+        if (nptl_flags & CLONE_SETTLS)
-+            cpu_set_tls (new_env, newtls);
-+        /* Grab the global cpu lock so that the thread setup appears
-+           atomic.  */
-+        if (nptl_flags & CLONE_CHILD_SETTID)
-+            spin_lock(&nptl_lock);
-+#else
-+        if (flags & CLONE_NPTL_FLAGS2)
-+            return -EINVAL;
-+#endif
-+
- #ifdef __ia64__
-         ret = clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
- #else
-         ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
- #endif
-+#ifdef USE_NPTL
-+        if (ret != -1) {
-+            if (nptl_flags & CLONE_CHILD_SETTID)
-+                *child_tidptr = ret;
-+        }
-+        /* Allow the child to continue.  */
-+        if (nptl_flags & CLONE_CHILD_SETTID)
-+            spin_unlock(&nptl_lock);
-+#endif
-     } else {
-         /* if no CLONE_VM, we consider it is a fork */
--        if ((flags & ~CSIGNAL) != 0)
-+        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
-             return -EINVAL;
-         ret = fork();
-+#ifdef USE_NPTL
-+        /* There is a race condition here.  The parent process could
-+           theoretically read the TID in the child process before the child
-+           tid is set.  This would require using either ptrace
-+           (not implemented) or having *_tidptr to point at a shared memory
-+           mapping.  We can't repeat the spinlock hack used above because
-+           the child process gets its own copy of the lock.  */
-+        if (ret == 0) {
-+            /* Child Process.  */
-+            if (flags & CLONE_CHILD_SETTID)
-+                *child_tidptr = gettid();
-+            ts = (TaskState *)env->opaque;
-+            if (flags & CLONE_CHILD_CLEARTID)
-+                ts->child_tidptr = child_tidptr;
-+            if (flags & CLONE_SETTLS)
-+                cpu_set_tls (env, newtls);
-+        }
-+#endif
-     }
-     return ret;
- }
-@@ -1757,7 +1830,7 @@
-         ret = do_brk((char *)arg1);
-         break;
-     case TARGET_NR_fork:
--        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
-+        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
-         break;
-     case TARGET_NR_waitpid:
-         {
-@@ -2564,7 +2637,8 @@
-         ret = get_errno(fsync(arg1));
-         break;
-     case TARGET_NR_clone:
--        ret = get_errno(do_fork(cpu_env, arg1, arg2));
-+        ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
-+                        (void *)arg4, (uint32_t *)arg5));
-         break;
- #ifdef __NR_exit_group
-     /* new thread calls */
-@@ -2925,7 +2999,8 @@
- #endif
- #ifdef TARGET_NR_vfork
-     case TARGET_NR_vfork:
--        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
-+        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
-+                        NULL, NULL, NULL));
-         break;
- #endif
- #ifdef TARGET_NR_ugetrlimit
-@@ -3347,4 +3422,3 @@
- #endif
-     return ret;
- }
--
-Only in qemu-0.8.0.orig/linux-user: syscall.c.orig
-Only in qemu-0.8.0/linux-user: syscall.c.rej~
-Only in qemu-0.8.0/linux-user: syscall.c~
-Only in qemu-0.8.0: qemu_spinlock.h
-diff -ur qemu-0.8.0.orig/target-arm/cpu.h qemu-0.8.0/target-arm/cpu.h
---- qemu-0.8.0.orig/target-arm/cpu.h	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/target-arm/cpu.h	2006-07-12 15:27:28.000000000 +0300
-@@ -34,6 +34,9 @@
- #define EXCP_DATA_ABORT 4
- #define EXCP_IRQ 5
- #define EXCP_FIQ 6
-+#define EXCP_BKPT 7
-+#define EXCP_KERNEL_TRAP 8 /* Jumped to kernel code page.  */
-+
-
- /* We currently assume float and double are IEEE single and double
-    precision respectively.
-@@ -83,6 +86,7 @@
-     uint32_t c9_data;
-     uint32_t c13_fcse; /* FCSE PID.  */
-     uint32_t c13_context; /* Context ID.  */
-+    uint32_t c13_tls; /* Paul Brook told me to just add this ;) */
-     } cp15;
-
-     /* exception/interrupt handling */
-@@ -126,6 +130,15 @@
- int cpu_arm_signal_handler(int host_signum, struct siginfo *info,
-                            void *puc);
-
-+void cpu_lock(void);
-+void cpu_unlock(void);
-+#if defined(USE_NPTL)
-+static inline void cpu_set_tls(CPUARMState *env, void *newtls)
-+{
-+    env->cp15.c13_tls = (uint32_t)newtls;
-+}
-+#endif
-+
- #define CPSR_M (0x1f)
- #define CPSR_T (1 << 5)
- #define CPSR_F (1 << 6)
-@@ -137,7 +150,11 @@
- #define CPSR_J (1 << 24)
- #define CPSR_IT_0_1 (3 << 25)
- #define CPSR_Q (1 << 27)
--#define CPSR_NZCV (0xf << 28)
-+#define CPSR_V (1 << 28)
-+#define CPSR_C (1 << 29)
-+#define CPSR_Z (1 << 30)
-+#define CPSR_N (1 << 31)
-+#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
-
- #define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV)
- /* Return the current CPSR value.  */
-Only in qemu-0.8.0/target-arm: cpu.h~
-diff -ur qemu-0.8.0.orig/target-arm/exec.h qemu-0.8.0/target-arm/exec.h
---- qemu-0.8.0.orig/target-arm/exec.h	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/target-arm/exec.h	2006-07-12 15:23:46.000000000 +0300
-@@ -51,8 +51,6 @@
-
- /* In op_helper.c */
-
--void cpu_lock(void);
--void cpu_unlock(void);
- void helper_set_cp15(CPUState *, uint32_t, uint32_t);
- uint32_t helper_get_cp15(CPUState *, uint32_t);
-
-diff -ur qemu-0.8.0.orig/target-arm/op.c qemu-0.8.0/target-arm/op.c
---- qemu-0.8.0.orig/target-arm/op.c	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/target-arm/op.c	2006-07-12 15:23:46.000000000 +0300
-@@ -885,6 +885,12 @@
-     cpu_loop_exit();
- }
-
-+void OPPROTO op_kernel_trap(void)
-+{
-+    env->exception_index = EXCP_KERNEL_TRAP;
-+    cpu_loop_exit();
-+}
-+
- /* VFP support.  We follow the convention used for VFP instrunctions:
-    Single precition routines have a "s" suffix, double precision a
-    "d" suffix.  */
-diff -ur qemu-0.8.0.orig/target-arm/translate.c qemu-0.8.0/target-arm/translate.c
---- qemu-0.8.0.orig/target-arm/translate.c	2005-12-20 00:51:53.000000000 +0200
-+++ qemu-0.8.0/target-arm/translate.c	2006-07-12 15:23:46.000000000 +0300
-@@ -2282,6 +2282,7 @@
-     s->is_jmp = DISAS_JUMP;
- }
-
-+
- /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
-    basic block 'tb'.  If search_pc is TRUE, also generate PC
-    information for each intermediate instruction. */
-@@ -2316,6 +2317,15 @@
-     nb_gen_labels = 0;
-     lj = -1;
-     do {
-+#ifdef CONFIG_USER_ONLY
-+        /* Intercept jump to the magic kernel page.  */
-+        if (dc->pc > 0xffff0000) {
-+            gen_op_kernel_trap();
-+            dc->is_jmp = DISAS_UPDATE;
-+            break;
-+        }
-+#endif
-+
-         if (env->nb_breakpoints > 0) {
-             for(j = 0; j < env->nb_breakpoints; j++) {
-                 if (env->breakpoints[j] == dc->pc) {
-diff -urN qemu-0.8.1.orig/qemu_spinlock.h qemu-0.8.1/qemu_spinlock.h
---- qemu-0.8.1.orig/qemu_spinlock.h	1970-01-01 02:00:00.000000000 +0200
-+++ qemu-0.8.1/qemu_spinlock.h	2006-06-04 00:59:23.000000000 +0300
-@@ -0,0 +1,182 @@
-+/*
-+ * internal execution defines for qemu
-+ *
-+ * Copyright (c) 2003 Fabrice Bellard
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#ifndef _QEMU_SPINLOCK_H
-+#define _QEMU_SPINLOCK_H
-+
-+#ifdef __powerpc__
-+static inline int testandset (int *p)
-+{
-+    int ret;
-+    __asm__ __volatile__ (
-+        "0: lwarx %0,0,%1\n"
-+        " xor. %0,%3,%0\n"
-+        " bne 1f\n"
-+        " stwcx. %2,0,%1\n"
-+        " bne- 0b\n"
-+        "1: "
-+        : "=&r" (ret)
-+        : "r" (p), "r" (1), "r" (0)
-+        : "cr0", "memory");
-+    return ret;
-+}
-+#endif
-+
-+#ifdef __i386__
-+static inline int testandset (int *p)
-+{
-+    long int readval = 0;
-+
-+    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
-+                          : "+m" (*p), "+a" (readval)
-+                          : "r" (1)
-+                          : "cc");
-+    return readval;
-+}
-+#endif
-+
-+#ifdef __x86_64__
-+static inline int testandset (int *p)
-+{
-+    long int readval = 0;
-+
-+    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
-+                          : "+m" (*p), "+a" (readval)
-+                          : "r" (1)
-+                          : "cc");
-+    return readval;
-+}
-+#endif
-+
-+#ifdef __s390__
-+static inline int testandset (int *p)
-+{
-+    int ret;
-+
-+    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
-+                          " jl 0b"
-+                          : "=&d" (ret)
-+                          : "r" (1), "a" (p), "0" (*p)
-+                          : "cc", "memory" );
-+    return ret;
-+}
-+#endif
-+
-+#ifdef __alpha__
-+static inline int testandset (int *p)
-+{
-+    int ret;
-+    unsigned long one;
-+
-+    __asm__ __volatile__ ("0: mov 1,%2\n"
-+                          " ldl_l %0,%1\n"
-+                          " stl_c %2,%1\n"
-+                          " beq %2,1f\n"
-+                          ".subsection 2\n"
-+                          "1: br 0b\n"
-+                          ".previous"
-+                          : "=r" (ret), "=m" (*p), "=r" (one)
-+                          : "m" (*p));
-+    return ret;
-+}
-+#endif
-+
-+#ifdef __sparc__
-+static inline int testandset (int *p)
-+{
-+    int ret;
-+
-+    __asm__ __volatile__("ldstub [%1], %0"
-+                         : "=r" (ret)
-+                         : "r" (p)
-+                         : "memory");
-+
-+    return (ret ? 1 : 0);
-+}
-+#endif
-+
-+#ifdef __arm__
-+static inline int testandset (int *spinlock)
-+{
-+    register unsigned int ret;
-+    __asm__ __volatile__("swp %0, %1, [%2]"
-+                         : "=r"(ret)
-+                         : "0"(1), "r"(spinlock));
-+
-+    return ret;
-+}
-+#endif
-+
-+#ifdef __mc68000
-+static inline int testandset (int *p)
-+{
-+    char ret;
-+    __asm__ __volatile__("tas %1; sne %0"
-+                         : "=r" (ret)
-+                         : "m" (p)
-+                         : "cc","memory");
-+    return ret;
-+}
-+#endif
-+
-+#ifdef __ia64
-+#include <ia64intrin.h>
-+
-+static inline int testandset (int *p)
-+{
-+    return __sync_lock_test_and_set (p, 1);
-+}
-+#endif
-+
-+typedef int spinlock_t;
-+
-+#define SPIN_LOCK_UNLOCKED 0
-+
-+#if defined(CONFIG_USER_ONLY)
-+static inline void spin_lock(spinlock_t *lock)
-+{
-+    while (testandset(lock));
-+}
-+
-+static inline void spin_unlock(spinlock_t *lock)
-+{
-+    *lock = 0;
-+}
-+
-+static inline int spin_trylock(spinlock_t *lock)
-+{
-+    return !testandset(lock);
-+}
-+#else
-+static inline void spin_lock(spinlock_t *lock)
-+{
-+}
-+
-+static inline void spin_unlock(spinlock_t *lock)
-+{
-+}
-+
-+static inline int spin_trylock(spinlock_t *lock)
-+{
-+    return 1;
-+}
-+#endif
-+
-+#endif /* ! _QEMU_SPINLOCK_H */