These are Paul Brook's patches to QEMU-0.8.2 that enable single ARM binaries
built against NPTL (the Native POSIX Thread Library) to run under QEMU's
user-mode emulation. Without them, QEMU-0.8.1 dies immediately with:
	Error: f0005
	qemu: uncaught target signal 6 (Aborted) - exiting
while qemu-0.8.2 dies with:
	qemu: Unsupported syscall: 983045
	cannot set up thread-local storage: unknown error
The unsupported syscall 983045 is 0xf0005, the ARM-private set_tls call that
an NPTL-based C library makes at startup to install the thread-local-storage
pointer; these patches add emulation for it (ARM_NR_set_tls below), together
with NPTL-style clone() thread creation and the kernel helper page.

This file is a rediff of the patches visible at
https://nowt.dyndns.org/patch.qemu_nptl on 27 Sept 2006,
which the "patch" utility fails to apply automatically.
See also http://lists.gnu.org/archive/html/qemu-devel/2006-09/msg00194.html

	Martin Guy, 27 Sept 2006
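
As background (an editor's sketch, not part of the original patches): an
NPTL-based C library installs its TLS pointer through that ARM-private trap
and creates threads with clone() flags that carry tid/tls pointers, which is
why do_fork() below gains extra arguments.  Roughly, in C, with the names
NPTL_CLONE_FLAGS, ARM_SET_TLS and install_tls_pointer invented for this
illustration:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Flags glibc's NPTL passes to clone() for a new thread.  On ARM the
	   kernel argument order is
	   clone(flags, child_stack, parent_tidptr, tls, child_tidptr).  */
	#define NPTL_CLONE_FLAGS (CLONE_VM | CLONE_FS | CLONE_FILES | \
	        CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS | \
	        CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID)

	/* The ARM-private set_tls call, number 0xf0000 + 5 = 983045, which is
	   exactly the "Unsupported syscall" that unpatched qemu-0.8.2 reports.  */
	#define ARM_SET_TLS 0xf0005

	static void install_tls_pointer(void *tp)
	{
	    syscall(ARM_SET_TLS, tp);
	}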

---
 configure                |   25 ++++++
 exec-all.h               |  165 ------------------------------------------
 linux-user/arm/syscall.h |    4 -
 linux-user/main.c        |   94 +++++++++++++++++++++---
 linux-user/qemu.h        |    3 
 linux-user/syscall.c     |   91 ++++++++++++++++++++++-
 qemu_spinlock.h          |  181 +++++++++++++++++++++++++++++++++++++++++++++++
 target-arm/cpu.h         |   10 ++
 target-arm/op.c          |    6 +
 target-arm/translate.c   |    9 ++
 10 files changed, 405 insertions(+), 183 deletions(-)
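
Editor's note, not part of the patch: the main.c, op.c and translate.c changes
below emulate the ARM kernel's "helper page" at 0xffff0000. A jump into that
page raises EXCP_KERNEL_TRAP, and QEMU then supplies __kernel_cmpxchg and
__kernel_get_tls itself. Here is a rough sketch of how guest code (such as an
NPTL libc) calls these helpers; the typedef names are invented and only the
two addresses come from the patch:

	typedef int   (*kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	typedef void *(*kernel_get_tls_t)(void);

	/* Fixed entry points in the (emulated) kernel helper page.  */
	#define __kernel_cmpxchg ((kernel_cmpxchg_t)0xffff0fc0)
	#define __kernel_get_tls ((kernel_get_tls_t)0xffff0fe0)

	/* Atomically increment *counter; the cmpxchg helper returns 0 when
	   the store succeeded, so retry until it does.  */
	static int atomic_increment(int *counter)
	{
	    int old;
	    do {
	        old = *counter;
	    } while (__kernel_cmpxchg(old, old + 1, counter) != 0);
	    return old + 1;
	}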

--- qemu.orig/configure
+++ qemu/configure
@@ -103,10 +103,11 @@ check_gcc="yes"
 softmmu="yes"
 linux_user="no"
 darwin_user="no"
 build_docs="no"
 uname_release=""
+nptl="yes"
 
 # OS specific
 targetos=`uname -s`
 case $targetos in
 CYGWIN*)
@@ -322,10 +323,12 @@ for opt do
   ;;
   --disable-werror) werror="no"
   ;;
+  --disable-nptl) nptl="no"
+  ;;
   *) echo "ERROR: unknown option $opt"; show_help="yes"
   ;;
   esac
 done
 
 if [ "$bsd" = "yes" -o "$darwin" = "yes" -o "$mingw32" = "yes" ] ; then
     AIOLIBS=
@@ -417,10 +420,11 @@ echo "  --enable-system          enable 
 echo "  --disable-system         disable all system emulation targets"
 echo "  --enable-linux-user      enable all linux usermode emulation targets"
 echo "  --disable-linux-user     disable all linux usermode emulation targets"
 echo "  --enable-darwin-user     enable all darwin usermode emulation targets"
 echo "  --disable-darwin-user    disable all darwin usermode emulation targets"
+echo "  --disable-nptl           disable usermode NPTL guest support"
 echo "  --fmod-lib               path to FMOD library"
 echo "  --fmod-inc               path to FMOD includes"
 echo "  --enable-uname-release=R Return R for uname -r in usermode emulation"
 echo "  --sparc_cpu=V            Build qemu for Sparc architecture v7, v8, v8plus, v8plusa, v9"
 echo ""
@@ -583,10 +587,27 @@ fi
 cat > $TMPC <<EOF
 int main(void) {
 }
 EOF
 
+# check NPTL support
+cat > $TMPC <<EOF
+#include <sched.h>
+void foo()
+{
+#ifndef CLONE_SETTLS
+#error bork
+#endif
+}
+EOF
+
+if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
+  :
+else
+   nptl="no"
+fi
+
 ##########################################
 # SDL probe
 
 sdl_too_old=no
 
@@ -747,10 +768,11 @@ if test -n "$sparc_cpu"; then
 fi
 echo "kqemu support     $kqemu"
 echo "Documentation     $build_docs"
 [ ! -z "$uname_release" ] && \
 echo "uname -r          $uname_release"
+echo "NPTL support      $nptl"
 
 if test $sdl_too_old = "yes"; then
 echo "-> Your SDL version is too old - please upgrade to have SDL support"
 fi
 if [ -s /tmp/qemu-$$-sdl-config.log ]; then
@@ -1063,10 +1085,13 @@ if test "$target_cpu" = "i386" ; then
   fi
 elif test "$target_cpu" = "arm" -o "$target_cpu" = "armeb" ; then
   echo "TARGET_ARCH=arm" >> $config_mak
   echo "#define TARGET_ARCH \"arm\"" >> $config_h
   echo "#define TARGET_ARM 1" >> $config_h
+  if test "$nptl" = "yes" ; then
+	  echo "#define USE_NPTL 1" >> $config_h
+  fi
   bflt="yes"
 elif test "$target_cpu" = "sparc" ; then
   echo "TARGET_ARCH=sparc" >> $config_mak
   echo "#define TARGET_ARCH \"sparc\"" >> $config_h
   echo "#define TARGET_SPARC 1" >> $config_h
--- qemu.orig/exec-all.h
+++ qemu/exec-all.h
@@ -338,174 +338,11 @@ dummy_label ## n: ;\
 
 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
 
-#if defined(__powerpc__)
-static inline int testandset (int *p)
-{
-    int ret;
-    __asm__ __volatile__ (
-                          "0:    lwarx %0,0,%1\n"
-                          "      xor. %0,%3,%0\n"
-                          "      bne 1f\n"
-                          "      stwcx. %2,0,%1\n"
-                          "      bne- 0b\n"
-                          "1:    "
-                          : "=&r" (ret)
-                          : "r" (p), "r" (1), "r" (0)
-                          : "cr0", "memory");
-    return ret;
-}
-#elif defined(__i386__)
-static inline int testandset (int *p)
-{
-    long int readval = 0;
-
-    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
-                          : "+m" (*p), "+a" (readval)
-                          : "r" (1)
-                          : "cc");
-    return readval;
-}
-#elif defined(__x86_64__)
-static inline int testandset (int *p)
-{
-    long int readval = 0;
-
-    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
-                          : "+m" (*p), "+a" (readval)
-                          : "r" (1)
-                          : "cc");
-    return readval;
-}
-#elif defined(__s390__)
-static inline int testandset (int *p)
-{
-    int ret;
-
-    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
-			  "   jl    0b"
-			  : "=&d" (ret)
-			  : "r" (1), "a" (p), "0" (*p)
-			  : "cc", "memory" );
-    return ret;
-}
-#elif defined(__alpha__)
-static inline int testandset (int *p)
-{
-    int ret;
-    unsigned long one;
-
-    __asm__ __volatile__ ("0:	mov 1,%2\n"
-			  "	ldl_l %0,%1\n"
-			  "	stl_c %2,%1\n"
-			  "	beq %2,1f\n"
-			  ".subsection 2\n"
-			  "1:	br 0b\n"
-			  ".previous"
-			  : "=r" (ret), "=m" (*p), "=r" (one)
-			  : "m" (*p));
-    return ret;
-}
-#elif defined(__sparc__)
-static inline int testandset (int *p)
-{
-	int ret;
-
-	__asm__ __volatile__("ldstub	[%1], %0"
-			     : "=r" (ret)
-			     : "r" (p)
-			     : "memory");
-
-	return (ret ? 1 : 0);
-}
-#elif defined(__arm__)
-static inline int testandset (int *spinlock)
-{
-    register unsigned int ret;
-    __asm__ __volatile__("swp %0, %1, [%2]"
-                         : "=r"(ret)
-                         : "0"(1), "r"(spinlock));
-
-    return ret;
-}
-#elif defined(__mc68000)
-static inline int testandset (int *p)
-{
-    char ret;
-    __asm__ __volatile__("tas %1; sne %0"
-                         : "=r" (ret)
-                         : "m" (p)
-                         : "cc","memory");
-    return ret;
-}
-#elif defined(__ia64)
-
-#include <ia64intrin.h>
-
-static inline int testandset (int *p)
-{
-    return __sync_lock_test_and_set (p, 1);
-}
-#elif defined(__mips__)
-static inline int testandset (int *p)
-{
-    int ret;
-
-    __asm__ __volatile__ (
-	"	.set push		\n"
-	"	.set noat		\n"
-	"	.set mips2		\n"
-	"1:	li	$1, 1		\n"
-	"	ll	%0, %1		\n"
-	"	sc	$1, %1		\n"
-	"	beqz	$1, 1b		\n"
-	"	.set pop		"
-	: "=r" (ret), "+R" (*p)
-	:
-	: "memory");
-
-    return ret;
-}
-#else
-#error unimplemented CPU support
-#endif
-
-typedef int spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED 0
-
-#if defined(CONFIG_USER_ONLY)
-static inline void spin_lock(spinlock_t *lock)
-{
-    while (testandset(lock));
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-    *lock = 0;
-}
-
-static inline int spin_trylock(spinlock_t *lock)
-{
-    return !testandset(lock);
-}
-#else
-static inline void spin_lock(spinlock_t *lock)
-{
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-}
-
-static inline int spin_trylock(spinlock_t *lock)
-{
-    return 1;
-}
-#endif
+#include "qemu_spinlock.h"
 
 extern spinlock_t tb_lock;
 
 extern int tb_invalidated_flag;
 
--- qemu.orig/linux-user/arm/syscall.h
+++ qemu/linux-user/arm/syscall.h
@@ -26,11 +26,13 @@ struct target_pt_regs {
 #define ARM_ORIG_r0	uregs[17]
 
 #define ARM_SYSCALL_BASE	0x900000
 #define ARM_THUMB_SYSCALL	0
 
-#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
+#define ARM_NR_BASE	  0xf0000
+#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
+#define ARM_NR_set_tls	  (ARM_NR_BASE + 5)
 
 #define ARM_NR_semihosting	  0x123456
 #define ARM_NR_thumb_semihosting  0xAB
 
 #if defined(TARGET_WORDS_BIGENDIAN)
--- qemu.orig/linux-user/main.c
+++ qemu/linux-user/main.c
@@ -361,10 +361,54 @@ static void arm_cache_flush(abi_ulong st
             break;
         addr = last1 + 1;
     }
 }
 
+/* Handle a jump to the kernel code page.  */
+static int
+do_kernel_trap(CPUARMState *env)
+{
+    uint32_t addr;
+    uint32_t *ptr;
+    uint32_t cpsr;
+
+    switch (env->regs[15]) {
+    case 0xffff0fc0: /* __kernel_cmpxchg */
+        /* XXX: This only works between threads, not between processes.
+           Use native atomic operations.  */
+        /* ??? This probably breaks horribly if the access segfaults.  */
+        cpu_lock();
+        ptr = (uint32_t *)env->regs[2];
+        cpsr = cpsr_read(env);
+        if (*ptr == env->regs[0]) {
+            *ptr = env->regs[1];
+            env->regs[0] = 0;
+            cpsr |= CPSR_C;
+        } else {
+            env->regs[0] = -1;
+            cpsr &= ~CPSR_C;
+        }
+        cpsr_write(env, cpsr, CPSR_C);
+        cpu_unlock();
+        break;
+    case 0xffff0fe0: /* __kernel_get_tls */
+        env->regs[0] = env->cp15.c13_tls2;
+        break;
+    default:
+        return 1;
+    }
+    /* Jump back to the caller.  */
+    addr = env->regs[14];
+    if (addr & 1) {
+        env->thumb = 1;
+        addr &= ~1;
+    }
+    env->regs[15] = addr;
+
+    return 0;
+}
+
 void cpu_loop(CPUARMState *env)
 {
     int trapnr;
     unsigned int n, insn;
     target_siginfo_t info;
@@ -471,32 +515,50 @@ void cpu_loop(CPUARMState *env)
                         get_user_u32(insn, env->regs[15] - 4);
                         n = insn & 0xffffff;
                     }
                 }
 
-                if (n == ARM_NR_cacheflush) {
-                    arm_cache_flush(env->regs[0], env->regs[1]);
-                } else if (n == ARM_NR_semihosting
-                           || n == ARM_NR_thumb_semihosting) {
+                if (n == ARM_NR_semihosting
+                    || n == ARM_NR_thumb_semihosting) {
                     env->regs[0] = do_arm_semihosting (env);
                 } else if (n == 0 || n >= ARM_SYSCALL_BASE
                            || (env->thumb && n == ARM_THUMB_SYSCALL)) {
                     /* linux syscall */
                     if (env->thumb || n == 0) {
                         n = env->regs[7];
                     } else {
                         n -= ARM_SYSCALL_BASE;
                         env->eabi = 0;
                     }
-                    env->regs[0] = do_syscall(env,
-                                              n,
-                                              env->regs[0],
-                                              env->regs[1],
-                                              env->regs[2],
-                                              env->regs[3],
-                                              env->regs[4],
-                                              env->regs[5]);
+                    if ( n > ARM_NR_BASE) {
+                        switch (n)
+                          {
+                          case ARM_NR_cacheflush:
+                              arm_cache_flush(env->regs[0], env->regs[1]);
+                              break;
+#ifdef USE_NPTL
+                          case ARM_NR_set_tls:
+                              cpu_set_tls(env, env->regs[0]);
+                              env->regs[0] = 0;
+                              break;
+#endif
+                          default:
+                              printf ("Error: Bad syscall: %x\n", n);
+                              goto error;
+                          }
+                      }
+                    else
+                      {
+                        env->regs[0] = do_syscall(env,
+                                                  n,
+                                                  env->regs[0],
+                                                  env->regs[1],
+                                                  env->regs[2],
+                                                  env->regs[3],
+                                                  env->regs[4],
+                                                  env->regs[5]);
+                      }
                 } else {
                     goto error;
                 }
             }
             break;
@@ -531,10 +593,14 @@ void cpu_loop(CPUARMState *env)
                     info.si_code = TARGET_TRAP_BRKPT;
                     queue_signal(info.si_signo, &info);
                   }
             }
             break;
+        case EXCP_KERNEL_TRAP:
+            if (do_kernel_trap(env))
+              goto error;
+            break;
         default:
         error:
             fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
                     trapnr);
             cpu_dump_state(env, stderr, fprintf, 0);
@@ -2378,10 +2444,14 @@ int main(int argc, char **argv)
 #if defined(TARGET_ARM) || defined(TARGET_M68K)
     ts->stack_base = info->start_stack;
     ts->heap_base = info->brk;
     /* This will be filled in on the first SYS_HEAPINFO call.  */
     ts->heap_limit = 0;
+    /* Register the magic kernel code page.  The cpu will generate a
+       special exception when it tries to execute code here.  We can't
+       put real code here because it may be in use by the host kernel.  */
+    page_set_flags(0xffff0000, 0xffff0fff, 0);
 #endif
 
     if (gdbstub_port) {
         gdbserver_start (gdbstub_port);
         gdb_handlesig(env, 0);
--- qemu.orig/linux-user/qemu.h
+++ qemu/linux-user/qemu.h
@@ -100,10 +100,13 @@ typedef struct TaskState {
     /* Extra fields for semihosted binaries.  */
     uint32_t stack_base;
     uint32_t heap_base;
     uint32_t heap_limit;
 #endif
+#ifdef USE_NPTL
+    uint32_t *child_tidptr;
+#endif
     int used; /* non zero if used */
     struct image_info *info;
     uint8_t stack[0];
 } __attribute__((aligned(16))) TaskState;
 
--- qemu.orig/linux-user/syscall.c
+++ qemu/linux-user/syscall.c
@@ -69,13 +69,22 @@
 #include <linux/soundcard.h>
 #include <linux/dirent.h>
 #include <linux/kd.h>
 
 #include "qemu.h"
+#include "qemu_spinlock.h"
 
 //#define DEBUG
 
+#ifdef USE_NPTL
+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
+    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
+#else
+/* XXX: Hardcode the above values.  */
+#define CLONE_NPTL_FLAGS2 0
+#endif
+
 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
     || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
 /* 16 bit uid wrappers emulation */
 #define USE_UID16
 #endif
@@ -2690,27 +2699,46 @@ abi_long do_arch_prctl(CPUX86State *env,
 
 /* this stack is the equivalent of the kernel stack associated with a
    thread/process */
 #define NEW_STACK_SIZE 8192
 
+#ifdef USE_NPTL
+static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
 static int clone_func(void *arg)
 {
     CPUState *env = arg;
+#ifdef USE_NPTL
+    /* Wait until the parent has finished initializing the TLS state.  */
+    while (!spin_trylock(&nptl_lock))
+        usleep(1);
+    spin_unlock(&nptl_lock);
+#endif
     cpu_loop(env);
     /* never exits */
     return 0;
 }
 
 /* do_fork() Must return host values and target errnos (unlike most
    do_*() functions). */
-int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp)
+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
+            uint32_t *parent_tidptr, void *newtls,
+            uint32_t *child_tidptr)
 {
     int ret;
     TaskState *ts;
     uint8_t *new_stack;
     CPUState *new_env;
 
+#ifdef USE_NPTL
+    unsigned int nptl_flags;
+
+    if (flags & CLONE_PARENT_SETTID)
+        *parent_tidptr = gettid();
+#endif
+
     if (flags & CLONE_VM) {
         ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
         memset(ts, 0, sizeof(TaskState));
         new_stack = ts->stack;
         ts->used = 1;
@@ -2772,20 +2800,71 @@ int do_fork(CPUState *env, unsigned int 
 	new_env->regs[14] = newsp;
 #else
 #error unsupported target CPU
 #endif
         new_env->opaque = ts;
+#ifdef USE_NPTL
+        nptl_flags = flags;
+        flags &= ~CLONE_NPTL_FLAGS2;
+
+        if (nptl_flags & CLONE_CHILD_CLEARTID) {
+            ts->child_tidptr = child_tidptr;
+        }
+
+        if (nptl_flags & CLONE_SETTLS)
+            cpu_set_tls (new_env, newtls);
+
+        /* Grab the NPTL spinlock so that the thread setup appears
+           atomic.  */
+        if (nptl_flags & CLONE_CHILD_SETTID)
+            spin_lock(&nptl_lock);
+
+#else
+        if (flags & CLONE_NPTL_FLAGS2)
+            return -EINVAL;
+#endif
+
+	 if (CLONE_VFORK & flags)
+		flags ^= CLONE_VM;
 #ifdef __ia64__
         ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
 #else
 	ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
 #endif
+#ifdef USE_NPTL
+        if (ret != -1) {
+            if (nptl_flags & CLONE_CHILD_SETTID)
+                *child_tidptr = ret;
+        }
+
+        /* Allow the child to continue.  */
+        if (nptl_flags & CLONE_CHILD_SETTID)
+            spin_unlock(&nptl_lock);
+#endif
     } else {
         /* if no CLONE_VM, we consider it is a fork */
-        if ((flags & ~CSIGNAL) != 0)
+        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
             return -EINVAL;
         ret = fork();
+#ifdef USE_NPTL
+        /* There is a race condition here.  The parent process could
+           theoretically read the TID in the child process before the child
+           tid is set.  This would require using either ptrace
+           (not implemented) or having *_tidptr to point at a shared memory
+           mapping.  We can't repeat the spinlock hack used above because
+           the child process gets its own copy of the lock.  */
+        if (ret == 0) {
+            /* Child Process.  */
+            if (flags & CLONE_CHILD_SETTID)
+                *child_tidptr = gettid();
+            ts = (TaskState *)env->opaque;
+            if (flags & CLONE_CHILD_CLEARTID)
+                ts->child_tidptr = child_tidptr;
+            if (flags & CLONE_SETTLS)
+                cpu_set_tls (env, newtls);
+        }
+#endif
     }
     return ret;
 }
 
 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
@@ -3106,11 +3185,11 @@ abi_long do_syscall(void *cpu_env, int n
         break;
     case TARGET_NR_brk:
         ret = do_brk(arg1);
         break;
     case TARGET_NR_fork:
-        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
+        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
         break;
 #ifdef TARGET_NR_waitpid
     case TARGET_NR_waitpid:
         {
             int status;
@@ -4463,11 +4542,12 @@ abi_long do_syscall(void *cpu_env, int n
 #endif
     case TARGET_NR_fsync:
         ret = get_errno(fsync(arg1));
         break;
     case TARGET_NR_clone:
-        ret = get_errno(do_fork(cpu_env, arg1, arg2));
+        ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
+                        (void *)arg4, (uint32_t *)arg5));
         break;
 #ifdef __NR_exit_group
         /* new thread calls */
     case TARGET_NR_exit_group:
         gdb_exit(cpu_env, arg1);
@@ -4908,11 +4988,12 @@ abi_long do_syscall(void *cpu_env, int n
     case TARGET_NR_putpmsg:
         goto unimplemented;
 #endif
 #ifdef TARGET_NR_vfork
     case TARGET_NR_vfork:
-        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
+        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
+                                NULL, NULL, NULL));
         break;
 #endif
 #ifdef TARGET_NR_ugetrlimit
     case TARGET_NR_ugetrlimit:
     {
--- /dev/null
+++ qemu/qemu_spinlock.h
@@ -0,0 +1,181 @@
+/*
+ * Atomic operation helper include
+ *
+ *  Copyright (c) 2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef QEMU_SPINLOCK_H
+#define QEMU_SPINLOCK_H
+
+#ifdef __powerpc__
+static inline int testandset (int *p)
+{
+    int ret;
+    __asm__ __volatile__ (
+                          "0:    lwarx %0,0,%1\n"
+                          "      xor. %0,%3,%0\n"
+                          "      bne 1f\n"
+                          "      stwcx. %2,0,%1\n"
+                          "      bne- 0b\n"
+                          "1:    "
+                          : "=&r" (ret)
+                          : "r" (p), "r" (1), "r" (0)
+                          : "cr0", "memory");
+    return ret;
+}
+#endif
+
+#ifdef __i386__
+static inline int testandset (int *p)
+{
+    long int readval = 0;
+
+    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
+                          : "+m" (*p), "+a" (readval)
+                          : "r" (1)
+                          : "cc");
+    return readval;
+}
+#endif
+
+#ifdef __x86_64__
+static inline int testandset (int *p)
+{
+    long int readval = 0;
+
+    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
+                          : "+m" (*p), "+a" (readval)
+                          : "r" (1)
+                          : "cc");
+    return readval;
+}
+#endif
+
+#ifdef __s390__
+static inline int testandset (int *p)
+{
+    int ret;
+
+    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
+			  "   jl    0b"
+			  : "=&d" (ret)
+			  : "r" (1), "a" (p), "0" (*p)
+			  : "cc", "memory" );
+    return ret;
+}
+#endif
+
+#ifdef __alpha__
+static inline int testandset (int *p)
+{
+    int ret;
+    unsigned long one;
+
+    __asm__ __volatile__ ("0:	mov 1,%2\n"
+			  "	ldl_l %0,%1\n"
+			  "	stl_c %2,%1\n"
+			  "	beq %2,1f\n"
+			  ".subsection 2\n"
+			  "1:	br 0b\n"
+			  ".previous"
+			  : "=r" (ret), "=m" (*p), "=r" (one)
+			  : "m" (*p));
+    return ret;
+}
+#endif
+
+#ifdef __sparc__
+static inline int testandset (int *p)
+{
+	int ret;
+
+	__asm__ __volatile__("ldstub	[%1], %0"
+			     : "=r" (ret)
+			     : "r" (p)
+			     : "memory");
+
+	return (ret ? 1 : 0);
+}
+#endif
+
+#ifdef __arm__
+static inline int testandset (int *spinlock)
+{
+    register unsigned int ret;
+    __asm__ __volatile__("swp %0, %1, [%2]"
+                         : "=r"(ret)
+                         : "0"(1), "r"(spinlock));
+
+    return ret;
+}
+#endif
+
+#ifdef __mc68000
+static inline int testandset (int *p)
+{
+    char ret;
+    __asm__ __volatile__("tas %1; sne %0"
+                         : "=r" (ret)
+                         : "m" (p)
+                         : "cc","memory");
+    return ret;
+}
+#endif
+
+#ifdef __ia64
+#include <ia64intrin.h>
+
+static inline int testandset (int *p)
+{
+    return __sync_lock_test_and_set (p, 1);
+}
+#endif
+
+typedef int spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED 0
+
+#if defined(CONFIG_USER_ONLY)
+static inline void spin_lock(spinlock_t *lock)
+{
+    while (testandset(lock));
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+    *lock = 0;
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+    return !testandset(lock);
+}
+#else
+static inline void spin_lock(spinlock_t *lock)
+{
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+    return 1;
+}
+#endif
+
+#endif
--- qemu.orig/target-arm/cpu.h
+++ qemu/target-arm/cpu.h
@@ -36,10 +36,11 @@
 #define EXCP_DATA_ABORT      4
 #define EXCP_IRQ             5
 #define EXCP_FIQ             6
 #define EXCP_BKPT            7
 #define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
+#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
 
 #define ARMV7M_EXCP_RESET   1
 #define ARMV7M_EXCP_NMI     2
 #define ARMV7M_EXCP_HARD    3
 #define ARMV7M_EXCP_MEM     4
@@ -220,10 +221,19 @@ int cpu_arm_signal_handler(int host_sign
                            void *puc);
 
 void cpu_lock(void);
 void cpu_unlock(void);
 
+void cpu_lock(void);
+void cpu_unlock(void);
+#if defined(USE_NPTL)
+static inline void cpu_set_tls(CPUARMState *env, void *newtls)
+{
+  env->cp15.c13_tls2 = (uint32_t)(long)newtls;
+}
+#endif
+
 #define CPSR_M (0x1f)
 #define CPSR_T (1 << 5)
 #define CPSR_F (1 << 6)
 #define CPSR_I (1 << 7)
 #define CPSR_A (1 << 8)
--- qemu.orig/target-arm/op.c
+++ qemu/target-arm/op.c
@@ -1007,10 +1007,16 @@ void OPPROTO op_exception_exit(void)
 {
     env->exception_index = EXCP_EXCEPTION_EXIT;
     cpu_loop_exit();
 }
 
+void OPPROTO op_kernel_trap(void)
+{
+    env->exception_index = EXCP_KERNEL_TRAP;
+    cpu_loop_exit();
+}
+
 /* VFP support.  We follow the convention used for VFP instrunctions:
    Single precition routines have a "s" suffix, double precision a
    "d" suffix.  */
 
 #define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void)
--- qemu.orig/target-arm/translate.c
+++ qemu/target-arm/translate.c
@@ -7518,11 +7518,18 @@ static inline int gen_intermediate_code_
             /* We always get here via a jump, so know we are not in a
                conditional execution block.  */
             gen_op_exception_exit();
         }
 #endif
-
+#ifdef CONFIG_USER_ONLY
+        /* Intercept jump to the magic kernel page.  */
+        if (dc->pc > 0xffff0000) {
+            gen_op_kernel_trap();
+            dc->is_jmp = DISAS_UPDATE;
+            break;
+        }
+#endif
         if (env->nb_breakpoints > 0) {
             for(j = 0; j < env->nb_breakpoints; j++) {
                 if (env->breakpoints[j] == dc->pc) {
                     gen_set_condexec(dc);
                     gen_op_movl_T0_im((long)dc->pc);