author:    Marek Vasut <marek.vasut@gmail.com>    2007-07-06 20:17:10 +0000
committer: Michael Krelin <hacker@klever.net>     2007-07-06 20:17:10 +0000
commit:    e8149fc33657862b24ce407324229ff42109da23
tree:      ed1affa03ee2147983c5091811fb55f298751f21 /packages/qemu/qemu-0.9.0+cvs20070701
parent:    bc71d701903d36346bc69faf313a7a814fbe3824
qemu-native: amd64-enabled 0.9.0 cvs snapshot from bug #2589.
Diffstat (limited to 'packages/qemu/qemu-0.9.0+cvs20070701')
7 files changed, 1996 insertions(+), 0 deletions(-)
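The centerpiece of the snapshot is arm_nptl-0.9.0.patch below: in user-mode emulation it registers the magic kernel page at 0xffff0000 with no access permissions, has the translator raise EXCP_KERNEL_TRAP on any jump into that page, and emulates the kernel's helper entry points in do_kernel_trap() (__kernel_cmpxchg at 0xffff0fc0, __kernel_get_tls at 0xffff0fe0), alongside the ARM_NR_set_tls private syscall and TID/TLS handling for clone(). Pre-ARMv6 cores have no usable atomic compare-and-swap instruction, so NPTL bootstraps its locks on this kernel-provided helper; without the trap emulation, threaded guest binaries die on the jump to 0xffff0fc0. For orientation, this is the guest-side contract being emulated — a minimal sketch, where the typedef and wrapper names are illustrative but the address and calling convention are fixed by the ARM Linux ABI:

```c
/* Guest-side (ARM Linux) view of the commpage helper this commit
 * teaches qemu-user to emulate: __kernel_cmpxchg lives at the fixed
 * address 0xffff0fc0 and returns 0 iff *ptr held oldval and has been
 * replaced by newval (do_kernel_trap() mirrors exactly this, setting
 * r0 and the CPSR carry flag). */
typedef int (*kernel_cmpxchg_fn)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg ((kernel_cmpxchg_fn)0xffff0fc0)

/* Illustrative wrapper: spin until the lock word goes 0 -> 1. */
static void guest_spin_lock(int *lock)
{
    while (__kernel_cmpxchg(0, 1, lock) != 0)
        ;  /* lost the race: another thread changed *lock; retry */
}
```

The full patch set follows verbatim.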
diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/.mtn2git_empty b/packages/qemu/qemu-0.9.0+cvs20070701/.mtn2git_empty new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/.mtn2git_empty diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/arm_nptl-0.9.0.patch b/packages/qemu/qemu-0.9.0+cvs20070701/arm_nptl-0.9.0.patch new file mode 100644 index 0000000000..fe1f0945b6 --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/arm_nptl-0.9.0.patch @@ -0,0 +1,853 @@ +diff -Naru qemu-snapshot-2007-06-24_05.orig/configure qemu-snapshot-2007-06-24_05/configure +--- qemu-snapshot-2007-06-24_05.orig/configure 2007-06-24 16:31:54.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/configure 2007-06-24 16:33:58.000000000 +0200 +@@ -101,6 +101,7 @@ + darwin_user="no" + build_docs="no" + uname_release="" ++nptl="yes" + + # OS specific + targetos=`uname -s` +@@ -287,6 +288,8 @@ + *) echo "undefined SPARC architecture. Exiting";exit 1;; + esac + ;; ++ --disable-nptl) nptl="no" ++ ;; + esac + done + +@@ -530,6 +533,23 @@ + } + EOF + ++# check NPTL support ++cat > $TMPC <<EOF ++#include <sched.h> ++void foo() ++{ ++#ifndef CLONE_SETTLS ++#error bork ++#endif ++} ++EOF ++ ++if $cc -c -o $TMPO $TMPC 2> /dev/null ; then ++ : ++else ++ nptl="no" ++fi ++ + ########################################## + # SDL probe + +@@ -681,6 +701,7 @@ + echo "Target Sparc Arch $sparc_cpu" + fi + echo "kqemu support $kqemu" ++echo "NPTL support $nptl" + echo "Documentation $build_docs" + [ ! -z "$uname_release" ] && \ + echo "uname -r $uname_release" +@@ -1063,6 +1084,14 @@ + echo "SDL_CFLAGS=`$sdl_config --cflags`" >> $config_mak + fi + fi ++else ++ if test "$nptl" = "yes" ; then ++ case "$target_cpu" in ++ arm | armeb) ++ echo "#define USE_NPTL 1" >> $config_h ++ ;; ++ esac ++ fi + fi + + if test "$cocoa" = "yes" ; then +diff -Naru qemu-snapshot-2007-06-24_05.orig/exec-all.h qemu-snapshot-2007-06-24_05/exec-all.h +--- qemu-snapshot-2007-06-24_05.orig/exec-all.h 2007-05-23 21:58:10.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/exec-all.h 2007-06-24 16:33:58.000000000 +0200 +@@ -360,170 +360,7 @@ + extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; + extern void *io_mem_opaque[IO_MEM_NB_ENTRIES]; + +-#if defined(__powerpc__) +-static inline int testandset (int *p) +-{ +- int ret; +- __asm__ __volatile__ ( +- "0: lwarx %0,0,%1\n" +- " xor. %0,%3,%0\n" +- " bne 1f\n" +- " stwcx. 
%2,0,%1\n" +- " bne- 0b\n" +- "1: " +- : "=&r" (ret) +- : "r" (p), "r" (1), "r" (0) +- : "cr0", "memory"); +- return ret; +-} +-#elif defined(__i386__) +-static inline int testandset (int *p) +-{ +- long int readval = 0; +- +- __asm__ __volatile__ ("lock; cmpxchgl %2, %0" +- : "+m" (*p), "+a" (readval) +- : "r" (1) +- : "cc"); +- return readval; +-} +-#elif defined(__x86_64__) +-static inline int testandset (int *p) +-{ +- long int readval = 0; +- +- __asm__ __volatile__ ("lock; cmpxchgl %2, %0" +- : "+m" (*p), "+a" (readval) +- : "r" (1) +- : "cc"); +- return readval; +-} +-#elif defined(__s390__) +-static inline int testandset (int *p) +-{ +- int ret; +- +- __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n" +- " jl 0b" +- : "=&d" (ret) +- : "r" (1), "a" (p), "0" (*p) +- : "cc", "memory" ); +- return ret; +-} +-#elif defined(__alpha__) +-static inline int testandset (int *p) +-{ +- int ret; +- unsigned long one; +- +- __asm__ __volatile__ ("0: mov 1,%2\n" +- " ldl_l %0,%1\n" +- " stl_c %2,%1\n" +- " beq %2,1f\n" +- ".subsection 2\n" +- "1: br 0b\n" +- ".previous" +- : "=r" (ret), "=m" (*p), "=r" (one) +- : "m" (*p)); +- return ret; +-} +-#elif defined(__sparc__) +-static inline int testandset (int *p) +-{ +- int ret; +- +- __asm__ __volatile__("ldstub [%1], %0" +- : "=r" (ret) +- : "r" (p) +- : "memory"); +- +- return (ret ? 1 : 0); +-} +-#elif defined(__arm__) +-static inline int testandset (int *spinlock) +-{ +- register unsigned int ret; +- __asm__ __volatile__("swp %0, %1, [%2]" +- : "=r"(ret) +- : "0"(1), "r"(spinlock)); +- +- return ret; +-} +-#elif defined(__mc68000) +-static inline int testandset (int *p) +-{ +- char ret; +- __asm__ __volatile__("tas %1; sne %0" +- : "=r" (ret) +- : "m" (p) +- : "cc","memory"); +- return ret; +-} +-#elif defined(__ia64) +- +-#include <ia64intrin.h> +- +-static inline int testandset (int *p) +-{ +- return __sync_lock_test_and_set (p, 1); +-} +-#elif defined(__mips__) +-static inline int testandset (int *p) +-{ +- int ret; +- +- __asm__ __volatile__ ( +- " .set push \n" +- " .set noat \n" +- " .set mips2 \n" +- "1: li $1, 1 \n" +- " ll %0, %1 \n" +- " sc $1, %1 \n" +- " beqz $1, 1b \n" +- " .set pop " +- : "=r" (ret), "+R" (*p) +- : +- : "memory"); +- +- return ret; +-} +-#else +-#error unimplemented CPU support +-#endif +- +-typedef int spinlock_t; +- +-#define SPIN_LOCK_UNLOCKED 0 +- +-#if defined(CONFIG_USER_ONLY) +-static inline void spin_lock(spinlock_t *lock) +-{ +- while (testandset(lock)); +-} +- +-static inline void spin_unlock(spinlock_t *lock) +-{ +- *lock = 0; +-} +- +-static inline int spin_trylock(spinlock_t *lock) +-{ +- return !testandset(lock); +-} +-#else +-static inline void spin_lock(spinlock_t *lock) +-{ +-} +- +-static inline void spin_unlock(spinlock_t *lock) +-{ +-} +- +-static inline int spin_trylock(spinlock_t *lock) +-{ +- return 1; +-} +-#endif ++#include "qemu_spinlock.h" + + extern spinlock_t tb_lock; + +diff -Naru qemu-snapshot-2007-06-24_05.orig/linux-user/arm/syscall.h qemu-snapshot-2007-06-24_05/linux-user/arm/syscall.h +--- qemu-snapshot-2007-06-24_05.orig/linux-user/arm/syscall.h 2005-04-27 22:11:21.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/linux-user/arm/syscall.h 2007-06-24 16:33:58.000000000 +0200 +@@ -28,7 +28,9 @@ + #define ARM_SYSCALL_BASE 0x900000 + #define ARM_THUMB_SYSCALL 0 + +-#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2) ++#define ARM_NR_BASE 0xf0000 ++#define ARM_NR_cacheflush (ARM_NR_BASE + 2) ++#define ARM_NR_set_tls (ARM_NR_BASE + 5) + + #define ARM_NR_semihosting 0x123456 + #define 
ARM_NR_thumb_semihosting 0xAB +diff -Naru qemu-snapshot-2007-06-24_05.orig/linux-user/main.c qemu-snapshot-2007-06-24_05/linux-user/main.c +--- qemu-snapshot-2007-06-24_05.orig/linux-user/main.c 2007-06-22 00:55:02.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/linux-user/main.c 2007-06-24 16:33:58.000000000 +0200 +@@ -325,6 +325,50 @@ + } + } + ++/* Handle a jump to the kernel code page. */ ++static int ++do_kernel_trap(CPUARMState *env) ++{ ++ uint32_t addr; ++ uint32_t *ptr; ++ uint32_t cpsr; ++ ++ switch (env->regs[15]) { ++ case 0xffff0fc0: /* __kernel_cmpxchg */ ++ /* XXX: This only works between threads, not between processes. ++ Use native atomic operations. */ ++ /* ??? This probably breaks horribly if the access segfaults. */ ++ cpu_lock(); ++ ptr = (uint32_t *)env->regs[2]; ++ cpsr = cpsr_read(env); ++ if (*ptr == env->regs[0]) { ++ *ptr = env->regs[1]; ++ env->regs[0] = 0; ++ cpsr |= CPSR_C; ++ } else { ++ env->regs[0] = -1; ++ cpsr &= ~CPSR_C; ++ } ++ cpsr_write(env, cpsr, CPSR_C); ++ cpu_unlock(); ++ break; ++ case 0xffff0fe0: /* __kernel_get_tls */ ++ env->regs[0] = env->cp15.c13_tls; ++ break; ++ default: ++ return 1; ++ } ++ /* Jump back to the caller. */ ++ addr = env->regs[14]; ++ if (addr & 1) { ++ env->thumb = 1; ++ addr &= ~1; ++ } ++ env->regs[15] = addr; ++ ++ return 0; ++} ++ + void cpu_loop(CPUARMState *env) + { + int trapnr; +@@ -381,10 +425,8 @@ + } + } + +- if (n == ARM_NR_cacheflush) { +- arm_cache_flush(env->regs[0], env->regs[1]); +- } else if (n == ARM_NR_semihosting +- || n == ARM_NR_thumb_semihosting) { ++ if (n == ARM_NR_semihosting ++ || n == ARM_NR_thumb_semihosting) { + env->regs[0] = do_arm_semihosting (env); + } else if (n == 0 || n >= ARM_SYSCALL_BASE + || (env->thumb && n == ARM_THUMB_SYSCALL)) { +@@ -395,14 +437,34 @@ + n -= ARM_SYSCALL_BASE; + env->eabi = 0; + } +- env->regs[0] = do_syscall(env, +- n, +- env->regs[0], +- env->regs[1], +- env->regs[2], +- env->regs[3], +- env->regs[4], +- env->regs[5]); ++ if ( n > ARM_NR_BASE) { ++ switch (n) ++ { ++ case ARM_NR_cacheflush: ++ arm_cache_flush(env->regs[0], env->regs[1]); ++ break; ++#ifdef USE_NPTL ++ case ARM_NR_set_tls: ++ cpu_set_tls(env, env->regs[0]); ++ env->regs[0] = 0; ++ break; ++#endif ++ default: ++ printf ("Error: Bad syscall: %x\n", n); ++ goto error; ++ } ++ } ++ else ++ { ++ env->regs[0] = do_syscall(env, ++ n, ++ env->regs[0], ++ env->regs[1], ++ env->regs[2], ++ env->regs[3], ++ env->regs[4], ++ env->regs[5]); ++ } + } else { + goto error; + } +@@ -441,6 +503,10 @@ + } + } + break; ++ case EXCP_KERNEL_TRAP: ++ if (do_kernel_trap(env)) ++ goto error; ++ break; + default: + error: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", +@@ -2074,6 +2140,10 @@ + ts->heap_base = info->brk; + /* This will be filled in on the first SYS_HEAPINFO call. */ + ts->heap_limit = 0; ++ /* Register the magic kernel code page. The cpu will generate a ++ special exception when it tries to execute code here. We can't ++ put real code here because it may be in use by the host kernel. 
*/ ++ page_set_flags(0xffff0000, 0xffff0fff, 0); + #endif + + if (gdbstub_port) { +diff -Naru qemu-snapshot-2007-06-24_05.orig/linux-user/qemu.h qemu-snapshot-2007-06-24_05/linux-user/qemu.h +--- qemu-snapshot-2007-06-24_05.orig/linux-user/qemu.h 2007-05-26 17:09:38.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/linux-user/qemu.h 2007-06-24 16:33:58.000000000 +0200 +@@ -81,6 +81,9 @@ + uint32_t heap_limit; + #endif + int used; /* non zero if used */ ++#ifdef USE_NPTL ++ uint32_t *child_tidptr; ++#endif + struct image_info *info; + uint8_t stack[0]; + } __attribute__((aligned(16))) TaskState; +diff -Naru qemu-snapshot-2007-06-24_05.orig/linux-user/syscall.c qemu-snapshot-2007-06-24_05/linux-user/syscall.c +--- qemu-snapshot-2007-06-24_05.orig/linux-user/syscall.c 2007-06-21 23:57:11.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/linux-user/syscall.c 2007-06-24 16:33:58.000000000 +0200 +@@ -70,9 +70,18 @@ + #include <linux/kd.h> + + #include "qemu.h" ++#include "qemu_spinlock.h" + + //#define DEBUG + ++#ifdef USE_NPTL ++#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ ++ CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) ++#else ++/* XXX: Hardcode the above values. */ ++#define CLONE_NPTL_FLAGS2 0 ++#endif ++ + #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \ + || defined(TARGET_M68K) || defined(TARGET_SH4) + /* 16 bit uid wrappers emulation */ +@@ -2121,20 +2130,38 @@ + thread/process */ + #define NEW_STACK_SIZE 8192 + ++#ifdef USE_NPTL ++static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED; ++#endif ++ + static int clone_func(void *arg) + { + CPUState *env = arg; ++#ifdef HAVE_NPTL ++ /* Wait until the parent has finshed initializing the tls state. */ ++ while (!spin_trylock(&nptl_lock)) ++ usleep(1); ++ spin_unlock(&nptl_lock); ++#endif + cpu_loop(env); + /* never exits */ + return 0; + } + +-int do_fork(CPUState *env, unsigned int flags, unsigned long newsp) ++int do_fork(CPUState *env, unsigned int flags, unsigned long newsp, ++ uint32_t *parent_tidptr, void *newtls, ++ uint32_t *child_tidptr) + { + int ret; + TaskState *ts; + uint8_t *new_stack; + CPUState *new_env; ++#ifdef USE_NPTL ++ unsigned int nptl_flags; ++ ++ if (flags & CLONE_PARENT_SETTID) ++ *parent_tidptr = gettid(); ++#endif + + if (flags & CLONE_VM) { + ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE); +@@ -2201,16 +2228,60 @@ + #error unsupported target CPU + #endif + new_env->opaque = ts; ++#ifdef USE_NPTL ++ nptl_flags = flags; ++ flags &= ~CLONE_NPTL_FLAGS2; ++ if (nptl_flags & CLONE_CHILD_CLEARTID) { ++ ts->child_tidptr = child_tidptr; ++ } ++ if (nptl_flags & CLONE_SETTLS) ++ cpu_set_tls (new_env, newtls); ++ /* Grab the global cpu lock so that the thread setup appears ++ atomic. */ ++ if (nptl_flags & CLONE_CHILD_SETTID) ++ spin_lock(&nptl_lock); ++#else ++ if (flags & CLONE_NPTL_FLAGS2) ++ return -EINVAL; ++#endif ++ + #ifdef __ia64__ + ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); + #else + ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); + #endif ++#ifdef USE_NPTL ++ if (ret != -1) { ++ if (nptl_flags & CLONE_CHILD_SETTID) ++ *child_tidptr = ret; ++ } ++ /* Allow the child to continue. 
*/ ++ if (nptl_flags & CLONE_CHILD_SETTID) ++ spin_unlock(&nptl_lock); ++#endif + } else { +- /* if no CLONE_VM, we consider it is a fork */ +- if ((flags & ~CSIGNAL) != 0) +- return -EINVAL; +- ret = fork(); ++ /* if no CLONE_VM, we consider it is a fork */ ++ if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) ++ return -EINVAL; ++ ret = fork(); ++#ifdef USE_NPTL ++ /* There is a race condition here. The parent process could ++ theoretically read the TID in the child process before the child ++ tid is set. This would require using either ptrace ++ (not implemented) or having *_tidptr to point at a shared memory ++ mapping. We can't repeat the spinlock hack used above because ++ the child process gets its own copy of the lock. */ ++ if (ret == 0) { ++ /* Child Process. */ ++ if (flags & CLONE_CHILD_SETTID) ++ *child_tidptr = gettid(); ++ ts = (TaskState *)env->opaque; ++ if (flags & CLONE_CHILD_CLEARTID) ++ ts->child_tidptr = child_tidptr; ++ if (flags & CLONE_SETTLS) ++ cpu_set_tls (env, newtls); ++ } ++#endif + } + return ret; + } +@@ -2487,7 +2558,7 @@ + ret = do_brk(arg1); + break; + case TARGET_NR_fork: +- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0)); ++ ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL)); + break; + #ifdef TARGET_NR_waitpid + case TARGET_NR_waitpid: +@@ -3651,7 +3722,8 @@ + ret = get_errno(fsync(arg1)); + break; + case TARGET_NR_clone: +- ret = get_errno(do_fork(cpu_env, arg1, arg2)); ++ ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3, ++ (void *)arg4, (uint32_t *)arg5)); + break; + #ifdef __NR_exit_group + /* new thread calls */ +@@ -4039,7 +4111,8 @@ + #endif + #ifdef TARGET_NR_vfork + case TARGET_NR_vfork: +- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0)); ++ ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0, ++ NULL, NULL, NULL)); + break; + #endif + #ifdef TARGET_NR_ugetrlimit +@@ -4632,4 +4705,3 @@ + #endif + return ret; + } +- +diff -Naru qemu-snapshot-2007-06-24_05.orig/qemu_spinlock.h qemu-snapshot-2007-06-24_05/qemu_spinlock.h +--- qemu-snapshot-2007-06-24_05.orig/qemu_spinlock.h 1970-01-01 01:00:00.000000000 +0100 ++++ qemu-snapshot-2007-06-24_05/qemu_spinlock.h 2007-06-24 16:33:58.000000000 +0200 +@@ -0,0 +1,182 @@ ++/* ++ * internal execution defines for qemu ++ * ++ * Copyright (c) 2003 Fabrice Bellard ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#ifndef _QEMU_SPINLOCK_H ++#define _QEMU_SPINLOCK_H ++ ++#ifdef __powerpc__ ++static inline int testandset (int *p) ++{ ++ int ret; ++ __asm__ __volatile__ ( ++ "0: lwarx %0,0,%1\n" ++ " xor. %0,%3,%0\n" ++ " bne 1f\n" ++ " stwcx. 
%2,0,%1\n" ++ " bne- 0b\n" ++ "1: " ++ : "=&r" (ret) ++ : "r" (p), "r" (1), "r" (0) ++ : "cr0", "memory"); ++ return ret; ++} ++#endif ++ ++#ifdef __i386__ ++static inline int testandset (int *p) ++{ ++ long int readval = 0; ++ ++ __asm__ __volatile__ ("lock; cmpxchgl %2, %0" ++ : "+m" (*p), "+a" (readval) ++ : "r" (1) ++ : "cc"); ++ return readval; ++} ++#endif ++ ++#ifdef __x86_64__ ++static inline int testandset (int *p) ++{ ++ long int readval = 0; ++ ++ __asm__ __volatile__ ("lock; cmpxchgl %2, %0" ++ : "+m" (*p), "+a" (readval) ++ : "r" (1) ++ : "cc"); ++ return readval; ++} ++#endif ++ ++#ifdef __s390__ ++static inline int testandset (int *p) ++{ ++ int ret; ++ ++ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n" ++ " jl 0b" ++ : "=&d" (ret) ++ : "r" (1), "a" (p), "0" (*p) ++ : "cc", "memory" ); ++ return ret; ++} ++#endif ++ ++#ifdef __alpha__ ++static inline int testandset (int *p) ++{ ++ int ret; ++ unsigned long one; ++ ++ __asm__ __volatile__ ("0: mov 1,%2\n" ++ " ldl_l %0,%1\n" ++ " stl_c %2,%1\n" ++ " beq %2,1f\n" ++ ".subsection 2\n" ++ "1: br 0b\n" ++ ".previous" ++ : "=r" (ret), "=m" (*p), "=r" (one) ++ : "m" (*p)); ++ return ret; ++} ++#endif ++ ++#ifdef __sparc__ ++static inline int testandset (int *p) ++{ ++ int ret; ++ ++ __asm__ __volatile__("ldstub [%1], %0" ++ : "=r" (ret) ++ : "r" (p) ++ : "memory"); ++ ++ return (ret ? 1 : 0); ++} ++#endif ++ ++#ifdef __arm__ ++static inline int testandset (int *spinlock) ++{ ++ register unsigned int ret; ++ __asm__ __volatile__("swp %0, %1, [%2]" ++ : "=r"(ret) ++ : "0"(1), "r"(spinlock)); ++ ++ return ret; ++} ++#endif ++ ++#ifdef __mc68000 ++static inline int testandset (int *p) ++{ ++ char ret; ++ __asm__ __volatile__("tas %1; sne %0" ++ : "=r" (ret) ++ : "m" (p) ++ : "cc","memory"); ++ return ret; ++} ++#endif ++ ++#ifdef __ia64 ++#include <ia64intrin.h> ++ ++static inline int testandset (int *p) ++{ ++ return __sync_lock_test_and_set (p, 1); ++} ++#endif ++ ++typedef int spinlock_t; ++ ++#define SPIN_LOCK_UNLOCKED 0 ++ ++#if defined(CONFIG_USER_ONLY) ++static inline void spin_lock(spinlock_t *lock) ++{ ++ while (testandset(lock)); ++} ++ ++static inline void spin_unlock(spinlock_t *lock) ++{ ++ *lock = 0; ++} ++ ++static inline int spin_trylock(spinlock_t *lock) ++{ ++ return !testandset(lock); ++} ++#else ++static inline void spin_lock(spinlock_t *lock) ++{ ++} ++ ++static inline void spin_unlock(spinlock_t *lock) ++{ ++} ++ ++static inline int spin_trylock(spinlock_t *lock) ++{ ++ return 1; ++} ++#endif ++ ++#endif /* ! _QEMU_SPINLOCK_H */ +diff -Naru qemu-snapshot-2007-06-24_05.orig/target-arm/cpu.h qemu-snapshot-2007-06-24_05/target-arm/cpu.h +--- qemu-snapshot-2007-06-24_05.orig/target-arm/cpu.h 2007-06-03 23:02:37.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/target-arm/cpu.h 2007-06-24 16:33:58.000000000 +0200 +@@ -37,6 +37,9 @@ + #define EXCP_IRQ 5 + #define EXCP_FIQ 6 + #define EXCP_BKPT 7 ++#define EXCP_KERNEL_TRAP 8 /* Jumped to kernel code page. */ ++ ++ + + typedef void ARMWriteCPFunc(void *opaque, int cp_info, + int srcreg, int operand, uint32_t value); +@@ -97,6 +100,7 @@ + uint32_t c9_data; + uint32_t c13_fcse; /* FCSE PID. */ + uint32_t c13_context; /* Context ID. 
*/ ++ uint32_t c13_tls; /* Paul Brook told me to just add this ;) */ + uint32_t c15_cpar; /* XScale Coprocessor Access Register */ + } cp15; + +@@ -169,6 +173,15 @@ + int cpu_arm_signal_handler(int host_signum, void *pinfo, + void *puc); + ++void cpu_lock(void); ++void cpu_unlock(void); ++#if defined(USE_NPTL) ++static inline void cpu_set_tls(CPUARMState *env, void *newtls) ++{ ++ env->cp15.c13_tls = (uint32_t)newtls; ++} ++#endif ++ + #define CPSR_M (0x1f) + #define CPSR_T (1 << 5) + #define CPSR_F (1 << 6) +@@ -180,7 +193,11 @@ + #define CPSR_J (1 << 24) + #define CPSR_IT_0_1 (3 << 25) + #define CPSR_Q (1 << 27) +-#define CPSR_NZCV (0xf << 28) ++#define CPSR_V (1 << 28) ++#define CPSR_C (1 << 29) ++#define CPSR_Z (1 << 30) ++#define CPSR_N (1 << 31) ++#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) + + #define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV) + /* Return the current CPSR value. */ +diff -Naru qemu-snapshot-2007-06-24_05.orig/target-arm/exec.h qemu-snapshot-2007-06-24_05/target-arm/exec.h +--- qemu-snapshot-2007-06-24_05.orig/target-arm/exec.h 2007-06-03 19:44:36.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/target-arm/exec.h 2007-06-24 16:33:58.000000000 +0200 +@@ -68,8 +68,6 @@ + + /* In op_helper.c */ + +-void cpu_lock(void); +-void cpu_unlock(void); + void helper_set_cp(CPUState *, uint32_t, uint32_t); + uint32_t helper_get_cp(CPUState *, uint32_t); + void helper_set_cp15(CPUState *, uint32_t, uint32_t); +diff -Naru qemu-snapshot-2007-06-24_05.orig/target-arm/op.c qemu-snapshot-2007-06-24_05/target-arm/op.c +--- qemu-snapshot-2007-06-24_05.orig/target-arm/op.c 2007-05-21 19:48:01.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/target-arm/op.c 2007-06-24 16:33:58.000000000 +0200 +@@ -891,6 +891,12 @@ + cpu_loop_exit(); + } + ++void OPPROTO op_kernel_trap(void) ++{ ++ env->exception_index = EXCP_KERNEL_TRAP; ++ cpu_loop_exit(); ++} ++ + /* VFP support. We follow the convention used for VFP instrunctions: + Single precition routines have a "s" suffix, double precision a + "d" suffix. */ +diff -Naru qemu-snapshot-2007-06-24_05.orig/target-arm/translate.c qemu-snapshot-2007-06-24_05/target-arm/translate.c +--- qemu-snapshot-2007-06-24_05.orig/target-arm/translate.c 2007-06-11 20:59:35.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/target-arm/translate.c 2007-06-24 16:33:58.000000000 +0200 +@@ -3513,6 +3513,7 @@ + s->is_jmp = DISAS_JUMP; + } + ++ + /* generate intermediate code in gen_opc_buf and gen_opparam_buf for + basic block 'tb'. If search_pc is TRUE, also generate PC + information for each intermediate instruction. */ +@@ -3548,6 +3549,15 @@ + nb_gen_labels = 0; + lj = -1; + do { ++#ifdef CONFIG_USER_ONLY ++ /* Intercept jump to the magic kernel page. 
*/ ++ if (dc->pc > 0xffff0000) { ++ gen_op_kernel_trap(); ++ dc->is_jmp = DISAS_UPDATE; ++ break; ++ } ++#endif ++ + if (env->nb_breakpoints > 0) { + for(j = 0; j < env->nb_breakpoints; j++) { + if (env->breakpoints[j] == dc->pc) { diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/configure-0.9.0.patch b/packages/qemu/qemu-0.9.0+cvs20070701/configure-0.9.0.patch new file mode 100644 index 0000000000..d92f6a8264 --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/configure-0.9.0.patch @@ -0,0 +1,12 @@ +diff -Naru qemu-snapshot-2007-06-24_05.orig/configure qemu-snapshot-2007-06-24_05/configure +--- qemu-snapshot-2007-06-24_05.orig/configure 2007-06-23 18:03:35.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/configure 2007-06-24 16:30:32.000000000 +0200 +@@ -711,7 +711,7 @@ + echo "CC=$cc" >> $config_mak + echo "HOST_CC=$host_cc" >> $config_mak + echo "AR=$ar" >> $config_mak +-echo "STRIP=$strip -s -R .comment -R .note" >> $config_mak ++echo "STRIP=$strip" >> $config_mak + echo "OS_CFLAGS=$OS_CFLAGS" >> $config_mak + echo "OS_LDFLAGS=$OS_LDFLAGS" >> $config_mak + echo "ARCH_CFLAGS=$ARCH_CFLAGS" >> $config_mak diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/pl110_rgb-r0-0.9.0.patch b/packages/qemu/qemu-0.9.0+cvs20070701/pl110_rgb-r0-0.9.0.patch new file mode 100644 index 0000000000..3ad4115e73 --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/pl110_rgb-r0-0.9.0.patch @@ -0,0 +1,217 @@ +diff -Naru qemu-neo1973.orig/hw/pl110.c qemu-neo1973/hw/pl110.c +--- qemu-neo1973.orig/hw/pl110.c 2007-06-24 13:56:37.000000000 +0200 ++++ qemu-neo1973/hw/pl110.c 2007-06-24 14:12:09.000000000 +0200 +@@ -10,6 +10,7 @@ + #include "vl.h" + + #define PL110_CR_EN 0x001 ++#define PL110_CR_BGR 0x100 + #define PL110_CR_BEBO 0x200 + #define PL110_CR_BEPO 0x400 + #define PL110_CR_PWR 0x800 +@@ -114,6 +115,7 @@ + int first, last = 0; + int dirty, new_dirty; + int i; ++ int bpp_offset; + + if (!pl110_enabled(s)) + return; +@@ -145,12 +147,17 @@ + fprintf(stderr, "pl110: Bad color depth\n"); + exit(1); + } ++ if (s->cr & PL110_CR_BGR) ++ bpp_offset = 0; ++ else ++ bpp_offset = 18; ++ + if (s->cr & PL110_CR_BEBO) +- fn = fntable[s->bpp + 6]; ++ fn = fntable[s->bpp + 6 + bpp_offset]; + else if (s->cr & PL110_CR_BEPO) +- fn = fntable[s->bpp + 12]; ++ fn = fntable[s->bpp + 12 + bpp_offset]; + else +- fn = fntable[s->bpp]; ++ fn = fntable[s->bpp + bpp_offset]; + + src_width = s->cols; + switch (s->bpp) { +diff -Naru qemu-neo1973.orig/hw/pl110_template.h qemu-neo1973/hw/pl110_template.h +--- qemu-neo1973.orig/hw/pl110_template.h 2007-06-24 13:56:37.000000000 +0200 ++++ qemu-neo1973/hw/pl110_template.h 2007-06-24 14:12:09.000000000 +0200 +@@ -24,6 +24,16 @@ + #error unknown bit depth + #endif + ++#undef RGB ++#define BORDER bgr ++#define ORDER 0 ++#include "pl110_template.h" ++#define ORDER 1 ++#include "pl110_template.h" ++#define ORDER 2 ++#include "pl110_template.h" ++#define RGB ++#define BORDER rgb + #define ORDER 0 + #include "pl110_template.h" + #define ORDER 1 +@@ -33,26 +43,47 @@ + + static drawfn glue(pl110_draw_fn_,BITS)[18] = + { +- glue(pl110_draw_line1_lblp,BITS), +- glue(pl110_draw_line2_lblp,BITS), +- glue(pl110_draw_line4_lblp,BITS), +- glue(pl110_draw_line8_lblp,BITS), +- glue(pl110_draw_line16_lblp,BITS), +- glue(pl110_draw_line32_lblp,BITS), +- +- glue(pl110_draw_line1_bbbp,BITS), +- glue(pl110_draw_line2_bbbp,BITS), +- glue(pl110_draw_line4_bbbp,BITS), +- glue(pl110_draw_line8_bbbp,BITS), +- glue(pl110_draw_line16_bbbp,BITS), +- glue(pl110_draw_line32_bbbp,BITS), +- +- 
glue(pl110_draw_line1_lbbp,BITS), +- glue(pl110_draw_line2_lbbp,BITS), +- glue(pl110_draw_line4_lbbp,BITS), +- glue(pl110_draw_line8_lbbp,BITS), +- glue(pl110_draw_line16_lbbp,BITS), +- glue(pl110_draw_line32_lbbp,BITS) ++ glue(pl110_draw_line1_lblp_bgr,BITS), ++ glue(pl110_draw_line2_lblp_bgr,BITS), ++ glue(pl110_draw_line4_lblp_bgr,BITS), ++ glue(pl110_draw_line8_lblp_bgr,BITS), ++ glue(pl110_draw_line16_lblp_bgr,BITS), ++ glue(pl110_draw_line32_lblp_bgr,BITS), ++ ++ glue(pl110_draw_line1_bbbp_bgr,BITS), ++ glue(pl110_draw_line2_bbbp_bgr,BITS), ++ glue(pl110_draw_line4_bbbp_bgr,BITS), ++ glue(pl110_draw_line8_bbbp_bgr,BITS), ++ glue(pl110_draw_line16_bbbp_bgr,BITS), ++ glue(pl110_draw_line32_bbbp_bgr,BITS), ++ ++ glue(pl110_draw_line1_lbbp_bgr,BITS), ++ glue(pl110_draw_line2_lbbp_bgr,BITS), ++ glue(pl110_draw_line4_lbbp_bgr,BITS), ++ glue(pl110_draw_line8_lbbp_bgr,BITS), ++ glue(pl110_draw_line16_lbbp_bgr,BITS), ++ glue(pl110_draw_line32_lbbp_bgr,BITS), ++ ++ glue(pl110_draw_line1_lblp_rgb,BITS), ++ glue(pl110_draw_line2_lblp_rgb,BITS), ++ glue(pl110_draw_line4_lblp_rgb,BITS), ++ glue(pl110_draw_line8_lblp_rgb,BITS), ++ glue(pl110_draw_line16_lblp_rgb,BITS), ++ glue(pl110_draw_line32_lblp_rgb,BITS), ++ ++ glue(pl110_draw_line1_bbbp_rgb,BITS), ++ glue(pl110_draw_line2_bbbp_rgb,BITS), ++ glue(pl110_draw_line4_bbbp_rgb,BITS), ++ glue(pl110_draw_line8_bbbp_rgb,BITS), ++ glue(pl110_draw_line16_bbbp_rgb,BITS), ++ glue(pl110_draw_line32_bbbp_rgb,BITS), ++ ++ glue(pl110_draw_line1_lbbp_rgb,BITS), ++ glue(pl110_draw_line2_lbbp_rgb,BITS), ++ glue(pl110_draw_line4_lbbp_rgb,BITS), ++ glue(pl110_draw_line8_lbbp_rgb,BITS), ++ glue(pl110_draw_line16_lbbp_rgb,BITS), ++ glue(pl110_draw_line32_lbbp_rgb,BITS), + }; + + #undef BITS +@@ -61,18 +92,18 @@ + #else + + #if ORDER == 0 +-#define NAME glue(lblp, BITS) ++#define NAME glue(glue(lblp_, BORDER), BITS) + #ifdef WORDS_BIGENDIAN + #define SWAP_WORDS 1 + #endif + #elif ORDER == 1 +-#define NAME glue(bbbp, BITS) ++#define NAME glue(glue(bbbp_, BORDER), BITS) + #ifndef WORDS_BIGENDIAN + #define SWAP_WORDS 1 + #endif + #else + #define SWAP_PIXELS 1 +-#define NAME glue(lbbp, BITS) ++#define NAME glue(glue(lbbp_, BORDER), BITS) + #ifdef WORDS_BIGENDIAN + #define SWAP_WORDS 1 + #endif +@@ -195,27 +226,34 @@ + #ifdef SWAP_WORDS + data = bswap32(data); + #endif ++#ifdef RGB ++#define LSB r ++#define MSB b ++#else ++#define LSB b ++#define MSB r ++#endif + #if 0 +- r = data & 0x1f; ++ LSB = data & 0x1f; + data >>= 5; + g = data & 0x3f; + data >>= 6; +- b = data & 0x1f; ++ MSB = data & 0x1f; + data >>= 5; + #else +- r = (data & 0x1f) << 3; ++ LSB = (data & 0x1f) << 3; + data >>= 5; + g = (data & 0x3f) << 2; + data >>= 6; +- b = (data & 0x1f) << 3; ++ MSB = (data & 0x1f) << 3; + data >>= 5; + #endif + COPY_PIXEL(d, glue(rgb_to_pixel,BITS)(r, g, b)); +- r = (data & 0x1f) << 3; ++ LSB = (data & 0x1f) << 3; + data >>= 5; + g = (data & 0x3f) << 2; + data >>= 6; +- b = (data & 0x1f) << 3; ++ MSB = (data & 0x1f) << 3; + data >>= 5; + COPY_PIXEL(d, glue(rgb_to_pixel,BITS)(r, g, b)); + width -= 2; +@@ -229,14 +267,21 @@ + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *)src; ++#ifdef RGB ++#define LSB r ++#define MSB b ++#else ++#define LSB b ++#define MSB r ++#endif + #ifdef SWAP_WORDS +- r = data & 0xff; ++ LSB = data & 0xff; + g = (data >> 8) & 0xff; +- b = (data >> 16) & 0xff; ++ MSB = (data >> 16) & 0xff; + #else +- r = (data >> 24) & 0xff; ++ LSB = (data >> 24) & 0xff; + g = (data >> 16) & 0xff; +- b = (data >> 8) & 0xff; ++ MSB = (data >> 8) & 0xff; + 
#endif + COPY_PIXEL(d, glue(rgb_to_pixel,BITS)(r, g, b)); + width--; diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/qemu-0.9.0-gcc4.patch b/packages/qemu/qemu-0.9.0+cvs20070701/qemu-0.9.0-gcc4.patch new file mode 100644 index 0000000000..189cd09b67 --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/qemu-0.9.0-gcc4.patch @@ -0,0 +1,881 @@ +diff -Naru qemu-neo1973.orig/dyngen.c qemu-neo1973/dyngen.c +--- qemu-neo1973.orig/dyngen.c 2007-06-24 13:56:38.000000000 +0200 ++++ qemu-neo1973/dyngen.c 2007-06-24 14:33:11.000000000 +0200 +@@ -32,6 +32,8 @@ + + #include "config-host.h" + ++//#define DEBUG_OP ++ + /* NOTE: we test CONFIG_WIN32 instead of _WIN32 to enabled cross + compilation */ + #if defined(CONFIG_WIN32) +@@ -1429,6 +1431,644 @@ + #endif + + ++#if defined(HOST_I386) || defined(HOST_X86_64) ++ ++/* This byte is the first byte of an instruction. */ ++#define FLAG_INSN (1 << 0) ++/* This byte has been processed as part of an instruction. */ ++#define FLAG_SCANNED (1 << 1) ++/* This instruction is a return instruction. Gcc cometimes generates prefix ++ bytes, so may be more than one byte long. */ ++#define FLAG_RET (1 << 2) ++/* This is either the target of a jump, or the preceeding instruction uses ++ a pc-relative offset. */ ++#define FLAG_TARGET (1 << 3) ++/* This is a magic instruction that needs fixing up. */ ++#define FLAG_EXIT (1 << 4) ++#define MAX_EXITS 5 ++ ++static void ++bad_opcode(const char *name, uint32_t op) ++{ ++ error("Unsupported opcode %0*x in %s", (op > 0xff) ? 4 : 2, op, name); ++} ++ ++/* Mark len bytes as scanned, Returns insn_size + len. Reports an error ++ if these bytes have already been scanned. */ ++static int ++eat_bytes(const char *name, char *flags, int insn, int insn_size, int len) ++{ ++ while (len > 0) { ++ /* This should never occur in sane code. */ ++ if (flags[insn + insn_size] & FLAG_SCANNED) ++ error ("Overlapping instructions in %s", name); ++ flags[insn + insn_size] |= FLAG_SCANNED; ++ insn_size++; ++ len--; ++ } ++ return insn_size; ++} ++ ++static void ++trace_i386_insn (const char *name, uint8_t *start_p, char *flags, int insn, ++ int len) ++{ ++ uint8_t *ptr; ++ uint8_t op; ++ int modrm; ++ int is_prefix; ++ int op_size; ++ int addr_size; ++ int insn_size; ++ int is_ret; ++ int is_condjmp; ++ int is_jmp; ++ int is_exit; ++ int is_pcrel; ++ int immed; ++ int seen_rexw; ++ int32_t disp; ++ ++ ptr = start_p + insn; ++ /* nonzero if this insn has a ModR/M byte. */ ++ modrm = 1; ++ /* The size of the immediate value in this instruction. */ ++ immed = 0; ++ /* The operand size. */ ++ op_size = 4; ++ /* The address size */ ++ addr_size = 4; ++ /* The total length of this instruction. */ ++ insn_size = 0; ++ is_prefix = 1; ++ is_ret = 0; ++ is_condjmp = 0; ++ is_jmp = 0; ++ is_exit = 0; ++ seen_rexw = 0; ++ is_pcrel = 0; ++ ++ while (is_prefix) { ++ op = ptr[insn_size]; ++ insn_size = eat_bytes(name, flags, insn, insn_size, 1); ++ is_prefix = 0; ++ switch (op >> 4) { ++ case 0: ++ case 1: ++ case 2: ++ case 3: ++ if (op == 0x0f) { ++ /* two-byte opcode. */ ++ op = ptr[insn_size]; ++ insn_size = eat_bytes(name, flags, insn, insn_size, 1); ++ switch (op >> 4) { ++ case 0: ++ if ((op & 0xf) > 3) ++ modrm = 0; ++ break; ++ case 1: /* vector move or prefetch */ ++ case 2: /* various moves and vector compares. 
*/ ++ case 4: /* cmov */ ++ case 5: /* vector instructions */ ++ case 6: ++ case 13: ++ case 14: ++ case 15: ++ break; ++ case 7: /* mmx */ ++ if (op & 0x77) /* emms */ ++ modrm = 0; ++ break; ++ case 3: /* wrmsr, rdtsc, rdmsr, rdpmc, sysenter, sysexit */ ++ modrm = 0; ++ break; ++ case 8: /* long conditional jump */ ++ is_condjmp = 1; ++ immed = op_size; ++ modrm = 0; ++ break; ++ case 9: /* setcc */ ++ break; ++ case 10: ++ switch (op & 0x7) { ++ case 0: /* push fs/gs */ ++ case 1: /* pop fs/gs */ ++ case 2: /* cpuid/rsm */ ++ modrm = 0; ++ break; ++ case 4: /* shld/shrd immediate */ ++ immed = 1; ++ break; ++ default: /* Normal instructions with a ModR/M byte. */ ++ break; ++ } ++ break; ++ case 11: ++ switch (op & 0xf) { ++ case 10: /* bt, bts, btr, btc */ ++ immed = 1; ++ break; ++ default: ++ /* cmpxchg, lss, btr, lfs, lgs, movzx, btc, bsf, bsr ++ undefined, and movsx */ ++ break; ++ } ++ break; ++ case 12: ++ if (op & 8) { ++ /* bswap */ ++ modrm = 0; ++ } else { ++ switch (op & 0x7) { ++ case 2: ++ case 4: ++ case 5: ++ case 6: ++ immed = 1; ++ break; ++ default: ++ break; ++ } ++ } ++ break; ++ } ++ } else if ((op & 0x07) <= 0x3) { ++ /* General arithmentic ax. */ ++ } else if ((op & 0x07) <= 0x5) { ++ /* General arithmetic ax, immediate. */ ++ if (op & 0x01) ++ immed = op_size; ++ else ++ immed = 1; ++ modrm = 0; ++ } else if ((op & 0x23) == 0x22) { ++ /* Segment prefix. */ ++ is_prefix = 1; ++ } else { ++ /* Segment register push/pop or DAA/AAA/DAS/AAS. */ ++ modrm = 0; ++ } ++ break; ++ ++#if defined(HOST_X86_64) ++ case 4: /* rex prefix. */ ++ is_prefix = 1; ++ /* The address/operand size is actually 64-bit, but the immediate ++ values in the instruction are still 32-bit. */ ++ op_size = 4; ++ addr_size = 4; ++ if (op & 8) ++ seen_rexw = 1; ++ break; ++#else ++ case 4: /* inc/dec register. */ ++#endif ++ case 5: /* push/pop general register. */ ++ modrm = 0; ++ break; ++ ++ case 6: ++ switch (op & 0x0f) { ++ case 0: /* pusha */ ++ case 1: /* popa */ ++ modrm = 0; ++ break; ++ case 2: /* bound */ ++ case 3: /* arpl */ ++ break; ++ case 4: /* FS */ ++ case 5: /* GS */ ++ is_prefix = 1; ++ break; ++ case 6: /* opcode size prefix. */ ++ op_size = 2; ++ is_prefix = 1; ++ break; ++ case 7: /* Address size prefix. */ ++ addr_size = 2; ++ is_prefix = 1; ++ break; ++ case 8: /* push immediate */ ++ immed = op_size; ++ modrm = 0; ++ break; ++ case 10: /* push 8-bit immediate */ ++ immed = 1; ++ modrm = 0; ++ break; ++ case 9: /* imul immediate */ ++ immed = op_size; ++ break; ++ case 11: /* imul 8-bit immediate */ ++ immed = 1; ++ break; ++ case 12: /* insb */ ++ case 13: /* insw */ ++ case 14: /* outsb */ ++ case 15: /* outsw */ ++ modrm = 0; ++ break; ++ } ++ break; ++ ++ case 7: /* Short conditional jump. */ ++ is_condjmp = 1; ++ immed = 1; ++ modrm = 0; ++ break; ++ ++ case 8: ++ if ((op & 0xf) <= 3) { ++ /* arithmetic immediate. */ ++ if ((op & 3) == 1) ++ immed = op_size; ++ else ++ immed = 1; ++ } ++ /* else test, xchg, mov, lea or pop general. */ ++ break; ++ ++ case 9: ++ /* Various single-byte opcodes with no modrm byte. */ ++ modrm = 0; ++ if (op == 10) { ++ /* Call */ ++ immed = 4; ++ } ++ break; ++ ++ case 10: ++ switch ((op & 0xe) >> 1) { ++ case 0: /* mov absoliute immediate. */ ++ case 1: ++ if (seen_rexw) ++ immed = 8; ++ else ++ immed = addr_size; ++ break; ++ case 4: /* test immediate. */ ++ if (op & 1) ++ immed = op_size; ++ else ++ immed = 1; ++ break; ++ default: /* Various string ops. 
*/ ++ break; ++ } ++ modrm = 0; ++ break; ++ ++ case 11: /* move immediate to register */ ++ if (op & 8) { ++ if (seen_rexw) ++ immed = 8; ++ else ++ immed = op_size; ++ } else { ++ immed = 1; ++ } ++ modrm = 0; ++ break; ++ ++ case 12: ++ switch (op & 0xf) { ++ case 0: /* shift immediate */ ++ case 1: ++ immed = 1; ++ break; ++ case 2: /* ret immediate */ ++ immed = 2; ++ modrm = 0; ++ bad_opcode(name, op); ++ break; ++ case 3: /* ret */ ++ modrm = 0; ++ is_ret = 1; ++ case 4: /* les */ ++ case 5: /* lds */ ++ break; ++ case 6: /* mov immediate byte */ ++ immed = 1; ++ break; ++ case 7: /* mov immediate */ ++ immed = op_size; ++ break; ++ case 8: /* enter */ ++ /* TODO: Is this right? */ ++ immed = 3; ++ modrm = 0; ++ break; ++ case 10: /* retf immediate */ ++ immed = 2; ++ modrm = 0; ++ bad_opcode(name, op); ++ break; ++ case 13: /* int */ ++ immed = 1; ++ modrm = 0; ++ break; ++ case 11: /* retf */ ++ case 15: /* iret */ ++ modrm = 0; ++ bad_opcode(name, op); ++ break; ++ default: /* leave, int3 or into */ ++ modrm = 0; ++ break; ++ } ++ break; ++ ++ case 13: ++ if ((op & 0xf) >= 8) { ++ /* Coprocessor escape. For our purposes this is just a normal ++ instruction with a ModR/M byte. */ ++ } else if ((op & 0xf) >= 4) { ++ /* AAM, AAD or XLAT */ ++ modrm = 0; ++ } ++ /* else shift instruction */ ++ break; ++ ++ case 14: ++ switch ((op & 0xc) >> 2) { ++ case 0: /* loop or jcxz */ ++ is_condjmp = 1; ++ immed = 1; ++ break; ++ case 1: /* in/out immed */ ++ immed = 1; ++ break; ++ case 2: /* call or jmp */ ++ switch (op & 3) { ++ case 0: /* call */ ++ immed = op_size; ++ break; ++ case 1: /* long jump */ ++ immed = 4; ++ is_jmp = 1; ++ break; ++ case 2: /* far jmp */ ++ bad_opcode(name, op); ++ break; ++ case 3: /* short jmp */ ++ immed = 1; ++ is_jmp = 1; ++ break; ++ } ++ break; ++ case 3: /* in/out register */ ++ break; ++ } ++ modrm = 0; ++ break; ++ ++ case 15: ++ switch ((op & 0xe) >> 1) { ++ case 0: ++ case 1: ++ is_prefix = 1; ++ break; ++ case 2: ++ case 4: ++ case 5: ++ case 6: ++ modrm = 0; ++ /* Some privileged insns are used as markers. */ ++ switch (op) { ++ case 0xf4: /* hlt: Exit translation block. */ ++ is_exit = 1; ++ break; ++ case 0xfa: /* cli: Jump to label. */ ++ is_exit = 1; ++ immed = 4; ++ break; ++ case 0xfb: /* sti: TB patch jump. */ ++ /* Mark the insn for patching, but continue sscanning. */ ++ flags[insn] |= FLAG_EXIT; ++ immed = 4; ++ break; ++ } ++ break; ++ case 3: /* unary grp3 */ ++ if ((ptr[insn_size] & 0x38) == 0) { ++ if (op == 0xf7) ++ immed = op_size; ++ else ++ immed = 1; /* test immediate */ ++ } ++ break; ++ case 7: /* inc/dec grp4/5 */ ++ /* TODO: This includes indirect jumps. We should fail if we ++ encounter one of these. 
*/ ++ break; ++ } ++ break; ++ } ++ } ++ ++ if (modrm) { ++ if (addr_size != 4) ++ error("16-bit addressing mode used in %s", name); ++ ++ disp = 0; ++ modrm = ptr[insn_size]; ++ insn_size = eat_bytes(name, flags, insn, insn_size, 1); ++ modrm &= 0xc7; ++ switch ((modrm & 0xc0) >> 6) { ++ case 0: ++ if (modrm == 5) ++ disp = 4; ++ break; ++ case 1: ++ disp = 1; ++ break; ++ case 2: ++ disp = 4; ++ break; ++ } ++ if ((modrm & 0xc0) != 0xc0 && (modrm & 0x7) == 4) { ++ /* SIB byte */ ++ if (modrm == 4 && (ptr[insn_size] & 0x7) == 5) { ++ disp = 4; ++ is_pcrel = 1; ++ } ++ insn_size = eat_bytes(name, flags, insn, insn_size, 1); ++ } ++ insn_size = eat_bytes(name, flags, insn, insn_size, disp); ++ } ++ insn_size = eat_bytes(name, flags, insn, insn_size, immed); ++ if (is_condjmp || is_jmp) { ++ if (immed == 1) { ++ disp = (int8_t)*(ptr + insn_size - 1); ++ } else { ++ disp = (((int32_t)*(ptr + insn_size - 1)) << 24) ++ | (((int32_t)*(ptr + insn_size - 2)) << 16) ++ | (((int32_t)*(ptr + insn_size - 3)) << 8) ++ | *(ptr + insn_size - 4); ++ } ++ disp += insn_size; ++ /* Jumps to external symbols point to the address of the offset ++ before relocation. */ ++ /* ??? These are probably a tailcall. We could fix them up by ++ replacing them with jmp to EOB + call, but it's easier to just ++ prevent the compiler generating them. */ ++ if (disp == 1) ++ error("Unconditional jump (sibcall?) in %s", name); ++ disp += insn; ++ if (disp < 0 || disp > len) ++ error("Jump outside instruction in %s", name); ++ ++ if ((flags[disp] & (FLAG_INSN | FLAG_SCANNED)) == FLAG_SCANNED) ++ error("Overlapping instructions in %s", name); ++ ++ flags[disp] |= (FLAG_INSN | FLAG_TARGET); ++ is_pcrel = 1; ++ } ++ if (is_pcrel) { ++ /* Mark the following insn as a jump target. This will stop ++ this instruction being moved. */ ++ flags[insn + insn_size] |= FLAG_TARGET; ++ } ++ if (is_ret) ++ flags[insn] |= FLAG_RET; ++ ++ if (is_exit) ++ flags[insn] |= FLAG_EXIT; ++ ++ if (!(is_jmp || is_ret || is_exit)) ++ flags[insn + insn_size] |= FLAG_INSN; ++} ++ ++/* Scan a function body. Returns the position of the return sequence. ++ Sets *patch_bytes to the number of bytes that need to be copied from that ++ location. If no patching is required (ie. the return is the last insn) ++ *patch_bytes will be set to -1. *plen is the number of code bytes to copy. ++ */ ++static int trace_i386_op(const char * name, uint8_t *start_p, int *plen, ++ int *patch_bytes, int *exit_addrs) ++{ ++ char *flags; ++ int more; ++ int insn; ++ int retpos; ++ int bytes; ++ int num_exits; ++ int len; ++ int last_insn; ++ ++ len = *plen; ++ flags = malloc(len + 1); ++ memset(flags, 0, len + 1); ++ flags[0] |= FLAG_INSN; ++ more = 1; ++ while (more) { ++ more = 0; ++ for (insn = 0; insn < len; insn++) { ++ if ((flags[insn] & (FLAG_INSN | FLAG_SCANNED)) == FLAG_INSN) { ++ trace_i386_insn(name, start_p, flags, insn, len); ++ more = 1; ++ } ++ } ++ } ++ ++ /* Strip any unused code at the end of the function. */ ++ while (len > 0 && flags[len - 1] == 0) ++ len--; ++ ++ retpos = -1; ++ num_exits = 0; ++ last_insn = 0; ++ for (insn = 0; insn < len; insn++) { ++ if (flags[insn] & FLAG_RET) { ++ /* ??? In theory it should be possible to handle multiple return ++ points. In practice it's not worth the effort. 
*/ ++ if (retpos != -1) ++ error("Multiple return instructions in %s", name); ++ retpos = insn; ++ } ++ if (flags[insn] & FLAG_EXIT) { ++ if (num_exits == MAX_EXITS) ++ error("Too many block exits in %s", name); ++ exit_addrs[num_exits] = insn; ++ num_exits++; ++ } ++ if (flags[insn] & FLAG_INSN) ++ last_insn = insn; ++ } ++ ++ exit_addrs[num_exits] = -1; ++ if (retpos == -1) { ++ if (num_exits == 0) { ++ error ("No return instruction found in %s", name); ++ } else { ++ retpos = len; ++ last_insn = len; ++ } ++ } ++ ++ /* If the return instruction is the last instruction we can just ++ remove it. */ ++ if (retpos == last_insn) ++ *patch_bytes = -1; ++ else ++ *patch_bytes = 0; ++ ++ /* Back up over any nop instructions. */ ++ while (retpos > 0 ++ && (flags[retpos] & FLAG_TARGET) == 0 ++ && (flags[retpos - 1] & FLAG_INSN) != 0 ++ && start_p[retpos - 1] == 0x90) { ++ retpos--; ++ } ++ ++ if (*patch_bytes == -1) { ++ *plen = retpos; ++ free (flags); ++ return retpos; ++ } ++ *plen = len; ++ ++ /* The ret is in the middle of the function. Find four more bytes that ++ so the ret can be replaced by a jmp. */ ++ /* ??? Use a short jump where possible. */ ++ bytes = 4; ++ insn = retpos + 1; ++ /* We can clobber everything up to the next jump target. */ ++ while (insn < len && bytes > 0 && (flags[insn] & FLAG_TARGET) == 0) { ++ insn++; ++ bytes--; ++ } ++ if (bytes > 0) { ++ /* ???: Strip out nop blocks. */ ++ /* We can't do the replacement without clobbering anything important. ++ Copy preceeding instructions(s) to give us some space. */ ++ while (retpos > 0) { ++ /* If this byte is the target of a jmp we can't move it. */ ++ if (flags[retpos] & FLAG_TARGET) ++ break; ++ ++ (*patch_bytes)++; ++ bytes--; ++ retpos--; ++ ++ /* Break out of the loop if we have enough space and this is either ++ the first byte of an instruction or a pad byte. */ ++ if ((flags[retpos] & (FLAG_INSN | FLAG_SCANNED)) != FLAG_SCANNED ++ && bytes <= 0) { ++ break; ++ } ++ } ++ } ++ ++ if (bytes > 0) ++ error("Unable to replace ret with jmp in %s\n", name); ++ ++ free(flags); ++ return retpos; ++} ++ ++#endif ++ + #define MAX_ARGS 3 + + /* generate op code */ +@@ -1442,6 +2082,11 @@ + uint8_t args_present[MAX_ARGS]; + const char *sym_name, *p; + EXE_RELOC *rel; ++#if defined(HOST_I386) || defined(HOST_X86_64) ++ int patch_bytes; ++ int retpos; ++ int exit_addrs[MAX_EXITS]; ++#endif + + /* Compute exact size excluding prologue and epilogue instructions. 
+ * Increment start_offset to skip epilogue instructions, then compute +@@ -1452,33 +2097,12 @@ + p_end = p_start + size; + start_offset = offset; + #if defined(HOST_I386) || defined(HOST_X86_64) +-#ifdef CONFIG_FORMAT_COFF +- { +- uint8_t *p; +- p = p_end - 1; +- if (p == p_start) +- error("empty code for %s", name); +- while (*p != 0xc3) { +- p--; +- if (p <= p_start) +- error("ret or jmp expected at the end of %s", name); +- } +- copy_size = p - p_start; +- } +-#else + { + int len; + len = p_end - p_start; +- if (len == 0) +- error("empty code for %s", name); +- if (p_end[-1] == 0xc3) { +- len--; +- } else { +- error("ret or jmp expected at the end of %s", name); +- } ++ retpos = trace_i386_op(name, p_start, &len, &patch_bytes, exit_addrs); + copy_size = len; + } +-#endif + #elif defined(HOST_PPC) + { + uint8_t *p; +@@ -1710,6 +2334,13 @@ + } + + if (gen_switch == 2) { ++#if defined(HOST_I386) || defined(HOST_X86_64) ++ if (patch_bytes != -1) ++ copy_size += patch_bytes; ++#ifdef DEBUG_OP ++ copy_size += 2; ++#endif ++#endif + fprintf(outfile, "DEF(%s, %d, %d)\n", name + 3, nb_args, copy_size); + } else if (gen_switch == 1) { + +@@ -1915,7 +2546,43 @@ + #error unsupport object format + #endif + } ++ } ++ /* Replace the marker instructions with the actual opcodes. */ ++ for (i = 0; exit_addrs[i] != -1; i++) { ++ int op; ++ switch (p_start[exit_addrs[i]]) ++ { ++ case 0xf4: op = 0xc3; break; /* hlt -> ret */ ++ case 0xfa: op = 0xe9; break; /* cli -> jmp */ ++ case 0xfb: op = 0xe9; break; /* sti -> jmp */ ++ default: error("Internal error"); ++ } ++ fprintf(outfile, ++ " *(uint8_t *)(gen_code_ptr + %d) = 0x%x;\n", ++ exit_addrs[i], op); + } ++ /* Fix up the return instruction. */ ++ if (patch_bytes != -1) { ++ if (patch_bytes) { ++ fprintf(outfile, " memcpy(gen_code_ptr + %d," ++ "gen_code_ptr + %d, %d);\n", ++ copy_size, retpos, patch_bytes); ++ } ++ fprintf(outfile, ++ " *(uint8_t *)(gen_code_ptr + %d) = 0xe9;\n", ++ retpos); ++ fprintf(outfile, ++ " *(uint32_t *)(gen_code_ptr + %d) = 0x%x;\n", ++ retpos + 1, copy_size - (retpos + 5)); ++ ++ copy_size += patch_bytes; ++ } ++#ifdef DEBUG_OP ++ fprintf(outfile, ++ " *(uint16_t *)(gen_code_ptr + %d) = 0x9090;\n", ++ copy_size); ++ copy_size += 2; ++#endif + } + #elif defined(HOST_X86_64) + { +@@ -1949,6 +2616,42 @@ + } + } + } ++ /* Replace the marker instructions with the actual opcodes. */ ++ for (i = 0; exit_addrs[i] != -1; i++) { ++ int op; ++ switch (p_start[exit_addrs[i]]) ++ { ++ case 0xf4: op = 0xc3; break; /* hlt -> ret */ ++ case 0xfa: op = 0xe9; break; /* cli -> jmp */ ++ case 0xfb: op = 0xe9; break; /* sti -> jmp */ ++ default: error("Internal error"); ++ } ++ fprintf(outfile, ++ " *(uint8_t *)(gen_code_ptr + %d) = 0x%x;\n", ++ exit_addrs[i], op); ++ } ++ /* Fix up the return instruction. 
*/ ++ if (patch_bytes != -1) { ++ if (patch_bytes) { ++ fprintf(outfile, " memcpy(gen_code_ptr + %d," ++ "gen_code_ptr + %d, %d);\n", ++ copy_size, retpos, patch_bytes); ++ } ++ fprintf(outfile, ++ " *(uint8_t *)(gen_code_ptr + %d) = 0xe9;\n", ++ retpos); ++ fprintf(outfile, ++ " *(uint32_t *)(gen_code_ptr + %d) = 0x%x;\n", ++ retpos + 1, copy_size - (retpos + 5)); ++ ++ copy_size += patch_bytes; ++ } ++#ifdef DEBUG_OP ++ fprintf(outfile, ++ " *(uint16_t *)(gen_code_ptr + %d) = 0x9090;\n", ++ copy_size); ++ copy_size += 2; ++#endif + } + #elif defined(HOST_PPC) + { +diff -Naru qemu-neo1973.orig/dyngen-exec.h qemu-neo1973/dyngen-exec.h +--- qemu-neo1973.orig/dyngen-exec.h 2007-06-24 13:56:38.000000000 +0200 ++++ qemu-neo1973/dyngen-exec.h 2007-06-24 14:35:52.000000000 +0200 +@@ -194,7 +194,12 @@ + #endif + + /* force GCC to generate only one epilog at the end of the function */ ++#if defined(__i386__) || defined(__x86_64__) ++/* Also add 4 bytes of padding so that we can replace the ret with a jmp. */ ++#define FORCE_RET() asm volatile ("nop;nop;nop;nop"); ++#else + #define FORCE_RET() __asm__ __volatile__("" : : : "memory"); ++#endif + + #ifndef OPPROTO + #define OPPROTO +@@ -244,11 +249,18 @@ + #endif + + #if defined(__i386__) +-#define EXIT_TB() asm volatile ("ret") +-#define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n) ++/* Dyngen will replace hlt instructions with a ret instruction. Inserting a ++ ret directly would confuse dyngen. */ ++#define EXIT_TB() asm volatile ("hlt") ++/* Dyngen will replace cli with 0x9e (jmp). ++ We generate the offset manually. */ ++#define GOTO_LABEL_PARAM(n) \ ++ asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n " - 1f;1:") + #elif defined(__x86_64__) +-#define EXIT_TB() asm volatile ("ret") +-#define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n) ++/* The same as i386. */ ++#define EXIT_TB() asm volatile ("hlt") ++#define GOTO_LABEL_PARAM(n) \ ++ asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n " - 1f;1:") + #elif defined(__powerpc__) + #define EXIT_TB() asm volatile ("blr") + #define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n) +diff -Naru qemu-neo1973.orig/exec-all.h qemu-neo1973/exec-all.h +--- qemu-neo1973.orig/exec-all.h 2007-06-24 14:31:58.000000000 +0200 ++++ qemu-neo1973/exec-all.h 2007-06-24 14:33:11.000000000 +0200 +@@ -329,14 +329,15 @@ + + #elif defined(__i386__) && defined(USE_DIRECT_JUMP) + +-/* we patch the jump instruction directly */ ++/* we patch the jump instruction directly. Use sti in place of the actual ++ jmp instruction so that dyngen can patch in the correct result. 
*/ + #define GOTO_TB(opname, tbparam, n)\ + do {\ + asm volatile (".section .data\n"\ + ASM_OP_LABEL_NAME(n, opname) ":\n"\ + ".long 1f\n"\ + ASM_PREVIOUS_SECTION \ +- "jmp " ASM_NAME(__op_jmp) #n "\n"\ ++ "sti;.long " ASM_NAME(__op_jmp) #n " - 1f\n"\ + "1:\n");\ + } while (0) + +diff -Naru qemu-neo1973.orig/target-ppc/exec.h qemu-neo1973/target-ppc/exec.h +--- qemu-neo1973.orig/target-ppc/exec.h 2007-06-24 13:56:32.000000000 +0200 ++++ qemu-neo1973/target-ppc/exec.h 2007-06-24 14:33:11.000000000 +0200 +@@ -66,11 +66,7 @@ + #define FT1 (env->ft1) + #define FT2 (env->ft2) + +-#if defined (DEBUG_OP) +-# define RETURN() __asm__ __volatile__("nop" : : : "memory"); +-#else +-# define RETURN() __asm__ __volatile__("" : : : "memory"); +-#endif ++#define RETURN() FORCE_RET() + + static inline target_ulong rotl8 (target_ulong i, int n) + { diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/qemu-amd64-32b-mapping-0.9.0.patch b/packages/qemu/qemu-0.9.0+cvs20070701/qemu-amd64-32b-mapping-0.9.0.patch new file mode 100644 index 0000000000..8f5e9d8b4b --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/qemu-amd64-32b-mapping-0.9.0.patch @@ -0,0 +1,21 @@ +diff -Naru qemu-snapshot-2007-07-01_05.orig/linux-user/mmap.c qemu-snapshot-2007-07-01_05/linux-user/mmap.c +--- qemu-snapshot-2007-07-01_05.orig/linux-user/mmap.c 2007-06-03 17:31:32.000000000 +0200 ++++ qemu-snapshot-2007-07-01_05/linux-user/mmap.c 2007-07-02 14:00:30.000000000 +0200 +@@ -234,7 +234,7 @@ + host_offset = offset & qemu_host_page_mask; + host_len = len + offset - host_offset; + host_start = (long)mmap(real_start ? g2h(real_start) : NULL, +- host_len, prot, flags, fd, host_offset); ++ host_len, prot, (flags | MAP_32BIT), fd, host_offset); + if (host_start == -1) + return host_start; + /* update start so that it points to the file position at 'offset' */ +@@ -388,7 +388,7 @@ + int prot; + + /* XXX: use 5 args syscall */ +- new_addr = (long)mremap(g2h(old_addr), old_size, new_size, flags); ++ new_addr = (long)mremap(g2h(old_addr), old_size, new_size, (flags | MAP_32BIT)); + if (new_addr == -1) + return new_addr; + new_addr = h2g(new_addr); diff --git a/packages/qemu/qemu-0.9.0+cvs20070701/qemu-sdl-cursor-0.9.0.patch b/packages/qemu/qemu-0.9.0+cvs20070701/qemu-sdl-cursor-0.9.0.patch new file mode 100644 index 0000000000..08d5513f36 --- /dev/null +++ b/packages/qemu/qemu-0.9.0+cvs20070701/qemu-sdl-cursor-0.9.0.patch @@ -0,0 +1,12 @@ +diff -Naru qemu-snapshot-2007-06-24_05.orig/sdl.c qemu-snapshot-2007-06-24_05/sdl.c +--- qemu-snapshot-2007-06-24_05.orig/sdl.c 2007-06-21 23:08:02.000000000 +0200 ++++ qemu-snapshot-2007-06-24_05/sdl.c 2007-06-24 16:33:20.000000000 +0200 +@@ -245,7 +245,7 @@ + + if (kbd_mouse_is_absolute()) { + SDL_ShowCursor(1); +- SDL_SetCursor(sdl_cursor_hidden); ++ /* SDL_SetCursor(sdl_cursor_hidden); */ + } else { + SDL_ShowCursor(0); + } |