author | Nitin A Kamble <nitin.a.kamble@intel.com> | 2011-12-02 12:20:06 -0800
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2011-12-05 22:45:06 +0000
commit | f43d633540b41c94eacfbc3c5c450ddd192b2164
tree | 96e6c2b74d0a63ec9d4eb176e86c2dbea5cfddcb /meta/recipes-support/liboil/liboil-0.3.17
parent | 22f487a089be2e1689e7137e4438f7b66c67984e
liboil: patch source code for x32
Make the assembly syntax compatible with x32 gcc; otherwise x32 gcc throws errors.
Fixes [YOCTO #1412]
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/recipes-support/liboil/liboil-0.3.17')
-rw-r--r-- | meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch | 222
1 file changed, 222 insertions(+), 0 deletions(-)
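The change in the patch below is mechanical: every inline-asm memory operand that uses an argument as its base register gains GCC's "q" operand modifier. On ordinary x86-64 a pointer operand already sits in a 64-bit register, so an address like (%1,%%rcx,4) assembles as-is; on x32 pointers are 32 bits wide, %1 expands to a 32-bit register name such as %edi, and gcc rejects the mixed-width address (%edi,%rcx,4). Writing %q1 prints the 64-bit name of the operand's register regardless of the operand's own width. A minimal sketch of the idiom follows; the function and variable names are illustrative and do not appear in the patch.

#include <stdint.h>

/* Sketch only: mirrors the addressing pattern fixed in wavelet.c.
 * With plain "%1" the base register is printed at the operand's own
 * width, which is 32 bits on x32; "%q1" always prints the 64-bit
 * (quad) register name, so base and index agree in width. */
static inline int16_t
load_elem (int16_t *s, uint64_t i)
{
  int16_t v;
  asm volatile (
    " movw (%q1,%2,2), %0\n"  /* load s[i]; %2 is 64-bit, needs no modifier */
    : "=r" (v)
    : "r" (s), "r" (i)
    : "memory");
  return v;
}

Built for ordinary x86-64 the generated assembly is unchanged; built with gcc -mx32, the %q spelling is what keeps the effective address valid.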
diff --git a/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch b/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch
new file mode 100644
index 0000000000..473380e9fc
--- /dev/null
+++ b/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch
@@ -0,0 +1,222 @@
+Upstream-Status: Pending
+
+Make the assembly syntax compatible with x32 gcc. Otherwise x32 gcc throws errors.
+
+Signed-Off-By: Nitin A Kamble <nitin.a.kamble@intel.com>
+2011/12/01
+
+
+Index: liboil-0.3.17/liboil/amd64/wavelet.c
+===================================================================
+--- liboil-0.3.17.orig/liboil/amd64/wavelet.c
++++ liboil-0.3.17/liboil/amd64/wavelet.c
+@@ -21,14 +21,14 @@ deinterleave2_asm (int16_t *d1, int16_t
+ asm volatile ("\n"
+ " sub $2, %%rcx\n"
+ "1:\n"
+- " movw (%1,%%rcx,4), %%ax\n"
+- " movw %%ax, (%0,%%rcx,2)\n"
+- " movw 2(%1,%%rcx,4), %%ax\n"
+- " movw %%ax, (%2,%%rcx,2)\n"
+- " movw 4(%1,%%rcx,4), %%ax\n"
+- " movw %%ax, 2(%0,%%rcx,2)\n"
+- " movw 6(%1,%%rcx,4), %%ax\n"
+- " movw %%ax, 2(%2,%%rcx,2)\n"
++ " movw (%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, (%q0,%%rcx,2)\n"
++ " movw 2(%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, (%q2,%%rcx,2)\n"
++ " movw 4(%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, 2(%q0,%%rcx,2)\n"
++ " movw 6(%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, 2(%q2,%%rcx,2)\n"
+ " sub $2, %%rcx\n"
+ " jge 1b\n"
+ : "+r" (d1), "+r" (s_2xn), "+r" (d2), "+c" (n)
+@@ -53,20 +53,20 @@ deinterleave2_mmx (int16_t *d1, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,4), %%mm0\n"
+- " movq 8(%1,%%rcx,4), %%mm1\n"
++ " movq (%q1,%%rcx,4), %%mm0\n"
++ " movq 8(%q1,%%rcx,4), %%mm1\n"
+ " pslld $16, %%mm0\n"
+ " pslld $16, %%mm1\n"
+ " psrad $16, %%mm0\n"
+ " psrad $16, %%mm1\n"
+ " packssdw %%mm1, %%mm0\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
+- " movq (%1,%%rcx,4), %%mm0\n"
+- " movq 8(%1,%%rcx,4), %%mm1\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
++ " movq (%q1,%%rcx,4), %%mm0\n"
++ " movq 8(%q1,%%rcx,4), %%mm1\n"
+ " psrad $16, %%mm0\n"
+ " psrad $16, %%mm1\n"
+ " packssdw %%mm1, %%mm0\n"
+- " movq %%mm0, (%2,%%rcx,2)\n"
++ " movq %%mm0, (%q2,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -93,10 +93,10 @@ deinterleave2_mmx_2 (int16_t *d1, int16_
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " pshufw $0xd8, (%1,%%rcx,4), %%mm0\n"
+- " movd %%mm0, (%0,%%rcx,2)\n"
+- " pshufw $0x8d, (%1,%%rcx,4), %%mm0\n"
+- " movd %%mm0, (%2,%%rcx,2)\n"
++ " pshufw $0xd8, (%q1,%%rcx,4), %%mm0\n"
++ " movd %%mm0, (%q0,%%rcx,2)\n"
++ " pshufw $0x8d, (%q1,%%rcx,4), %%mm0\n"
++ " movd %%mm0, (%q2,%%rcx,2)\n"
+ " add $2, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -123,16 +123,16 @@ deinterleave2_mmx_3 (int16_t *d1, int16_
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,4), %%mm1\n"
+- " movq (%1,%%rcx,4), %%mm2\n"
+- " movq 8(%1,%%rcx,4), %%mm0\n"
++ " movq (%q1,%%rcx,4), %%mm1\n"
++ " movq (%q1,%%rcx,4), %%mm2\n"
++ " movq 8(%q1,%%rcx,4), %%mm0\n"
+ " punpcklwd %%mm0, %%mm1\n"
+ " punpckhwd %%mm0, %%mm2\n"
+ " movq %%mm1, %%mm0\n"
+ " punpcklwd %%mm2, %%mm0\n"
+ " punpckhwd %%mm2, %%mm1\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
+- " movq %%mm1, (%2,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
++ " movq %%mm1, (%q2,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -159,26 +159,26 @@ deinterleave2_mmx_4 (int16_t *d1, int16_
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,4), %%mm1\n"
++ " movq (%q1,%%rcx,4), %%mm1\n"
+ " movq %%mm1, %%mm2\n"
+- " movq 8(%1,%%rcx,4), %%mm0\n"
+- " movq 16(%1,%%rcx,4), %%mm5\n"
++ " movq 8(%q1,%%rcx,4), %%mm0\n"
++ " movq 16(%q1,%%rcx,4), %%mm5\n"
+ " punpcklwd %%mm0, %%mm1\n"
+ " movq %%mm5, %%mm6\n"
+ " punpckhwd %%mm0, %%mm2\n"
+- " movq 24(%1,%%rcx,4), %%mm4\n"
++ " movq 24(%q1,%%rcx,4), %%mm4\n"
+ " movq %%mm1, %%mm0\n"
+ " punpcklwd %%mm4, %%mm5\n"
+ " punpcklwd %%mm2, %%mm0\n"
+ " punpckhwd %%mm4, %%mm6\n"
+ " punpckhwd %%mm2, %%mm1\n"
+ " movq %%mm5, %%mm4\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " punpcklwd %%mm6, %%mm4\n"
+- " movq %%mm1, (%2,%%rcx,2)\n"
++ " movq %%mm1, (%q2,%%rcx,2)\n"
+ " punpckhwd %%mm6, %%mm5\n"
+- " movq %%mm4, 8(%0,%%rcx,2)\n"
+- " movq %%mm5, 8(%2,%%rcx,2)\n"
++ " movq %%mm4, 8(%q0,%%rcx,2)\n"
++ " movq %%mm5, 8(%q2,%%rcx,2)\n"
+ " add $8, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -252,13 +252,13 @@ interleave2_mmx (int16_t *d_2xn, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,2), %%mm0\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
++ " movq (%q1,%%rcx,2), %%mm0\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
+ " movq %%mm0, %%mm2\n"
+ " punpckhwd %%mm1, %%mm0\n"
+ " punpcklwd %%mm1, %%mm2\n"
+- " movq %%mm2, (%0,%%rcx,4)\n"
+- " movq %%mm0, 8(%0,%%rcx,4)\n"
++ " movq %%mm2, (%q0,%%rcx,4)\n"
++ " movq %%mm0, 8(%q0,%%rcx,4)\n"
+ " add $4, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -285,12 +285,12 @@ lift_add_shift1_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $1, %%mm1\n"
+- " paddw (%1,%%rcx,2), %%mm1\n"
+- " movq %%mm1, (%0,%%rcx,2)\n"
++ " paddw (%q1,%%rcx,2), %%mm1\n"
++ " movq %%mm1, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
+@@ -317,13 +317,13 @@ lift_sub_shift1_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
+- " movq (%1,%%rcx,2), %%mm0\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
++ " movq (%q1,%%rcx,2), %%mm0\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $1, %%mm1\n"
+ " psubw %%mm1, %%mm0\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
+@@ -350,12 +350,12 @@ lift_add_shift2_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $2, %%mm1\n"
+- " paddw (%1,%%rcx,2), %%mm1\n"
+- " movq %%mm1, (%0,%%rcx,2)\n"
++ " paddw (%q1,%%rcx,2), %%mm1\n"
++ " movq %%mm1, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
+@@ -382,13 +382,13 @@ lift_sub_shift2_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
+- " movq (%1,%%rcx,2), %%mm0\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
++ " movq (%q1,%%rcx,2), %%mm0\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $2, %%mm1\n"
+ " psubw %%mm1, %%mm0\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
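For reference (not part of the commit), "q" belongs to a family of x86 operand modifiers GCC supports in extended asm: "q" prints the 64-bit register name, "k" the 32-bit, "w" the 16-bit, and "b" the 8-bit one. A hypothetical snippet illustrating the substitutions, assuming GCC happens to place the operand in %rdi:

/* Hypothetical: if operand 0 lives in %rdi, GCC substitutes
 *   %q0 -> %rdi, %k0 -> %edi, %w0 -> %di, %b0 -> %dil.
 * The patch needs "q" because every register inside one effective
 * address must have the same width. */
static inline void
width_demo (unsigned long long x)
{
  asm volatile (
    " test %q0, %q0\n"  /* 64-bit form */
    " test %k0, %k0\n"  /* 32-bit form */
    : /* no outputs */
    : "r" (x));
}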