Diffstat (limited to 'meta')
-rw-r--r--  meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch | 222
-rw-r--r--  meta/recipes-support/liboil/liboil_0.3.17.bb                       |   3
2 files changed, 224 insertions, 1 deletion
diff --git a/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch b/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch
new file mode 100644
index 0000000000..473380e9fc
--- /dev/null
+++ b/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch
@@ -0,0 +1,222 @@
+Upstream-Status: Pending
+
+Make the assembly syntax compatible with x32 gcc. Otherwise x32 gcc throws errors (see the illustrative sketch after the patch body below).
+
+Signed-Off-By: Nitin A Kamble <nitin.a.kamble@intel.com>
+2011/12/01
+
+
+Index: liboil-0.3.17/liboil/amd64/wavelet.c
+===================================================================
+--- liboil-0.3.17.orig/liboil/amd64/wavelet.c
++++ liboil-0.3.17/liboil/amd64/wavelet.c
+@@ -21,14 +21,14 @@ deinterleave2_asm (int16_t *d1, int16_t
+ asm volatile ("\n"
+ " sub $2, %%rcx\n"
+ "1:\n"
+- " movw (%1,%%rcx,4), %%ax\n"
+- " movw %%ax, (%0,%%rcx,2)\n"
+- " movw 2(%1,%%rcx,4), %%ax\n"
+- " movw %%ax, (%2,%%rcx,2)\n"
+- " movw 4(%1,%%rcx,4), %%ax\n"
+- " movw %%ax, 2(%0,%%rcx,2)\n"
+- " movw 6(%1,%%rcx,4), %%ax\n"
+- " movw %%ax, 2(%2,%%rcx,2)\n"
++ " movw (%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, (%q0,%%rcx,2)\n"
++ " movw 2(%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, (%q2,%%rcx,2)\n"
++ " movw 4(%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, 2(%q0,%%rcx,2)\n"
++ " movw 6(%q1,%%rcx,4), %%ax\n"
++ " movw %%ax, 2(%q2,%%rcx,2)\n"
+ " sub $2, %%rcx\n"
+ " jge 1b\n"
+ : "+r" (d1), "+r" (s_2xn), "+r" (d2), "+c" (n)
+@@ -53,20 +53,20 @@ deinterleave2_mmx (int16_t *d1, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,4), %%mm0\n"
+- " movq 8(%1,%%rcx,4), %%mm1\n"
++ " movq (%q1,%%rcx,4), %%mm0\n"
++ " movq 8(%q1,%%rcx,4), %%mm1\n"
+ " pslld $16, %%mm0\n"
+ " pslld $16, %%mm1\n"
+ " psrad $16, %%mm0\n"
+ " psrad $16, %%mm1\n"
+ " packssdw %%mm1, %%mm0\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
+- " movq (%1,%%rcx,4), %%mm0\n"
+- " movq 8(%1,%%rcx,4), %%mm1\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
++ " movq (%q1,%%rcx,4), %%mm0\n"
++ " movq 8(%q1,%%rcx,4), %%mm1\n"
+ " psrad $16, %%mm0\n"
+ " psrad $16, %%mm1\n"
+ " packssdw %%mm1, %%mm0\n"
+- " movq %%mm0, (%2,%%rcx,2)\n"
++ " movq %%mm0, (%q2,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -93,10 +93,10 @@ deinterleave2_mmx_2 (int16_t *d1, int16_
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " pshufw $0xd8, (%1,%%rcx,4), %%mm0\n"
+- " movd %%mm0, (%0,%%rcx,2)\n"
+- " pshufw $0x8d, (%1,%%rcx,4), %%mm0\n"
+- " movd %%mm0, (%2,%%rcx,2)\n"
++ " pshufw $0xd8, (%q1,%%rcx,4), %%mm0\n"
++ " movd %%mm0, (%q0,%%rcx,2)\n"
++ " pshufw $0x8d, (%q1,%%rcx,4), %%mm0\n"
++ " movd %%mm0, (%q2,%%rcx,2)\n"
+ " add $2, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -123,16 +123,16 @@ deinterleave2_mmx_3 (int16_t *d1, int16_
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,4), %%mm1\n"
+- " movq (%1,%%rcx,4), %%mm2\n"
+- " movq 8(%1,%%rcx,4), %%mm0\n"
++ " movq (%q1,%%rcx,4), %%mm1\n"
++ " movq (%q1,%%rcx,4), %%mm2\n"
++ " movq 8(%q1,%%rcx,4), %%mm0\n"
+ " punpcklwd %%mm0, %%mm1\n"
+ " punpckhwd %%mm0, %%mm2\n"
+ " movq %%mm1, %%mm0\n"
+ " punpcklwd %%mm2, %%mm0\n"
+ " punpckhwd %%mm2, %%mm1\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
+- " movq %%mm1, (%2,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
++ " movq %%mm1, (%q2,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -159,26 +159,26 @@ deinterleave2_mmx_4 (int16_t *d1, int16_
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,4), %%mm1\n"
++ " movq (%q1,%%rcx,4), %%mm1\n"
+ " movq %%mm1, %%mm2\n"
+- " movq 8(%1,%%rcx,4), %%mm0\n"
+- " movq 16(%1,%%rcx,4), %%mm5\n"
++ " movq 8(%q1,%%rcx,4), %%mm0\n"
++ " movq 16(%q1,%%rcx,4), %%mm5\n"
+ " punpcklwd %%mm0, %%mm1\n"
+ " movq %%mm5, %%mm6\n"
+ " punpckhwd %%mm0, %%mm2\n"
+- " movq 24(%1,%%rcx,4), %%mm4\n"
++ " movq 24(%q1,%%rcx,4), %%mm4\n"
+ " movq %%mm1, %%mm0\n"
+ " punpcklwd %%mm4, %%mm5\n"
+ " punpcklwd %%mm2, %%mm0\n"
+ " punpckhwd %%mm4, %%mm6\n"
+ " punpckhwd %%mm2, %%mm1\n"
+ " movq %%mm5, %%mm4\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " punpcklwd %%mm6, %%mm4\n"
+- " movq %%mm1, (%2,%%rcx,2)\n"
++ " movq %%mm1, (%q2,%%rcx,2)\n"
+ " punpckhwd %%mm6, %%mm5\n"
+- " movq %%mm4, 8(%0,%%rcx,2)\n"
+- " movq %%mm5, 8(%2,%%rcx,2)\n"
++ " movq %%mm4, 8(%q0,%%rcx,2)\n"
++ " movq %%mm5, 8(%q2,%%rcx,2)\n"
+ " add $8, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -252,13 +252,13 @@ interleave2_mmx (int16_t *d_2xn, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%1,%%rcx,2), %%mm0\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
++ " movq (%q1,%%rcx,2), %%mm0\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
+ " movq %%mm0, %%mm2\n"
+ " punpckhwd %%mm1, %%mm0\n"
+ " punpcklwd %%mm1, %%mm2\n"
+- " movq %%mm2, (%0,%%rcx,4)\n"
+- " movq %%mm0, 8(%0,%%rcx,4)\n"
++ " movq %%mm2, (%q0,%%rcx,4)\n"
++ " movq %%mm0, 8(%q0,%%rcx,4)\n"
+ " add $4, %%rcx\n"
+ " cmp %3, %%ecx\n"
+ " jl 1b\n"
+@@ -285,12 +285,12 @@ lift_add_shift1_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $1, %%mm1\n"
+- " paddw (%1,%%rcx,2), %%mm1\n"
+- " movq %%mm1, (%0,%%rcx,2)\n"
++ " paddw (%q1,%%rcx,2), %%mm1\n"
++ " movq %%mm1, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
+@@ -317,13 +317,13 @@ lift_sub_shift1_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
+- " movq (%1,%%rcx,2), %%mm0\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
++ " movq (%q1,%%rcx,2), %%mm0\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $1, %%mm1\n"
+ " psubw %%mm1, %%mm0\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
+@@ -350,12 +350,12 @@ lift_add_shift2_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $2, %%mm1\n"
+- " paddw (%1,%%rcx,2), %%mm1\n"
+- " movq %%mm1, (%0,%%rcx,2)\n"
++ " paddw (%q1,%%rcx,2), %%mm1\n"
++ " movq %%mm1, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
+@@ -382,13 +382,13 @@ lift_sub_shift2_mmx (int16_t *d, int16_t
+ asm volatile ("\n"
+ " xor %%rcx, %%rcx\n"
+ "1:\n"
+- " movq (%2,%%rcx,2), %%mm1\n"
+- " movq (%3,%%rcx,2), %%mm2\n"
+- " movq (%1,%%rcx,2), %%mm0\n"
++ " movq (%q2,%%rcx,2), %%mm1\n"
++ " movq (%q3,%%rcx,2), %%mm2\n"
++ " movq (%q1,%%rcx,2), %%mm0\n"
+ " paddw %%mm2, %%mm1\n"
+ " psraw $2, %%mm1\n"
+ " psubw %%mm1, %%mm0\n"
+- " movq %%mm0, (%0,%%rcx,2)\n"
++ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " add $4, %%rcx\n"
+ " cmp %4, %%ecx\n"
+ " jl 1b\n"
diff --git a/meta/recipes-support/liboil/liboil_0.3.17.bb b/meta/recipes-support/liboil/liboil_0.3.17.bb
index 10a845e42f..b1e21ab353 100644
--- a/meta/recipes-support/liboil/liboil_0.3.17.bb
+++ b/meta/recipes-support/liboil/liboil_0.3.17.bb
@@ -10,12 +10,13 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=ad80780d9c5205d63481a0184e199a15 \
file://testsuite/trans.c;endline=29;md5=380ecd43121fe3dcc0d8d7e5984f283d"
DEPENDS = "glib-2.0"
-PR = "r4"
+PR = "r5"
SRC_URI = "http://liboil.freedesktop.org/download/${BPN}-${PV}.tar.gz \
file://no-tests.patch \
file://fix-unaligned-whitelist.patch \
file://0001-Fix-enable-vfp-flag.patch \
+ file://liboil_fix_for_x32.patch \
"
SRC_URI[md5sum] = "47dc734f82faeb2964d97771cfd2e701"