diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/configure.ac gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/configure.ac
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/configure.ac	2006-09-23 15:35:21.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/configure.ac	2007-05-01 12:23:39.000000000 +0200
@@ -190,7 +190,7 @@
     ARCH_X86=yes
     ;;
   # armv4l is a subset of armv5tel
-  armv4l|armv5tel)
+  arm|armv4l|armv5tel|armv5te)
     TARGET_ARCH="armv4l"
     ARCH_ARMV4L=yes
     ;;
@@ -364,11 +364,8 @@
 AC_FF_ALLOW_DISABLE(HAVE_IWMMXT, iwmmxt, use ARM/IWMMXT optimizations,[
   if test x$TARGET_ARCH = xarmv4l; then
     AC_MSG_CHECKING(for support of IWMMXT optimizations)
-    AC_TRY_RUN([
-      int main () {
+    AC_TRY_COMPILE(,[
         __asm__ __volatile__ ("wunpckelub wr6, wr4");
-        return 0;
-      }
     ],[ HAVE_IWMMXT=yes && AC_MSG_RESULT(yes) ],[
        HAVE_IWMMXT=no && AC_MSG_RESULT(no) ])
   else
@@ -376,6 +373,19 @@
   fi
 ])
 
+dnl ARMV5TE
+AC_FF_ALLOW_DISABLE(HAVE_ARMV5TE, armv5te, use ARMV5TE optimizations,[
+  if test x$TARGET_ARCH = xarmv4l; then
+    AC_MSG_CHECKING(for support of ARMV5TE specific instructions)
+    AC_TRY_COMPILE(, [
+        __asm__ __volatile__ ("smulbb a4,ip,a3");
+    ],[ HAVE_ARMV5TE=yes && AC_MSG_RESULT(yes) ],[
+       HAVE_ARMV5TE=no && AC_MSG_RESULT(no) ])
+  else
+    HAVE_ARMV5TE=no
+  fi
+])
+
 dnl GProf (-p)
 AC_FF_ALLOW_ENABLE(HAVE_GPROF, gprof, profiling with gprof,[
   GPROF_FLAGS="-p"
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm.c gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm.c
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm.c	2006-09-20 20:55:37.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm.c	2007-05-01 12:23:40.000000000 +0200
@@ -2,22 +2,24 @@
  * ARMv4L optimized DSP utils
  * Copyright (c) 2001 Lionel Ulmer.
  *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
 
-#include "dsputil.h"
+#include "../dsputil.h"
 #ifdef HAVE_IPP
 #include "ipp.h"
 #endif
@@ -27,6 +29,12 @@
 extern void j_rev_dct_ARM(DCTELEM *data);
 extern void simple_idct_ARM(DCTELEM *data);
 
+extern void simple_idct_armv5te(DCTELEM *data);
+extern void simple_idct_put_armv5te(uint8_t *dest, int line_size,
+                                    DCTELEM *data);
+extern void simple_idct_add_armv5te(uint8_t *dest, int line_size,
+                                    DCTELEM *data);
+
 /* XXX: local hack */
 static void (*ff_put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
 static void (*ff_add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
@@ -196,8 +204,10 @@
     ff_add_pixels_clamped = c->add_pixels_clamped;
 
     if(idct_algo == FF_IDCT_AUTO){
-#ifdef HAVE_IPP
+#if defined(HAVE_IPP)
         idct_algo = FF_IDCT_IPP;
+#elif defined(HAVE_ARMV5TE)
+        idct_algo = FF_IDCT_SIMPLEARMV5TE;
 #else
         idct_algo = FF_IDCT_ARM;
 #endif
@@ -213,6 +223,13 @@
         c->idct_add= simple_idct_ARM_add;
         c->idct    = simple_idct_ARM;
         c->idct_permutation_type= FF_NO_IDCT_PERM;
+#ifdef HAVE_ARMV5TE
+    } else if (idct_algo==FF_IDCT_SIMPLEARMV5TE){
+        c->idct_put= simple_idct_put_armv5te;
+        c->idct_add= simple_idct_add_armv5te;
+        c->idct    = simple_idct_armv5te;
+        c->idct_permutation_type = FF_NO_IDCT_PERM;
+#endif
 #ifdef HAVE_IPP
     } else if (idct_algo==FF_IDCT_IPP){
         c->idct_put= simple_idct_ipp_put;
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S	2006-02-19 00:04:59.000000000 +0100
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S	2007-05-01 12:23:40.000000000 +0200
@@ -2,20 +2,29 @@
 @ ARMv4L optimized DSP utils
 @ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
 @
-@ This library is free software; you can redistribute it and/or
+@ This file is part of FFmpeg.
+@
+@ FFmpeg is free software; you can redistribute it and/or
 @ modify it under the terms of the GNU Lesser General Public
 @ License as published by the Free Software Foundation; either
-@ version 2 of the License, or (at your option) any later version.
+@ version 2.1 of the License, or (at your option) any later version.
 @
-@ This library is distributed in the hope that it will be useful,
+@ FFmpeg is distributed in the hope that it will be useful,
 @ but WITHOUT ANY WARRANTY; without even the implied warranty of
 @ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 @ Lesser General Public License for more details.
 @
 @ You should have received a copy of the GNU Lesser General Public
-@ License along with this library; if not, write to the Free Software
+@ License along with FFmpeg; if not, write to the Free Software
 @ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 @
+#if defined(__ARM_ARCH_5__) || \
+    defined(__ARM_ARCH_5T__) || \
+    defined(__ARM_ARCH_5TE__)
+#define PLD(code...)   code
+#else
+#define PLD(code...)
+#endif
 
 .macro  ADJ_ALIGN_QUADWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
         mov \Rd0, \Rn0, lsr #(\shift * 8)
@@ -74,7 +83,7 @@
 put_pixels16_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r11, lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -85,7 +94,7 @@
 1:      ldmia r1, {r4-r7}
         add r1, r1, r2
         stmia r0, {r4-r7}
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         add r0, r0, r2
         bne 1b
@@ -95,7 +104,7 @@
 2:      ldmia r1, {r4-r8}
         add r1, r1, r2
         ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r9-r12}
         add r0, r0, r2
@@ -106,7 +115,7 @@
 3:      ldmia r1, {r4-r8}
         add r1, r1, r2
         ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r9-r12}
         add r0, r0, r2
@@ -117,7 +126,7 @@
 4:      ldmia r1, {r4-r8}
         add r1, r1, r2
         ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r9-r12}
         add r0, r0, r2
@@ -136,7 +145,7 @@
 put_pixels8_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r5,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -147,7 +156,7 @@
 1:      ldmia r1, {r4-r5}
         add r1, r1, r2
         subs r3, r3, #1
-        pld [r1]
+        PLD ( pld [r1] )
         stmia r0, {r4-r5}
         add r0, r0, r2
         bne 1b
@@ -157,7 +166,7 @@
 2:      ldmia r1, {r4-r5, r12}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r4-r5}
         add r0, r0, r2
@@ -168,7 +177,7 @@
 3:      ldmia r1, {r4-r5, r12}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r4-r5}
         add r0, r0, r2
@@ -179,7 +188,7 @@
 4:      ldmia r1, {r4-r5, r12}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
-        pld [r1]
+        PLD ( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r4-r5}
         add r0, r0, r2
@@ -198,7 +207,7 @@
 put_pixels8_x2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r10,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -210,7 +219,7 @@
 1:      ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -223,7 +232,7 @@
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -236,7 +245,7 @@
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -248,7 +257,7 @@
         ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         RND_AVG32 r8, r9, r6, r7, r5, r10, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -267,7 +276,7 @@
 put_no_rnd_pixels8_x2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r10,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -279,7 +288,7 @@
 1:      ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -292,7 +301,7 @@
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -305,7 +314,7 @@
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -317,7 +326,7 @@
         ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
-        pld [r1]
+        PLD ( pld [r1] )
         NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -338,7 +347,7 @@
 put_pixels8_y2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -352,13 +361,13 @@
         add r1, r1, r2
 6:      ldmia r1, {r6-r7}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         ldmia r1, {r4-r5}
         add r1, r1, r2
         stmia r0, {r8-r9}
         add r0, r0, r2
-        pld [r1]
+        PLD ( pld [r1] )
         RND_AVG32 r8, r9, r6, r7, r4, r5, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -369,18 +378,18 @@
 2:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
         RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
         subs r3, r3, #1
         RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -392,18 +401,18 @@
 3:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
         RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
         subs r3, r3, #1
         RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -415,18 +424,18 @@
 4:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
         RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
         subs r3, r3, #1
         RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -447,7 +456,7 @@
 put_no_rnd_pixels8_y2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -461,13 +470,13 @@
         add r1, r1, r2
 6:      ldmia r1, {r6-r7}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         ldmia r1, {r4-r5}
         add r1, r1, r2
         stmia r0, {r8-r9}
         add r0, r0, r2
-        pld [r1]
+        PLD ( pld [r1] )
         NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -478,18 +487,18 @@
 2:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
         NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
         subs r3, r3, #1
         NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -501,18 +510,18 @@
 3:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
         NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
         subs r3, r3, #1
         NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -524,18 +533,18 @@
 4:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
         NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
         subs r3, r3, #1
         NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -562,7 +571,7 @@
         ldmia r1, {r8-r10}
 .endif
         add r1, r1, r2
-        pld [r1]
+        PLD ( pld [r1] )
 .if \align == 0
         ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r6, r7, r8
 .elseif \align == 1
@@ -624,7 +633,7 @@
 put_pixels8_xy2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adrl r12, 5f
         ands r4, r1, #3
@@ -661,7 +670,7 @@
 put_no_rnd_pixels8_xy2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+        PLD ( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adrl r12, 5f
         ands r4, r1, #3
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c	2006-02-19 00:04:59.000000000 +0100
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c	2007-05-01 12:23:40.000000000 +0200
@@ -2,18 +2,20 @@
 * iWMMXt optimized DSP utils
 * Copyright (c) 2004 AGAWA Koji
 *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
 
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h	2006-09-20 20:55:37.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h	2007-05-01 12:23:40.000000000 +0200
@@ -2,18 +2,20 @@
 * iWMMXt optimized DSP utils
 * copyright (c) 2004 AGAWA Koji
 *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
 
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/Makefile.am gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/Makefile.am
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/Makefile.am	2006-09-22 06:07:23.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/Makefile.am	2007-05-01 12:23:40.000000000 +0200
@@ -7,9 +7,14 @@
 iwmmxt_libs = libiwmmxt.la
 endif
 
+if HAVE_ARMV5TE
+armv5te_libs = libarmv5te.la
+endif
+
 noinst_LTLIBRARIES = \
 	libarmv4l.la \
-	$(iwmmxt_libs)
+	$(iwmmxt_libs) \
+	$(armv5te_libs)
 
 libarmv4l_la_SOURCES = \
 	jrevdct_arm.S \
@@ -18,6 +23,9 @@
 	dsputil_arm.c \
 	mpegvideo_arm.c
 
+libarmv5te_la_SOURCES = \
+	simple_idct_armv5te.S
+
 libiwmmxt_la_SOURCES = \
 	dsputil_iwmmxt.c \
 	mpegvideo_iwmmxt.c
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/mathops.h gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/mathops.h
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/mathops.h	2006-09-22 06:07:23.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/mathops.h	2007-05-01 12:23:40.000000000 +0200
@@ -2,18 +2,20 @@
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
 
@@ -39,9 +41,9 @@
 #   define MAC16(rt, ra, rb) \
         asm ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
 /* signed 16x16 -> 32 multiply */
-#   define MUL16(ra, rb) \
-        ({ int __rt;\
-         asm ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb));
+#   define MUL16(ra, rb) \
+        ({ int __rt; \
+         asm ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
          __rt; })
 
 #endif
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c	2006-02-19 00:04:59.000000000 +0100
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c	2007-05-01 12:23:40.000000000 +0200
@@ -1,25 +1,27 @@
 /*
 * Copyright (c) 2002 Michael Niedermayer
 *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
 
-#include "dsputil.h"
-#include "mpegvideo.h"
-#include "avcodec.h"
+#include "../dsputil.h"
+#include "../mpegvideo.h"
+#include "../avcodec.h"
 
 extern void MPV_common_init_iwmmxt(MpegEncContext *s);
 
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c	2006-09-20 20:55:37.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c	2007-05-01 12:23:40.000000000 +0200
@@ -1,18 +1,20 @@
 /*
 * copyright (c) 2004 AGAWA Koji
 *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
 
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_arm.S gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_arm.S
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_arm.S	2006-09-20 20:55:37.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_arm.S	2007-05-01 12:23:40.000000000 +0200
@@ -5,18 +5,20 @@
 *
 * Author: Frederic Boulay <dilb@handhelds.org>
 *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The function defined in this file, is derived from the simple_idct function
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S	1970-01-01 01:00:00.000000000 +0100
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S	2007-05-01 12:23:40.000000000 +0200
@@ -0,0 +1,718 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2006 Mans Rullgard <mru@inprovide.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W6 8867  /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W7 4520  /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+
+#define W13 (W1 | (W3 << 16))
+#define W26 (W2 | (W6 << 16))
+#define W57 (W5 | (W7 << 16))
+
+        .text
+        .align
+w13:    .long W13
+w26:    .long W26
+w57:    .long W57
+
+        .align
+        .func idct_row_armv5te
+idct_row_armv5te:
+        str lr, [sp, #-4]!
+
+        ldrd v1, [a1, #8]
+        ldrd a3, [a1]            /* a3 = row[1:0], a4 = row[3:2] */
+        orrs v1, v1, v2
+        cmpeq v1, a4
+        cmpeq v1, a3, lsr #16
+        beq row_dc_only
+
+        mov v1, #(1<<(ROW_SHIFT-1))
+        mov ip, #16384
+        sub ip, ip, #1           /* ip = W4 */
+        smlabb v1, ip, a3, v1    /* v1 = W4*row[0]+(1<<(RS-1)) */
+        ldr ip, [pc, #(w26-.-8)] /* ip = W2 | (W6 << 16) */
+        smultb a2, ip, a4
+        smulbb lr, ip, a4
+        add v2, v1, a2
+        sub v3, v1, a2
+        sub v4, v1, lr
+        add v1, v1, lr
+
+        ldr ip, [pc, #(w13-.-8)] /* ip = W1 | (W3 << 16) */
+        ldr lr, [pc, #(w57-.-8)] /* lr = W5 | (W7 << 16) */
+        smulbt v5, ip, a3
+        smultt v6, lr, a4
+        smlatt v5, ip, a4, v5
+        smultt a2, ip, a3
+        smulbt v7, lr, a3
+        sub v6, v6, a2
+        smulbt a2, ip, a4
+        smultt fp, lr, a3
+        sub v7, v7, a2
+        smulbt a2, lr, a4
+        ldrd a3, [a1, #8]        /* a3=row[5:4] a4=row[7:6] */
+        sub fp, fp, a2
+
+        orrs a2, a3, a4
+        beq 1f
+
+        smlabt v5, lr, a3, v5
+        smlabt v6, ip, a3, v6
+        smlatt v5, lr, a4, v5
+        smlabt v6, lr, a4, v6
+        smlatt v7, lr, a3, v7
+        smlatt fp, ip, a3, fp
+        smulbt a2, ip, a4
+        smlatt v7, ip, a4, v7
+        sub fp, fp, a2
+
+        ldr ip, [pc, #(w26-.-8)] /* ip = W2 | (W6 << 16) */
+        mov a2, #16384
+        sub a2, a2, #1           /* a2 =  W4 */
+        smulbb a2, a2, a3        /* a2 =  W4*row[4] */
+        smultb lr, ip, a4        /* lr =  W6*row[6] */
+        add v1, v1, a2           /* v1 += W4*row[4] */
+        add v1, v1, lr           /* v1 += W6*row[6] */
+        add v4, v4, a2           /* v4 += W4*row[4] */
+        sub v4, v4, lr           /* v4 -= W6*row[6] */
+        smulbb lr, ip, a4        /* lr =  W2*row[6] */
+        sub v2, v2, a2           /* v2 -= W4*row[4] */
+        sub v2, v2, lr           /* v2 -= W2*row[6] */
+        sub v3, v3, a2           /* v3 -= W4*row[4] */
+        add v3, v3, lr           /* v3 += W2*row[6] */
+
+1:      add a2, v1, v5
+        mov a3, a2, lsr #11
+        bic a3, a3, #0x1f0000
+        sub a2, v2, v6
+        mov a2, a2, lsr #11
+        add a3, a3, a2, lsl #16
+        add a2, v3, v7
+        mov a4, a2, lsr #11
+        bic a4, a4, #0x1f0000
+        add a2, v4, fp
+        mov a2, a2, lsr #11
+        add a4, a4, a2, lsl #16
+        strd a3, [a1]
+
+        sub a2, v4, fp
+        mov a3, a2, lsr #11
+        bic a3, a3, #0x1f0000
+        sub a2, v3, v7
+        mov a2, a2, lsr #11
+        add a3, a3, a2, lsl #16
+        add a2, v2, v6
+        mov a4, a2, lsr #11
+        bic a4, a4, #0x1f0000
+        sub a2, v1, v5
+        mov a2, a2, lsr #11
+        add a4, a4, a2, lsl #16
+        strd a3, [a1, #8]
+
+        ldr pc, [sp], #4
+
+row_dc_only:
+        orr a3, a3, a3, lsl #16
+        bic a3, a3, #0xe000
+        mov a3, a3, lsl #3
+        mov a4, a3
+        strd a3, [a1]
+        strd a3, [a1, #8]
+
+        ldr pc, [sp], #4
+        .endfunc
+
+        .macro idct_col
+        ldr a4, [a1]             /* a4 = col[1:0] */
+        mov ip, #16384
+        sub ip, ip, #1           /* ip = W4 */
+#if 0
+        mov v1, #(1<<(COL_SHIFT-1))
+        smlabt v2, ip, a4, v1    /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */
+        smlabb v1, ip, a4, v1    /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */
+        ldr a4, [a1, #(16*4)]
+#else
+        mov v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
+        add v2, v1, a4, asr #16
+        rsb v2, v2, v2, lsl #14
+        mov a4, a4, lsl #16
+        add v1, v1, a4, asr #16
+        ldr a4, [a1, #(16*4)]
+        rsb v1, v1, v1, lsl #14
+#endif
+
+        smulbb lr, ip, a4
+        smulbt a3, ip, a4
+        sub v3, v1, lr
+        sub v5, v1, lr
+        add v7, v1, lr
+        add v1, v1, lr
+        sub v4, v2, a3
+        sub v6, v2, a3
+        add fp, v2, a3
+        ldr ip, [pc, #(w26-.-8)]
+        ldr a4, [a1, #(16*2)]
+        add v2, v2, a3
+
+        smulbb lr, ip, a4
+        smultb a3, ip, a4
+        add v1, v1, lr
+        sub v7, v7, lr
+        add v3, v3, a3
+        sub v5, v5, a3
+        smulbt lr, ip, a4
+        smultt a3, ip, a4
+        add v2, v2, lr
+        sub fp, fp, lr
+        add v4, v4, a3
+        ldr a4, [a1, #(16*6)]
+        sub v6, v6, a3
+
+        smultb lr, ip, a4
+        smulbb a3, ip, a4
+        add v1, v1, lr
+        sub v7, v7, lr
+        sub v3, v3, a3
+        add v5, v5, a3
+        smultt lr, ip, a4
+        smulbt a3, ip, a4
+        add v2, v2, lr
+        sub fp, fp, lr
+        sub v4, v4, a3
+        add v6, v6, a3
+
+        stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
+
+        ldr ip, [pc, #(w13-.-8)]
+        ldr a4, [a1, #(16*1)]
+        ldr lr, [pc, #(w57-.-8)]
+        smulbb v1, ip, a4
+        smultb v3, ip, a4
+        smulbb v5, lr, a4
+        smultb v7, lr, a4
+        smulbt v2, ip, a4
+        smultt v4, ip, a4
+        smulbt v6, lr, a4
+        smultt fp, lr, a4
+        rsb v4, v4, #0
+        ldr a4, [a1, #(16*3)]
+        rsb v3, v3, #0
+
+        smlatb v1, ip, a4, v1
+        smlatb v3, lr, a4, v3
+        smulbb a3, ip, a4
+        smulbb a2, lr, a4
+        sub v5, v5, a3
+        sub v7, v7, a2
+        smlatt v2, ip, a4, v2
+        smlatt v4, lr, a4, v4
+        smulbt a3, ip, a4
+        smulbt a2, lr, a4
+        sub v6, v6, a3
+        ldr a4, [a1, #(16*5)]
+        sub fp, fp, a2
+
+        smlabb v1, lr, a4, v1
+        smlabb v3, ip, a4, v3
+        smlatb v5, lr, a4, v5
+        smlatb v7, ip, a4, v7
+        smlabt v2, lr, a4, v2
+        smlabt v4, ip, a4, v4
+        smlatt v6, lr, a4, v6
+        ldr a3, [a1, #(16*7)]
+        smlatt fp, ip, a4, fp
+
+        smlatb v1, lr, a3, v1
+        smlabb v3, lr, a3, v3
+        smlatb v5, ip, a3, v5
+        smulbb a4, ip, a3
+        smlatt v2, lr, a3, v2
+        sub v7, v7, a4
+        smlabt v4, lr, a3, v4
+        smulbt a4, ip, a3
+        smlatt v6, ip, a3, v6
+        sub fp, fp, a4
+        .endm
+
+        .align
+        .func idct_col_armv5te
+idct_col_armv5te:
+        str lr, [sp, #-4]!
+
+        idct_col
+
+        ldmfd sp!, {a3, a4}
+        adds a2, a3, v1
+        mov a2, a2, lsr #20
+        orrmi a2, a2, #0xf000
+        add ip, a4, v2
+        mov ip, ip, asr #20
+        orr a2, a2, ip, lsl #16
+        str a2, [a1]
+        subs a3, a3, v1
+        mov a2, a3, lsr #20
+        orrmi a2, a2, #0xf000
+        sub a4, a4, v2
+        mov a4, a4, asr #20
+        orr a2, a2, a4, lsl #16
+        ldmfd sp!, {a3, a4}
+        str a2, [a1, #(16*7)]
+
+        subs a2, a3, v3
+        mov a2, a2, lsr #20
+        orrmi a2, a2, #0xf000
+        sub ip, a4, v4
+        mov ip, ip, asr #20
+        orr a2, a2, ip, lsl #16
+        str a2, [a1, #(16*1)]
+        adds a3, a3, v3
+        mov a2, a3, lsr #20
+        orrmi a2, a2, #0xf000
+        add a4, a4, v4
+        mov a4, a4, asr #20
+        orr a2, a2, a4, lsl #16
+        ldmfd sp!, {a3, a4}
+        str a2, [a1, #(16*6)]
+
+        adds a2, a3, v5
+        mov a2, a2, lsr #20
+        orrmi a2, a2, #0xf000
+        add ip, a4, v6
+        mov ip, ip, asr #20
+        orr a2, a2, ip, lsl #16
+        str a2, [a1, #(16*2)]
+        subs a3, a3, v5
+        mov a2, a3, lsr #20
+        orrmi a2, a2, #0xf000
+        sub a4, a4, v6
+        mov a4, a4, asr #20
+        orr a2, a2, a4, lsl #16
+        ldmfd sp!, {a3, a4}
+        str a2, [a1, #(16*5)]
+
+        adds a2, a3, v7
+        mov a2, a2, lsr #20
+        orrmi a2, a2, #0xf000
+        add ip, a4, fp
+        mov ip, ip, asr #20
+        orr a2, a2, ip, lsl #16
+        str a2, [a1, #(16*3)]
+        subs a3, a3, v7
+        mov a2, a3, lsr #20
+        orrmi a2, a2, #0xf000
+        sub a4, a4, fp
+        mov a4, a4, asr #20
+        orr a2, a2, a4, lsl #16
+        str a2, [a1, #(16*4)]
+
+        ldr pc, [sp], #4
+        .endfunc
+
+        .align
+        .func idct_col_put_armv5te
+idct_col_put_armv5te:
+        str lr, [sp, #-4]!
+
+        idct_col
+
+        ldmfd sp!, {a3, a4}
+        ldr lr, [sp, #32]
+        add a2, a3, v1
+        movs a2, a2, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add ip, a4, v2
+        movs ip, ip, asr #20
+        movmi ip, #0
+        cmp ip, #255
+        movgt ip, #255
+        orr a2, a2, ip, lsl #8
+        sub a3, a3, v1
+        movs a3, a3, asr #20
+        movmi a3, #0
+        cmp a3, #255
+        movgt a3, #255
+        sub a4, a4, v2
+        movs a4, a4, asr #20
+        movmi a4, #0
+        cmp a4, #255
+        ldr v1, [sp, #28]
+        movgt a4, #255
+        strh a2, [v1]
+        add a2, v1, #2
+        str a2, [sp, #28]
+        orr a2, a3, a4, lsl #8
+        rsb v2, lr, lr, lsl #3
+        ldmfd sp!, {a3, a4}
+        strh a2, [v2, v1]!
+
+        sub a2, a3, v3
+        movs a2, a2, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        sub ip, a4, v4
+        movs ip, ip, asr #20
+        movmi ip, #0
+        cmp ip, #255
+        movgt ip, #255
+        orr a2, a2, ip, lsl #8
+        strh a2, [v1, lr]!
+        add a3, a3, v3
+        movs a2, a3, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add a4, a4, v4
+        movs a4, a4, asr #20
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        orr a2, a2, a4, lsl #8
+        ldmfd sp!, {a3, a4}
+        strh a2, [v2, -lr]!
+
+        add a2, a3, v5
+        movs a2, a2, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add ip, a4, v6
+        movs ip, ip, asr #20
+        movmi ip, #0
+        cmp ip, #255
+        movgt ip, #255
+        orr a2, a2, ip, lsl #8
+        strh a2, [v1, lr]!
+        sub a3, a3, v5
+        movs a2, a3, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        sub a4, a4, v6
+        movs a4, a4, asr #20
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        orr a2, a2, a4, lsl #8
+        ldmfd sp!, {a3, a4}
+        strh a2, [v2, -lr]!
+
+        add a2, a3, v7
+        movs a2, a2, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add ip, a4, fp
+        movs ip, ip, asr #20
+        movmi ip, #0
+        cmp ip, #255
+        movgt ip, #255
+        orr a2, a2, ip, lsl #8
+        strh a2, [v1, lr]
+        sub a3, a3, v7
+        movs a2, a3, asr #20
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        sub a4, a4, fp
+        movs a4, a4, asr #20
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        orr a2, a2, a4, lsl #8
+        strh a2, [v2, -lr]
+
+        ldr pc, [sp], #4
+        .endfunc
+
+        .align
+        .func idct_col_add_armv5te
+idct_col_add_armv5te:
+        str lr, [sp, #-4]!
+
+        idct_col
+
+        ldr lr, [sp, #36]
+
+        ldmfd sp!, {a3, a4}
+        ldrh ip, [lr]
+        add a2, a3, v1
+        mov a2, a2, asr #20
+        sub a3, a3, v1
+        and v1, ip, #255
+        adds a2, a2, v1
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add v1, a4, v2
+        mov v1, v1, asr #20
+        adds v1, v1, ip, lsr #8
+        movmi v1, #0
+        cmp v1, #255
+        movgt v1, #255
+        orr a2, a2, v1, lsl #8
+        ldr v1, [sp, #32]
+        sub a4, a4, v2
+        rsb v2, v1, v1, lsl #3
+        ldrh ip, [v2, lr]!
+        strh a2, [lr]
+        mov a3, a3, asr #20
+        and a2, ip, #255
+        adds a3, a3, a2
+        movmi a3, #0
+        cmp a3, #255
+        movgt a3, #255
+        mov a4, a4, asr #20
+        adds a4, a4, ip, lsr #8
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        add a2, lr, #2
+        str a2, [sp, #28]
+        orr a2, a3, a4, lsl #8
+        strh a2, [v2]
+
+        ldmfd sp!, {a3, a4}
+        ldrh ip, [lr, v1]!
+        sub a2, a3, v3
+        mov a2, a2, asr #20
+        add a3, a3, v3
+        and v3, ip, #255
+        adds a2, a2, v3
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        sub v3, a4, v4
+        mov v3, v3, asr #20
+        adds v3, v3, ip, lsr #8
+        movmi v3, #0
+        cmp v3, #255
+        movgt v3, #255
+        orr a2, a2, v3, lsl #8
+        add a4, a4, v4
+        ldrh ip, [v2, -v1]!
+        strh a2, [lr]
+        mov a3, a3, asr #20
+        and a2, ip, #255
+        adds a3, a3, a2
+        movmi a3, #0
+        cmp a3, #255
+        movgt a3, #255
+        mov a4, a4, asr #20
+        adds a4, a4, ip, lsr #8
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        orr a2, a3, a4, lsl #8
+        strh a2, [v2]
+
+        ldmfd sp!, {a3, a4}
+        ldrh ip, [lr, v1]!
+        add a2, a3, v5
+        mov a2, a2, asr #20
+        sub a3, a3, v5
+        and v3, ip, #255
+        adds a2, a2, v3
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add v3, a4, v6
+        mov v3, v3, asr #20
+        adds v3, v3, ip, lsr #8
+        movmi v3, #0
+        cmp v3, #255
+        movgt v3, #255
+        orr a2, a2, v3, lsl #8
+        sub a4, a4, v6
+        ldrh ip, [v2, -v1]!
+        strh a2, [lr]
+        mov a3, a3, asr #20
+        and a2, ip, #255
+        adds a3, a3, a2
+        movmi a3, #0
+        cmp a3, #255
+        movgt a3, #255
+        mov a4, a4, asr #20
+        adds a4, a4, ip, lsr #8
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        orr a2, a3, a4, lsl #8
+        strh a2, [v2]
+
+        ldmfd sp!, {a3, a4}
+        ldrh ip, [lr, v1]!
+        add a2, a3, v7
+        mov a2, a2, asr #20
+        sub a3, a3, v7
+        and v3, ip, #255
+        adds a2, a2, v3
+        movmi a2, #0
+        cmp a2, #255
+        movgt a2, #255
+        add v3, a4, fp
+        mov v3, v3, asr #20
+        adds v3, v3, ip, lsr #8
+        movmi v3, #0
+        cmp v3, #255
+        movgt v3, #255
+        orr a2, a2, v3, lsl #8
+        sub a4, a4, fp
+        ldrh ip, [v2, -v1]!
+        strh a2, [lr]
+        mov a3, a3, asr #20
+        and a2, ip, #255
+        adds a3, a3, a2
+        movmi a3, #0
+        cmp a3, #255
+        movgt a3, #255
+        mov a4, a4, asr #20
+        adds a4, a4, ip, lsr #8
+        movmi a4, #0
+        cmp a4, #255
+        movgt a4, #255
+        orr a2, a3, a4, lsl #8
+        strh a2, [v2]
+
+        ldr pc, [sp], #4
+        .endfunc
+
+        .align
+        .global simple_idct_armv5te
+        .func simple_idct_armv5te
+simple_idct_armv5te:
+        stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+
+        sub a1, a1, #(16*7)
+
+        bl idct_col_armv5te
+        add a1, a1, #4
+        bl idct_col_armv5te
+        add a1, a1, #4
+        bl idct_col_armv5te
+        add a1, a1, #4
+        bl idct_col_armv5te
+
+        ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+        .endfunc
+
+        .align
+        .global simple_idct_add_armv5te
+        .func simple_idct_add_armv5te
+simple_idct_add_armv5te:
+        stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+        mov a1, a3
+
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+
+        sub a1, a1, #(16*7)
+
+        bl idct_col_add_armv5te
+        add a1, a1, #4
+        bl idct_col_add_armv5te
+        add a1, a1, #4
+        bl idct_col_add_armv5te
+        add a1, a1, #4
+        bl idct_col_add_armv5te
+
+        add sp, sp, #8
+        ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+        .endfunc
+
+        .align
+        .global simple_idct_put_armv5te
+        .func simple_idct_put_armv5te
+simple_idct_put_armv5te:
+        stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+        mov a1, a3
+
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+        add a1, a1, #16
+        bl idct_row_armv5te
+
+        sub a1, a1, #(16*7)
+
+        bl idct_col_put_armv5te
+        add a1, a1, #4
+        bl idct_col_put_armv5te
+        add a1, a1, #4
+        bl idct_col_put_armv5te
+        add a1, a1, #4
+        bl idct_col_put_armv5te
+
+        add sp, sp, #8
+        ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+        .endfunc
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/avcodec.h gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/avcodec.h
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/avcodec.h	2006-09-20 20:55:36.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/avcodec.h	2007-05-01 12:23:40.000000000 +0200
@@ -1217,6 +1217,7 @@
 #define FF_IDCT_IPP          13
 #define FF_IDCT_XVIDMMX      14
 #define FF_IDCT_CAVS         15
+#define FF_IDCT_SIMPLEARMV5TE 16
 
 /**
  * slice count.
diff -Nur gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/Makefile.am gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/Makefile.am
--- gst-ffmpeg-0.10.2/gst-libs/ext/ffmpeg/libavcodec/Makefile.am	2006-09-22 06:07:23.000000000 +0200
+++ gst-ffmpeg-0.10.2.new/gst-libs/ext/ffmpeg/libavcodec/Makefile.am	2007-05-01 12:23:40.000000000 +0200
@@ -19,7 +19,10 @@
 if HAVE_IWMMXT
 iwmmxt_libs = armv4l/libiwmmxt.la
 endif
 
-armv4l_libs = armv4l/libarmv4l.la $(iwmmxt_libs)
+if HAVE_ARMV5TE
+armv5te_libs = armv4l/libarmv5te.la
+endif
+armv4l_libs = armv4l/libarmv4l.la $(iwmmxt_libs) $(armv5te_libs)
 armv4l_dirs = armv4l
 endif
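
Usage note: with this patch applied, FF_IDCT_AUTO already resolves to
FF_IDCT_SIMPLEARMV5TE whenever configure detected ARMv5TE support (see the
dsputil_arm.c hunk above), so no application change is required. For
completeness, a minimal sketch of forcing the new IDCT from application code;
the helper name open_with_armv5te_idct is illustrative and not part of the
patch, while avcodec_alloc_context()/avcodec_open() are the context calls of
this generation of libavcodec:

    #include "avcodec.h"

    /* Request the ARMv5TE simple IDCT explicitly instead of relying on
     * FF_IDCT_AUTO. FF_IDCT_SIMPLEARMV5TE is the value 16 added to
     * avcodec.h by the hunk above. */
    static AVCodecContext *open_with_armv5te_idct(AVCodec *codec)
    {
        AVCodecContext *ctx = avcodec_alloc_context();
        if (!ctx)
            return NULL;
        ctx->idct_algo = FF_IDCT_SIMPLEARMV5TE;
        if (avcodec_open(ctx, codec) < 0) {
            av_free(ctx);
            return NULL;
        }
        return ctx;
    }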