summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--recipes/linux/files/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch429
-rw-r--r--recipes/linux/linux-omap-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch287
-rw-r--r--recipes/linux/linux-omap-pm-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch287
-rw-r--r--recipes/xorg-lib/pixman/nearest-neighbour.patch1040
-rw-r--r--recipes/xorg-lib/pixman/over-8888-0565.patch296
-rw-r--r--recipes/xorg-lib/pixman/pixman-28986.patch32
-rw-r--r--recipes/xorg-lib/pixman/remove-broken.patch826
-rw-r--r--recipes/xorg-lib/pixman_git.bb10
8 files changed, 2630 insertions, 577 deletions
diff --git a/recipes/linux/files/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch b/recipes/linux/files/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
new file mode 100644
index 0000000000..1dee314700
--- /dev/null
+++ b/recipes/linux/files/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
@@ -0,0 +1,429 @@
+Path: news.gmane.org!not-for-mail
+From: Mikael Pettersson <mikpe@it.uu.se>
+Newsgroups: gmane.linux.ports.arm.kernel
+Subject: [PATCH][v3] ARM support for
+ TIF_RESTORE_SIGMASK/pselect6/ppoll/epoll_pwait
+Date: Sat, 15 Aug 2009 13:09:28 +0200 (MEST)
+Lines: 362
+Approved: news@gmane.org
+Message-ID: <200908151109.n7FB9Sbs000150@pilspetsen.it.uu.se>
+NNTP-Posting-Host: lo.gmane.org
+Mime-Version: 1.0
+Content-Type: text/plain; charset="us-ascii"
+Content-Transfer-Encoding: 7bit
+X-Trace: ger.gmane.org 1250334940 22899 80.91.229.12 (15 Aug 2009 11:15:40 GMT)
+X-Complaints-To: usenet@ger.gmane.org
+NNTP-Posting-Date: Sat, 15 Aug 2009 11:15:40 +0000 (UTC)
+To: linux-arm-kernel@lists.arm.linux.org.uk
+Original-X-From: linux-arm-kernel-bounces+linux-arm-kernel=m.gmane.org@lists.arm.linux.org.uk Sat Aug 15 13:15:33 2009
+Return-path: <linux-arm-kernel-bounces+linux-arm-kernel=m.gmane.org@lists.arm.linux.org.uk>
+Envelope-to: linux-arm-kernel@m.gmane.org
+Original-Received: from zeniv.linux.org.uk ([195.92.253.2])
+ by lo.gmane.org with esmtp (Exim 4.50)
+ id 1McHER-0000U2-PG
+ for linux-arm-kernel@m.gmane.org; Sat, 15 Aug 2009 13:15:31 +0200
+Original-Received: from [2002:4e20:1eda:1:201:3dff:fe00:156] (helo=lists.arm.linux.org.uk)
+ by ZenIV.linux.org.uk with esmtpsa (Exim 4.69 #1 (Red Hat Linux))
+ id 1McH9u-0005LV-3W; Sat, 15 Aug 2009 11:10:57 +0000
+DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
+ d=arm.linux.org.uk; s=lists; h=Date:Message-Id:From:To:Subject:
+ List-Id:List-Unsubscribe:List-Archive:List-Post:List-Help:
+ List-Subscribe:MIME-Version:Content-Type:
+ Content-Transfer-Encoding; bh=RVq9hPAexlUcptd3h6j5nomOyoEIjcDWAP
+ VeJtlhsa4=; b=HAoqRMC9svNzKlR0f938E0cRlLoJZvG2aSun171woWZAClu/Ud
+ IYN0iD2vm4eF+kYQkTYD6PGFCMbNnugmZe6gCpS0QhijoKyOzRBdykQHJuOLQtLE
+ q9KJzp24cQ/vwsV2+O1OqcmIFboIg2L5JQaUuv0djS7OkdspH0ysmi1lg=
+Original-Received: from [::1] (helo=lists.arm.linux.org.uk)
+ by lists.arm.linux.org.uk with esmtp (Exim 4.69)
+ (envelope-from <linux-arm-kernel-bounces@lists.arm.linux.org.uk>)
+ id 1McH9o-0002us-34; Sat, 15 Aug 2009 12:10:44 +0100
+Original-Received: from aun.it.uu.se ([130.238.12.36] ident=root)
+ by lists.arm.linux.org.uk with esmtps (TLSv1:AES256-SHA:256)
+ (Exim 4.69) (envelope-from <mikpe@user.it.uu.se>) id 1McH8h-0002uj-Ck
+ for linux-arm-kernel@lists.arm.linux.org.uk;
+ Sat, 15 Aug 2009 12:09:35 +0100
+Original-Received: from pilspetsen.it.uu.se (daemon@pilspetsen.it.uu.se [130.238.18.39])
+ by aun.it.uu.se (8.13.6/8.13.6) with ESMTP id n7FB9S04005839
+ for <linux-arm-kernel@lists.arm.linux.org.uk>;
+ Sat, 15 Aug 2009 13:09:28 +0200 (MEST)
+Original-Received: (from mikpe@localhost)
+ by pilspetsen.it.uu.se (8.13.8+Sun/8.13.7) id n7FB9Sbs000150
+ for linux-arm-kernel@lists.arm.linux.org.uk;
+ Sat, 15 Aug 2009 13:09:28 +0200 (MEST)
+X-BeenThere: linux-arm-kernel@lists.arm.linux.org.uk
+X-Mailman-Version: 2.1.9
+Precedence: list
+List-Id: ARM Linux kernel discussions <linux-arm-kernel.lists.arm.linux.org.uk>
+List-Unsubscribe: <http://lists.arm.linux.org.uk/mailman/listinfo/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.arm.linux.org.uk?subject=unsubscribe>
+List-Archive: <http://lists.arm.linux.org.uk/lurker/list/linux-arm-kernel.html>
+List-Post: <mailto:linux-arm-kernel@lists.arm.linux.org.uk>
+List-Help: <mailto:linux-arm-kernel-request@lists.arm.linux.org.uk?subject=help>
+List-Subscribe: <http://lists.arm.linux.org.uk/mailman/listinfo/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.arm.linux.org.uk?subject=subscribe>
+Errors-To: linux-arm-kernel-bounces+linux-arm-kernel=m.gmane.org@lists.arm.linux.org.uk
+Xref: news.gmane.org gmane.linux.ports.arm.kernel:64340
+Archived-At: <http://permalink.gmane.org/gmane.linux.ports.arm.kernel/64340>
+
+This patch adds support for TIF_RESTORE_SIGMASK to ARM's signal handling,
+which allows to hook up the pselect6, ppoll, and epoll_pwait syscalls on ARM.
+
+Tested here with eabi userspace and a test program with a deliberate race
+between a child's exit and the parent's sigprocmask/select sequence.
+Using sys_pselect6() instead of sigprocmask/select reliably prevents the race.
+
+The other arch's support for TIF_RESTORE_SIGMASK has evolved over time:
+
+In 2.6.16:
+- add TIF_RESTORE_SIGMASK which parallels TIF_SIGPENDING
+- test both when checking for pending signal [bad, changed later]
+- reimplement sys_sigsuspend() to use current->saved_sigmask,
+ TIF_RESTORE_SIGMASK [changed later], and -ERESTARTNOHAND;
+ ditto for sys_rt_sigsuspend(), but drop private code and
+ use common code via __ARCH_WANT_SYS_RT_SIGSUSPEND;
+- there are now no "extra" calls to do_signal() so its oldset
+ parameter is always &current->blocked so need not be passed,
+ also its return value is changed to void
+- change handle_signal() to return 0/-errno
+- change do_signal() to honor TIF_RESTORE_SIGMASK:
+ + get oldset from current->saved_sigmask if TIF_RESTORE_SIGMASK is set
+ + if handle_signal() was successful then clear TIF_RESTORE_SIGMASK
+ + if no signal was delivered and TIF_RESTORE_SIGMASK is set then
+ clear it and restore the sigmask
+- hook up sys_pselect6() and sys_ppoll()
+
+In 2.6.19:
+- hook up sys_epoll_pwait()
+
+In 2.6.26:
+- allow archs to override how TIF_RESTORE_SIGMASK is implemented;
+ default set_restore_sigmask() sets both TIF_RESTORE_SIGMASK and
+ TIF_SIGPENDING; archs need now just test TIF_SIGPENDING again
+ when checking for pending signal work; some archs now implement
+ TIF_RESTORE_SIGMASK as a secondary/non-atomic thread flag bit
+- call set_restore_sigmask() in sys_sigsuspend() instead of setting
+ TIF_RESTORE_SIGMASK
+
+In 2.6.29-rc:
+- kill sys_pselect7() which no arch wanted
+
+So for 2.6.31-rc6/ARM this patch does the following:
+- Add TIF_RESTORE_SIGMASK. Use the generic set_restore_sigmask()
+ which sets both TIF_SIGPENDING and TIF_RESTORE_SIGMASK, so
+ TIF_RESTORE_SIGMASK need not claim one of the scarce low thread
+ flags, and existing TIF_SIGPENDING and _TIF_WORK_MASK tests need
+ not be extended for TIF_RESTORE_SIGMASK.
+- sys_sigsuspend() is reimplemented to use current->saved_sigmask and
+ set_restore_sigmask(), making it identical to most other archs.
+- The private code for sys_rt_sigsuspend() is removed, instead generic
+ code supplies it via __ARCH_WANT_SYS_RT_SIGSUSPEND.
+- sys_sigsuspend() and sys_rt_sigsuspend() no longer need a pt_regs
+ parameter, so their assembly code wrappers are removed.
+- handle_signal() is changed to return 0 on success or -errno.
+- The oldset parameter to do_signal() is now redundant and removed,
+ and the return value is now also redundant so it's changed to void.
+- do_signal() is changed to honor TIF_RESTORE_SIGMASK:
+ + get oldset from current->saved_sigmask if TIF_RESTORE_SIGMASK is set
+ + if handle_signal() was successful then clear TIF_RESTORE_SIGMASK
+ + if no signal was delivered and TIF_RESTORE_SIGMASK is set then
+ clear it and restore the sigmask
+- Hook up sys_pselect6, sys_ppoll, and sys_epoll_pwait.
+
+Signed-off-by: Mikael Pettersson
+---
+Changes from previous versions:
+
+v2 (only posted as incremental diff against v1):
+- moved sigmask restoration in no-signal-delivered case into
+ "if (syscall)" block for extra protection against exception
+ path also invoking do_signal()
+v3 (this version):
+- moved oldset initialisation into "if (signr > 0)" block,
+ as requested by Russell
+
+ arch/arm/include/asm/thread_info.h | 2
+ arch/arm/include/asm/unistd.h | 7 +--
+ arch/arm/kernel/calls.S | 10 ++--
+ arch/arm/kernel/entry-common.S | 10 ----
+ arch/arm/kernel/signal.c | 86 +++++++++++++++----------------------
+ 5 files changed, 48 insertions(+), 67 deletions(-)
+
+diff -rupN linux-2.6.31-rc6/arch/arm/include/asm/thread_info.h linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/thread_info.h
+--- linux-2.6.31-rc6/arch/arm/include/asm/thread_info.h 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/thread_info.h 2009-08-14 11:26:44.000000000 +0200
+@@ -140,6 +140,7 @@ extern void vfp_sync_state(struct thread
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18
+ #define TIF_FREEZE 19
++#define TIF_RESTORE_SIGMASK 20
+
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+@@ -147,6 +148,7 @@ extern void vfp_sync_state(struct thread
+ #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
+ #define _TIF_FREEZE (1 << TIF_FREEZE)
++#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
+diff -rupN linux-2.6.31-rc6/arch/arm/include/asm/unistd.h linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/unistd.h
+--- linux-2.6.31-rc6/arch/arm/include/asm/unistd.h 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/unistd.h 2009-08-14 11:26:44.000000000 +0200
+@@ -360,8 +360,8 @@
+ #define __NR_readlinkat (__NR_SYSCALL_BASE+332)
+ #define __NR_fchmodat (__NR_SYSCALL_BASE+333)
+ #define __NR_faccessat (__NR_SYSCALL_BASE+334)
+- /* 335 for pselect6 */
+- /* 336 for ppoll */
++#define __NR_pselect6 (__NR_SYSCALL_BASE+335)
++#define __NR_ppoll (__NR_SYSCALL_BASE+336)
+ #define __NR_unshare (__NR_SYSCALL_BASE+337)
+ #define __NR_set_robust_list (__NR_SYSCALL_BASE+338)
+ #define __NR_get_robust_list (__NR_SYSCALL_BASE+339)
+@@ -372,7 +372,7 @@
+ #define __NR_vmsplice (__NR_SYSCALL_BASE+343)
+ #define __NR_move_pages (__NR_SYSCALL_BASE+344)
+ #define __NR_getcpu (__NR_SYSCALL_BASE+345)
+- /* 346 for epoll_pwait */
++#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346)
+ #define __NR_kexec_load (__NR_SYSCALL_BASE+347)
+ #define __NR_utimensat (__NR_SYSCALL_BASE+348)
+ #define __NR_signalfd (__NR_SYSCALL_BASE+349)
+@@ -432,6 +432,7 @@
+ #define __ARCH_WANT_SYS_SIGPENDING
+ #define __ARCH_WANT_SYS_SIGPROCMASK
+ #define __ARCH_WANT_SYS_RT_SIGACTION
++#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+
+ #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
+ #define __ARCH_WANT_SYS_TIME
+diff -rupN linux-2.6.31-rc6/arch/arm/kernel/calls.S linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/calls.S
+--- linux-2.6.31-rc6/arch/arm/kernel/calls.S 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/calls.S 2009-08-14 11:26:44.000000000 +0200
+@@ -81,7 +81,7 @@
+ CALL(sys_ni_syscall) /* was sys_ssetmask */
+ /* 70 */ CALL(sys_setreuid16)
+ CALL(sys_setregid16)
+- CALL(sys_sigsuspend_wrapper)
++ CALL(sys_sigsuspend)
+ CALL(sys_sigpending)
+ CALL(sys_sethostname)
+ /* 75 */ CALL(sys_setrlimit)
+@@ -188,7 +188,7 @@
+ CALL(sys_rt_sigpending)
+ CALL(sys_rt_sigtimedwait)
+ CALL(sys_rt_sigqueueinfo)
+- CALL(sys_rt_sigsuspend_wrapper)
++ CALL(sys_rt_sigsuspend)
+ /* 180 */ CALL(ABI(sys_pread64, sys_oabi_pread64))
+ CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
+ CALL(sys_chown16)
+@@ -344,8 +344,8 @@
+ CALL(sys_readlinkat)
+ CALL(sys_fchmodat)
+ CALL(sys_faccessat)
+-/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */
+- CALL(sys_ni_syscall) /* eventually ppoll */
++/* 335 */ CALL(sys_pselect6)
++ CALL(sys_ppoll)
+ CALL(sys_unshare)
+ CALL(sys_set_robust_list)
+ CALL(sys_get_robust_list)
+@@ -355,7 +355,7 @@
+ CALL(sys_vmsplice)
+ CALL(sys_move_pages)
+ /* 345 */ CALL(sys_getcpu)
+- CALL(sys_ni_syscall) /* eventually epoll_pwait */
++ CALL(sys_epoll_pwait)
+ CALL(sys_kexec_load)
+ CALL(sys_utimensat)
+ CALL(sys_signalfd)
+diff -rupN linux-2.6.31-rc6/arch/arm/kernel/entry-common.S linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/entry-common.S
+--- linux-2.6.31-rc6/arch/arm/kernel/entry-common.S 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/entry-common.S 2009-08-14 11:26:44.000000000 +0200
+@@ -373,16 +373,6 @@ sys_clone_wrapper:
+ b sys_clone
+ ENDPROC(sys_clone_wrapper)
+
+-sys_sigsuspend_wrapper:
+- add r3, sp, #S_OFF
+- b sys_sigsuspend
+-ENDPROC(sys_sigsuspend_wrapper)
+-
+-sys_rt_sigsuspend_wrapper:
+- add r2, sp, #S_OFF
+- b sys_rt_sigsuspend
+-ENDPROC(sys_rt_sigsuspend_wrapper)
+-
+ sys_sigreturn_wrapper:
+ add r0, sp, #S_OFF
+ b sys_sigreturn
+diff -rupN linux-2.6.31-rc6/arch/arm/kernel/signal.c linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/signal.c
+--- linux-2.6.31-rc6/arch/arm/kernel/signal.c 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/signal.c 2009-08-14 11:26:44.000000000 +0200
+@@ -47,57 +47,22 @@ const unsigned long sigreturn_codes[7] =
+ MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
+ };
+
+-static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
+-
+ /*
+ * atomically swap in the new signal mask, and wait for a signal.
+ */
+-asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
++asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
+ {
+- sigset_t saveset;
+-
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+- saveset = current->blocked;
++ current->saved_sigmask = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+- regs->ARM_r0 = -EINTR;
+-
+- while (1) {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule();
+- if (do_signal(&saveset, regs, 0))
+- return regs->ARM_r0;
+- }
+-}
+-
+-asmlinkage int
+-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+-{
+- sigset_t saveset, newset;
+-
+- /* XXX: Don't preclude handling different sized sigset_t's. */
+- if (sigsetsize != sizeof(sigset_t))
+- return -EINVAL;
+-
+- if (copy_from_user(&newset, unewset, sizeof(newset)))
+- return -EFAULT;
+- sigdelsetmask(&newset, ~_BLOCKABLE);
+-
+- spin_lock_irq(&current->sighand->siglock);
+- saveset = current->blocked;
+- current->blocked = newset;
+- recalc_sigpending();
+- spin_unlock_irq(&current->sighand->siglock);
+- regs->ARM_r0 = -EINTR;
+
+- while (1) {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule();
+- if (do_signal(&saveset, regs, 0))
+- return regs->ARM_r0;
+- }
++ current->state = TASK_INTERRUPTIBLE;
++ schedule();
++ set_restore_sigmask();
++ return -ERESTARTNOHAND;
+ }
+
+ asmlinkage int
+@@ -545,7 +510,7 @@ static inline void setup_syscall_restart
+ /*
+ * OK, we're invoking a handler
+ */
+-static void
++static int
+ handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs * regs, int syscall)
+@@ -596,7 +561,7 @@ handle_signal(unsigned long sig, struct
+
+ if (ret != 0) {
+ force_sigsegv(sig, tsk);
+- return;
++ return ret;
+ }
+
+ /*
+@@ -610,6 +575,7 @@ handle_signal(unsigned long sig, struct
+ recalc_sigpending();
+ spin_unlock_irq(&tsk->sighand->siglock);
+
++ return 0;
+ }
+
+ /*
+@@ -621,7 +587,7 @@ handle_signal(unsigned long sig, struct
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+-static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
++static void do_signal(struct pt_regs *regs, int syscall)
+ {
+ struct k_sigaction ka;
+ siginfo_t info;
+@@ -634,7 +600,7 @@ static int do_signal(sigset_t *oldset, s
+ * if so.
+ */
+ if (!user_mode(regs))
+- return 0;
++ return;
+
+ if (try_to_freeze())
+ goto no_signal;
+@@ -643,9 +609,24 @@ static int do_signal(sigset_t *oldset, s
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+- handle_signal(signr, &ka, &info, oldset, regs, syscall);
++ sigset_t *oldset;
++
++ if (test_thread_flag(TIF_RESTORE_SIGMASK))
++ oldset = &current->saved_sigmask;
++ else
++ oldset = &current->blocked;
++ if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
++ /*
++ * A signal was successfully delivered; the saved
++ * sigmask will have been stored in the signal frame,
++ * and will be restored by sigreturn, so we can simply
++ * clear the TIF_RESTORE_SIGMASK flag.
++ */
++ if (test_thread_flag(TIF_RESTORE_SIGMASK))
++ clear_thread_flag(TIF_RESTORE_SIGMASK);
++ }
+ single_step_set(current);
+- return 1;
++ return;
+ }
+
+ no_signal:
+@@ -697,14 +678,21 @@ static int do_signal(sigset_t *oldset, s
+ regs->ARM_r0 == -ERESTARTNOINTR) {
+ setup_syscall_restart(regs);
+ }
++
++ /* If there's no signal to deliver, we just put the saved sigmask
++ * back.
++ */
++ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
++ clear_thread_flag(TIF_RESTORE_SIGMASK);
++ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++ }
+ }
+ single_step_set(current);
+- return 0;
+ }
+
+ asmlinkage void
+ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ if (thread_flags & _TIF_SIGPENDING)
+- do_signal(&current->blocked, regs, syscall);
++ do_signal(regs, syscall);
+ }
+
+-------------------------------------------------------------------
+List admin: http://lists.arm.linux.org.uk/mailman/listinfo/linux-arm-kernel
+FAQ: http://www.arm.linux.org.uk/mailinglists/faq.php
+Etiquette: http://www.arm.linux.org.uk/mailinglists/etiquette.php
+
diff --git a/recipes/linux/linux-omap-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch b/recipes/linux/linux-omap-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
deleted file mode 100644
index 7852f0afdb..0000000000
--- a/recipes/linux/linux-omap-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
+++ /dev/null
@@ -1,287 +0,0 @@
-From 8a7643b09856f4f661403dcedbe0455b3cbeeea9 Mon Sep 17 00:00:00 2001
-From: Steven Newbury <s_j_newbury@yahoo.co.uk>
-Date: Fri, 22 May 2009 14:25:40 +0200
-Subject: [PATCH] implement TIF_RESTORE_SIGMASK support and enable the related
- syscalls:
-
-pselect6
-ppoll
-epoll_pwait
-
-Based on http://www.spinics.net/lists/arm-kernel/msg38114.html
----
- arch/arm/include/asm/thread_info.h | 2 +
- arch/arm/include/asm/unistd.h | 7 ++-
- arch/arm/kernel/calls.S | 6 +-
- arch/arm/kernel/signal.c | 90 +++++++++++++++---------------------
- 4 files changed, 46 insertions(+), 59 deletions(-)
-
-diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 4f88482..2cf0917 100644
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -136,6 +136,7 @@ extern void vfp_sync_state(struct thread_info *thread);
- #define TIF_SIGPENDING 0
- #define TIF_NEED_RESCHED 1
- #define TIF_SYSCALL_TRACE 8
-+#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
- #define TIF_POLLING_NRFLAG 16
- #define TIF_USING_IWMMXT 17
- #define TIF_MEMDIE 18
-@@ -144,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
- #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
- #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
- #define _TIF_FREEZE (1 << TIF_FREEZE)
-diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
-index 94cc58e..cd1eaa0 100644
---- a/arch/arm/include/asm/unistd.h
-+++ b/arch/arm/include/asm/unistd.h
-@@ -360,8 +360,8 @@
- #define __NR_readlinkat (__NR_SYSCALL_BASE+332)
- #define __NR_fchmodat (__NR_SYSCALL_BASE+333)
- #define __NR_faccessat (__NR_SYSCALL_BASE+334)
-- /* 335 for pselect6 */
-- /* 336 for ppoll */
-+#define __NR_pselect6 (__NR_SYSCALL_BASE+335)
-+#define __NR_ppoll (__NR_SYSCALL_BASE+336)
- #define __NR_unshare (__NR_SYSCALL_BASE+337)
- #define __NR_set_robust_list (__NR_SYSCALL_BASE+338)
- #define __NR_get_robust_list (__NR_SYSCALL_BASE+339)
-@@ -372,7 +372,7 @@
- #define __NR_vmsplice (__NR_SYSCALL_BASE+343)
- #define __NR_move_pages (__NR_SYSCALL_BASE+344)
- #define __NR_getcpu (__NR_SYSCALL_BASE+345)
-- /* 346 for epoll_pwait */
-+#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346)
- #define __NR_kexec_load (__NR_SYSCALL_BASE+347)
- #define __NR_utimensat (__NR_SYSCALL_BASE+348)
- #define __NR_signalfd (__NR_SYSCALL_BASE+349)
-@@ -430,6 +430,7 @@
- #define __ARCH_WANT_SYS_SIGPENDING
- #define __ARCH_WANT_SYS_SIGPROCMASK
- #define __ARCH_WANT_SYS_RT_SIGACTION
-+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-
- #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
- #define __ARCH_WANT_SYS_TIME
-diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
-index 1680e9e..534000d 100644
---- a/arch/arm/kernel/calls.S
-+++ b/arch/arm/kernel/calls.S
-@@ -344,8 +344,8 @@
- CALL(sys_readlinkat)
- CALL(sys_fchmodat)
- CALL(sys_faccessat)
--/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */
-- CALL(sys_ni_syscall) /* eventually ppoll */
-+/* 335 */ CALL(sys_pselect6)
-+ CALL(sys_ppoll)
- CALL(sys_unshare)
- CALL(sys_set_robust_list)
- CALL(sys_get_robust_list)
-@@ -355,7 +355,7 @@
- CALL(sys_vmsplice)
- CALL(sys_move_pages)
- /* 345 */ CALL(sys_getcpu)
-- CALL(sys_ni_syscall) /* eventually epoll_pwait */
-+ CALL(sys_epoll_pwait)
- CALL(sys_kexec_load)
- CALL(sys_utimensat)
- CALL(sys_signalfd)
-diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index 80b8b5c..7645048 100644
---- a/arch/arm/kernel/signal.c
-+++ b/arch/arm/kernel/signal.c
-@@ -47,57 +47,23 @@ const unsigned long sigreturn_codes[7] = {
- MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
- };
-
--static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
-+static void do_signal(struct pt_regs * regs, int syscall);
-
- /*
- * atomically swap in the new signal mask, and wait for a signal.
- */
--asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
-+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
- {
-- sigset_t saveset;
--
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
-- saveset = current->blocked;
-+ current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-- regs->ARM_r0 = -EINTR;
--
-- while (1) {
-- current->state = TASK_INTERRUPTIBLE;
-- schedule();
-- if (do_signal(&saveset, regs, 0))
-- return regs->ARM_r0;
-- }
--}
--
--asmlinkage int
--sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
--{
-- sigset_t saveset, newset;
--
-- /* XXX: Don't preclude handling different sized sigset_t's. */
-- if (sigsetsize != sizeof(sigset_t))
-- return -EINVAL;
--
-- if (copy_from_user(&newset, unewset, sizeof(newset)))
-- return -EFAULT;
-- sigdelsetmask(&newset, ~_BLOCKABLE);
--
-- spin_lock_irq(&current->sighand->siglock);
-- saveset = current->blocked;
-- current->blocked = newset;
-- recalc_sigpending();
-- spin_unlock_irq(&current->sighand->siglock);
-- regs->ARM_r0 = -EINTR;
--
-- while (1) {
-- current->state = TASK_INTERRUPTIBLE;
-- schedule();
-- if (do_signal(&saveset, regs, 0))
-- return regs->ARM_r0;
-- }
-+ current->state = TASK_INTERRUPTIBLE;
-+ schedule();
-+ set_thread_flag(TIF_RESTORE_SIGMASK);
-+ return -ERESTARTNOHAND;
- }
-
- asmlinkage int
-@@ -290,7 +256,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
-
- badframe:
- force_sig(SIGSEGV, current);
-- return 0;
-+ return -EFAULT;
- }
-
- asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
-@@ -325,7 +291,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
-
- badframe:
- force_sig(SIGSEGV, current);
-- return 0;
-+ return -EFAULT;
- }
-
- static int
-@@ -541,7 +507,7 @@ static inline void restart_syscall(struct pt_regs *regs)
- /*
- * OK, we're invoking a handler
- */
--static void
-+static int
- handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset,
- struct pt_regs * regs, int syscall)
-@@ -592,7 +558,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
-
- if (ret != 0) {
- force_sigsegv(sig, tsk);
-- return;
-+ return ret;
- }
-
- /*
-@@ -606,6 +572,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
-
-+ return ret;
- }
-
- /*
-@@ -617,11 +584,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
- * the kernel can handle, and then we build all the user-level signal handling
- * stack-frames in one go after that.
- */
--static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
-+static void do_signal(struct pt_regs *regs, int syscall)
- {
- struct k_sigaction ka;
- siginfo_t info;
- int signr;
-+ sigset_t *oldset;
-
- /*
- * We want the common case to go fast, which
-@@ -630,18 +598,29 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
- * if so.
- */
- if (!user_mode(regs))
-- return 0;
-+ return;
-
- if (try_to_freeze())
- goto no_signal;
-
- single_step_clear(current);
-
-+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
-+ oldset = &current->saved_sigmask;
-+ else
-+ oldset = &current->blocked;
-+
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
-- handle_signal(signr, &ka, &info, oldset, regs, syscall);
-+ if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
-+ /* a signal was successfully delivered; the saved
-+ * sigmask will have been stored in the signal frame,
-+ * and will be restored by sigreturn, so we can simply
-+ * clear the TIF_RESTORE_SIGMASK flag */
-+ clear_thread_flag(TIF_RESTORE_SIGMASK);
-+ }
- single_step_set(current);
-- return 1;
-+ return;
- }
-
- no_signal:
-@@ -665,7 +644,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
- usp = (u32 __user *)regs->ARM_sp;
-
- /*
-- * Either we supports OABI only, or we have
-+ * Either we support OABI only, or we have
- * EABI with the OABI compat layer enabled.
- * In the later case we don't know if user
- * space is EABI or not, and if not we must
-@@ -695,12 +674,17 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
- }
- }
- single_step_set(current);
-- return 0;
-+ /* if there's no signal to deliver, we just put the saved sigmask
-+ back. */
-+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-+ clear_thread_flag(TIF_RESTORE_SIGMASK);
-+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-+ }
- }
-
- asmlinkage void
- do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- {
-- if (thread_flags & _TIF_SIGPENDING)
-- do_signal(&current->blocked, regs, syscall);
-+ if (thread_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
-+ do_signal(regs, syscall);
- }
---
-1.6.2.4
-
diff --git a/recipes/linux/linux-omap-pm-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch b/recipes/linux/linux-omap-pm-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
deleted file mode 100644
index 7852f0afdb..0000000000
--- a/recipes/linux/linux-omap-pm-2.6.29/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
+++ /dev/null
@@ -1,287 +0,0 @@
-From 8a7643b09856f4f661403dcedbe0455b3cbeeea9 Mon Sep 17 00:00:00 2001
-From: Steven Newbury <s_j_newbury@yahoo.co.uk>
-Date: Fri, 22 May 2009 14:25:40 +0200
-Subject: [PATCH] implement TIF_RESTORE_SIGMASK support and enable the related
- syscalls:
-
-pselect6
-ppoll
-epoll_pwait
-
-Based on http://www.spinics.net/lists/arm-kernel/msg38114.html
----
- arch/arm/include/asm/thread_info.h | 2 +
- arch/arm/include/asm/unistd.h | 7 ++-
- arch/arm/kernel/calls.S | 6 +-
- arch/arm/kernel/signal.c | 90 +++++++++++++++---------------------
- 4 files changed, 46 insertions(+), 59 deletions(-)
-
-diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 4f88482..2cf0917 100644
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -136,6 +136,7 @@ extern void vfp_sync_state(struct thread_info *thread);
- #define TIF_SIGPENDING 0
- #define TIF_NEED_RESCHED 1
- #define TIF_SYSCALL_TRACE 8
-+#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
- #define TIF_POLLING_NRFLAG 16
- #define TIF_USING_IWMMXT 17
- #define TIF_MEMDIE 18
-@@ -144,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
- #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
- #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
- #define _TIF_FREEZE (1 << TIF_FREEZE)
-diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
-index 94cc58e..cd1eaa0 100644
---- a/arch/arm/include/asm/unistd.h
-+++ b/arch/arm/include/asm/unistd.h
-@@ -360,8 +360,8 @@
- #define __NR_readlinkat (__NR_SYSCALL_BASE+332)
- #define __NR_fchmodat (__NR_SYSCALL_BASE+333)
- #define __NR_faccessat (__NR_SYSCALL_BASE+334)
-- /* 335 for pselect6 */
-- /* 336 for ppoll */
-+#define __NR_pselect6 (__NR_SYSCALL_BASE+335)
-+#define __NR_ppoll (__NR_SYSCALL_BASE+336)
- #define __NR_unshare (__NR_SYSCALL_BASE+337)
- #define __NR_set_robust_list (__NR_SYSCALL_BASE+338)
- #define __NR_get_robust_list (__NR_SYSCALL_BASE+339)
-@@ -372,7 +372,7 @@
- #define __NR_vmsplice (__NR_SYSCALL_BASE+343)
- #define __NR_move_pages (__NR_SYSCALL_BASE+344)
- #define __NR_getcpu (__NR_SYSCALL_BASE+345)
-- /* 346 for epoll_pwait */
-+#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346)
- #define __NR_kexec_load (__NR_SYSCALL_BASE+347)
- #define __NR_utimensat (__NR_SYSCALL_BASE+348)
- #define __NR_signalfd (__NR_SYSCALL_BASE+349)
-@@ -430,6 +430,7 @@
- #define __ARCH_WANT_SYS_SIGPENDING
- #define __ARCH_WANT_SYS_SIGPROCMASK
- #define __ARCH_WANT_SYS_RT_SIGACTION
-+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-
- #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
- #define __ARCH_WANT_SYS_TIME
-diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
-index 1680e9e..534000d 100644
---- a/arch/arm/kernel/calls.S
-+++ b/arch/arm/kernel/calls.S
-@@ -344,8 +344,8 @@
- CALL(sys_readlinkat)
- CALL(sys_fchmodat)
- CALL(sys_faccessat)
--/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */
-- CALL(sys_ni_syscall) /* eventually ppoll */
-+/* 335 */ CALL(sys_pselect6)
-+ CALL(sys_ppoll)
- CALL(sys_unshare)
- CALL(sys_set_robust_list)
- CALL(sys_get_robust_list)
-@@ -355,7 +355,7 @@
- CALL(sys_vmsplice)
- CALL(sys_move_pages)
- /* 345 */ CALL(sys_getcpu)
-- CALL(sys_ni_syscall) /* eventually epoll_pwait */
-+ CALL(sys_epoll_pwait)
- CALL(sys_kexec_load)
- CALL(sys_utimensat)
- CALL(sys_signalfd)
-diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index 80b8b5c..7645048 100644
---- a/arch/arm/kernel/signal.c
-+++ b/arch/arm/kernel/signal.c
-@@ -47,57 +47,23 @@ const unsigned long sigreturn_codes[7] = {
- MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
- };
-
--static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
-+static void do_signal(struct pt_regs * regs, int syscall);
-
- /*
- * atomically swap in the new signal mask, and wait for a signal.
- */
--asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
-+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
- {
-- sigset_t saveset;
--
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
-- saveset = current->blocked;
-+ current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-- regs->ARM_r0 = -EINTR;
--
-- while (1) {
-- current->state = TASK_INTERRUPTIBLE;
-- schedule();
-- if (do_signal(&saveset, regs, 0))
-- return regs->ARM_r0;
-- }
--}
--
--asmlinkage int
--sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
--{
-- sigset_t saveset, newset;
--
-- /* XXX: Don't preclude handling different sized sigset_t's. */
-- if (sigsetsize != sizeof(sigset_t))
-- return -EINVAL;
--
-- if (copy_from_user(&newset, unewset, sizeof(newset)))
-- return -EFAULT;
-- sigdelsetmask(&newset, ~_BLOCKABLE);
--
-- spin_lock_irq(&current->sighand->siglock);
-- saveset = current->blocked;
-- current->blocked = newset;
-- recalc_sigpending();
-- spin_unlock_irq(&current->sighand->siglock);
-- regs->ARM_r0 = -EINTR;
--
-- while (1) {
-- current->state = TASK_INTERRUPTIBLE;
-- schedule();
-- if (do_signal(&saveset, regs, 0))
-- return regs->ARM_r0;
-- }
-+ current->state = TASK_INTERRUPTIBLE;
-+ schedule();
-+ set_thread_flag(TIF_RESTORE_SIGMASK);
-+ return -ERESTARTNOHAND;
- }
-
- asmlinkage int
-@@ -290,7 +256,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
-
- badframe:
- force_sig(SIGSEGV, current);
-- return 0;
-+ return -EFAULT;
- }
-
- asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
-@@ -325,7 +291,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
-
- badframe:
- force_sig(SIGSEGV, current);
-- return 0;
-+ return -EFAULT;
- }
-
- static int
-@@ -541,7 +507,7 @@ static inline void restart_syscall(struct pt_regs *regs)
- /*
- * OK, we're invoking a handler
- */
--static void
-+static int
- handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset,
- struct pt_regs * regs, int syscall)
-@@ -592,7 +558,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
-
- if (ret != 0) {
- force_sigsegv(sig, tsk);
-- return;
-+ return ret;
- }
-
- /*
-@@ -606,6 +572,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
-
-+ return ret;
- }
-
- /*
-@@ -617,11 +584,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
- * the kernel can handle, and then we build all the user-level signal handling
- * stack-frames in one go after that.
- */
--static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
-+static void do_signal(struct pt_regs *regs, int syscall)
- {
- struct k_sigaction ka;
- siginfo_t info;
- int signr;
-+ sigset_t *oldset;
-
- /*
- * We want the common case to go fast, which
-@@ -630,18 +598,29 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
- * if so.
- */
- if (!user_mode(regs))
-- return 0;
-+ return;
-
- if (try_to_freeze())
- goto no_signal;
-
- single_step_clear(current);
-
-+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
-+ oldset = &current->saved_sigmask;
-+ else
-+ oldset = &current->blocked;
-+
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
-- handle_signal(signr, &ka, &info, oldset, regs, syscall);
-+ if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
-+ /* a signal was successfully delivered; the saved
-+ * sigmask will have been stored in the signal frame,
-+ * and will be restored by sigreturn, so we can simply
-+ * clear the TIF_RESTORE_SIGMASK flag */
-+ clear_thread_flag(TIF_RESTORE_SIGMASK);
-+ }
- single_step_set(current);
-- return 1;
-+ return;
- }
-
- no_signal:
-@@ -665,7 +644,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
- usp = (u32 __user *)regs->ARM_sp;
-
- /*
-- * Either we supports OABI only, or we have
-+ * Either we support OABI only, or we have
- * EABI with the OABI compat layer enabled.
- * In the later case we don't know if user
- * space is EABI or not, and if not we must
-@@ -695,12 +674,17 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
- }
- }
- single_step_set(current);
-- return 0;
-+ /* if there's no signal to deliver, we just put the saved sigmask
-+ back. */
-+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-+ clear_thread_flag(TIF_RESTORE_SIGMASK);
-+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-+ }
- }
-
- asmlinkage void
- do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- {
-- if (thread_flags & _TIF_SIGPENDING)
-- do_signal(&current->blocked, regs, syscall);
-+ if (thread_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
-+ do_signal(regs, syscall);
- }
---
-1.6.2.4
-
diff --git a/recipes/xorg-lib/pixman/nearest-neighbour.patch b/recipes/xorg-lib/pixman/nearest-neighbour.patch
new file mode 100644
index 0000000000..29b140faf9
--- /dev/null
+++ b/recipes/xorg-lib/pixman/nearest-neighbour.patch
@@ -0,0 +1,1040 @@
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Fri, 17 Jul 2009 10:22:23 +0000 (+0300)
+Subject: Fastpath for nearest neighbour scaled compositing operations.
+X-Git-Url: http://siarhei.siamashka.name/gitweb/?p=pixman.git;a=commitdiff_plain;h=247531c6978725a88fd3706129b9d3e339026f54
+
+Fastpath for nearest neighbour scaled compositing operations.
+
+OVER 8888x8888, OVER 8888x0565, SRC 8888x8888, SRC 8888x0565
+and SRC 0565x0565 cases are supported.
+---
+
+diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
+index 7f80578..7f3a6ad 100644
+--- a/pixman/pixman-fast-path.c
++++ b/pixman/pixman-fast-path.c
+@@ -1261,6 +1261,993 @@ fast_composite_src_scale_nearest (pixman_implementation_t *imp,
+ }
+ }
+
++/*
++ * Functions, which implement the core inner loops for the nearest neighbour
++ * scaled fastpath compositing operations. They do not need to do clipping
++ * checks, also the loops are unrolled to process two pixels per iteration
++ * for better performance on most CPU architectures (superscalar processors
++ * can issue several operations simultaneously, other processors can hide
++ * instruction latencies by pipelining operations). Unrolling more
++ * does not make much sense because the compiler will start running out
++ * of spare registers soon.
++ */
++
++#undef READ
++#undef WRITE
++#define READ(img,x) (*(x))
++#define WRITE(img,ptr,v) ((*(ptr)) = (v))
++
++#define UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(x, a, y) do { \
++ UN8x4_MUL_UN8_ADD_UN8x4(x, a, y); \
++ x = CONVERT_8888_TO_0565(x); \
++ } while (0)
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x0565 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint16_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t d;
++ uint32_t s1, s2;
++ uint8_t a1, a2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++
++ uint32_t *src;
++ uint16_t *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++
++ if ((y < 0) || (y >= pSrc->bits.height)) {
++ continue;
++ }
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ s2 = READ(pSrc, src + x2);
++
++ a1 = s1 >> 24;
++ a2 = s2 >> 24;
++
++ if (a1 == 0xff)
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ else if (s1) {
++ d = CONVERT_0565_TO_0888(READ(pDst, dst));
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++
++ if (a2 == 0xff)
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2));
++ else if (s2) {
++ d = CONVERT_0565_TO_0888(READ(pDst, dst));
++ a2 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a2, s2);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ a1 = s1 >> 24;
++ if (a1 == 0xff)
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ else if (s1) {
++ d = CONVERT_0565_TO_0888(READ(pDst, dst));
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x0565 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint16_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t d;
++ uint32_t s1, s2;
++ uint8_t a1, a2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++ int32_t max_vx, max_vy;
++
++ uint32_t *src;
++ uint16_t *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ max_vx = pSrc->bits.width << 16;
++ max_vy = pSrc->bits.height << 16;
++
++ while (orig_vx < 0) orig_vx += max_vx;
++ while (vy < 0) vy += max_vy;
++ while (orig_vx >= max_vx) orig_vx -= max_vx;
++ while (vy >= max_vy) vy -= max_vy;
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++ while (vy >= max_vy) vy -= max_vy;
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s2 = READ(pSrc, src + x2);
++
++ a1 = s1 >> 24;
++ a2 = s2 >> 24;
++
++ if (a1 == 0xff)
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ else if (s1) {
++ d = CONVERT_0565_TO_0888(READ(pDst, dst));
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++
++ if (a2 == 0xff)
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2));
++ else if (s2) {
++ d = CONVERT_0565_TO_0888(READ(pDst, dst));
++ a2 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a2, s2);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ a1 = s1 >> 24;
++ if (a1 == 0xff)
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ else if (s1) {
++ d = CONVERT_0565_TO_0888(READ(pDst, dst));
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x8888 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint32_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t d;
++ uint32_t s1, s2;
++ uint8_t a1, a2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++
++ uint32_t *src, *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++
++ if ((y < 0) || (y >= pSrc->bits.height)) {
++ continue;
++ }
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ s2 = READ(pSrc, src + x2);
++
++ a1 = s1 >> 24;
++ a2 = s2 >> 24;
++
++ if (a1 == 0xff)
++ WRITE(pDst, dst, s1);
++ else if (s1) {
++ d = READ(pDst, dst);
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++
++ if (a2 == 0xff)
++ WRITE(pDst, dst, s2);
++ else if (s2) {
++ d = READ(pDst, dst);
++ a2 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4(d, a2, s2);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ a1 = s1 >> 24;
++ if (a1 == 0xff)
++ WRITE(pDst, dst, s1);
++ else if (s1) {
++ d = READ(pDst, dst);
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x8888 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint32_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t d;
++ uint32_t s1, s2;
++ uint8_t a1, a2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++ int32_t max_vx, max_vy;
++
++ uint32_t *src, *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ max_vx = pSrc->bits.width << 16;
++ max_vy = pSrc->bits.height << 16;
++
++ while (orig_vx < 0) orig_vx += max_vx;
++ while (vy < 0) vy += max_vy;
++ while (orig_vx >= max_vx) orig_vx -= max_vx;
++ while (vy >= max_vy) vy -= max_vy;
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++ while (vy >= max_vy) vy -= max_vy;
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s2 = READ(pSrc, src + x2);
++
++ a1 = s1 >> 24;
++ a2 = s2 >> 24;
++
++ if (a1 == 0xff)
++ WRITE(pDst, dst, s1);
++ else if (s1) {
++ d = READ(pDst, dst);
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++
++ if (a2 == 0xff)
++ WRITE(pDst, dst, s2);
++ else if (s2) {
++ d = READ(pDst, dst);
++ a2 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4(d, a2, s2);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ a1 = s1 >> 24;
++ if (a1 == 0xff)
++ WRITE(pDst, dst, s1);
++ else if (s1) {
++ d = READ(pDst, dst);
++ a1 ^= 0xff;
++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1);
++ WRITE(pDst, dst, d);
++ }
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x8888 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint32_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t s1, s2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++
++ uint32_t *src, *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++
++ if ((y < 0) || (y >= pSrc->bits.height)) {
++ memset(dst, 0, width * sizeof(*dst));
++ continue;
++ }
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ s2 = READ(pSrc, src + x2);
++
++ WRITE(pDst, dst, s1);
++ dst++;
++ WRITE(pDst, dst, s2);
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++ WRITE(pDst, dst, s1);
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x8888 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint32_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t s1, s2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++ int32_t max_vx, max_vy;
++
++ uint32_t *src, *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ max_vx = pSrc->bits.width << 16;
++ max_vy = pSrc->bits.height << 16;
++
++ while (orig_vx < 0) orig_vx += max_vx;
++ while (vy < 0) vy += max_vy;
++ while (orig_vx >= max_vx) orig_vx -= max_vx;
++ while (vy >= max_vy) vy -= max_vy;
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++ while (vy >= max_vy) vy -= max_vy;
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s2 = READ(pSrc, src + x2);
++
++ WRITE(pDst, dst, s1);
++ dst++;
++ WRITE(pDst, dst, s2);
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ WRITE(pDst, dst, s1);
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_0565x0565 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint16_t *dstLine;
++ uint16_t *srcFirstLine;
++ uint16_t s1, s2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++
++ uint16_t *src, *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint16_t, srcStride, srcFirstLine, 1);
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++
++ if ((y < 0) || (y >= pSrc->bits.height)) {
++ memset(dst, 0, width * sizeof(*dst));
++ continue;
++ }
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ s2 = READ(pSrc, src + x2);
++
++ WRITE(pDst, dst, s1);
++ dst++;
++ WRITE(pDst, dst, s2);
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++ WRITE(pDst, dst, s1);
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_0565x0565 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint16_t *dstLine;
++ uint16_t *srcFirstLine;
++ uint16_t s1, s2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++ int32_t max_vx, max_vy;
++
++ uint16_t *src, *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint16_t, srcStride, srcFirstLine, 1);
++
++ max_vx = pSrc->bits.width << 16;
++ max_vy = pSrc->bits.height << 16;
++
++ while (orig_vx < 0) orig_vx += max_vx;
++ while (vy < 0) vy += max_vy;
++ while (orig_vx >= max_vx) orig_vx -= max_vx;
++ while (vy >= max_vy) vy -= max_vy;
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++ while (vy >= max_vy) vy -= max_vy;
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s2 = READ(pSrc, src + x2);
++
++ WRITE(pDst, dst, s1);
++ dst++;
++ WRITE(pDst, dst, s2);
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ WRITE(pDst, dst, s1);
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x0565 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint16_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t s1, s2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++
++ uint32_t *src;
++ uint16_t *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++
++ if ((y < 0) || (y >= pSrc->bits.height)) {
++ memset(dst, 0, width * sizeof(*dst));
++ continue;
++ }
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ s2 = READ(pSrc, src + x2);
++
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ dst++;
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2));
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ s1 = READ(pSrc, src + x1);
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ dst++;
++ }
++ }
++}
++
++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x0565 (
++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst,
++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y)
++{
++ uint16_t *dstLine;
++ uint32_t *srcFirstLine;
++ uint32_t s1, s2;
++ int w;
++ int x1, x2, y;
++ int32_t orig_vx = vx;
++ int32_t max_vx, max_vy;
++
++ uint32_t *src;
++ uint16_t *dst;
++ int srcStride, dstStride;
++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
++ * transformed from destination space to source space */
++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1);
++
++ max_vx = pSrc->bits.width << 16;
++ max_vy = pSrc->bits.height << 16;
++
++ while (orig_vx < 0) orig_vx += max_vx;
++ while (vy < 0) vy += max_vy;
++ while (orig_vx >= max_vx) orig_vx -= max_vx;
++ while (vy >= max_vy) vy -= max_vy;
++
++ while (--height >= 0)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++
++ y = vy >> 16;
++ vy += unit_y;
++ while (vy >= max_vy) vy -= max_vy;
++
++ src = srcFirstLine + srcStride * y;
++
++ w = width;
++ vx = orig_vx;
++ while ((w -= 2) >= 0)
++ {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ x2 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s2 = READ(pSrc, src + x2);
++
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ dst++;
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2));
++ dst++;
++ }
++ if (w & 1) {
++ x1 = vx >> 16;
++ vx += unit_x;
++ while (vx >= max_vx) vx -= max_vx;
++ s1 = READ(pSrc, src + x1);
++
++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1));
++ dst++;
++ }
++ }
++}
++
++/*
++ * Check if the clipping boundary is crossed on horizontal scaling
++ */
++static inline pixman_bool_t
++fbTransformVerifyHorizontalClipping(pixman_image_t *pict, int width, int32_t vx, int32_t unit_x)
++{
++ while (--width >= 0) {
++ int x = vx >> 16;
++ if ((x < 0) || (x >= pict->bits.width)) return 1;
++ vx += unit_x;
++ }
++ return 0;
++}
++
++/*
++ * Check if the clipping boundary is crossed on vertical scaling
++ */
++static inline pixman_bool_t
++fbTransformVerifyVerticalClipping(pixman_image_t *pict, int height, int32_t vy, int32_t unit_y)
++{
++ while (--height >= 0) {
++ int y = vy >> 16;
++ if ((y < 0) || (y >= pict->bits.height)) return 1;
++ vy += unit_y;
++ }
++ return 0;
++}
++
++/*
++ * Easy case of transform without rotation or complex clipping
++ * Returns 1 in the case if it was able to handle this operation and 0 otherwise
++ */
++static pixman_bool_t
++fbCompositeTransformNonrotatedAffineTrivialclip (
++ pixman_op_t op,
++ pixman_image_t *pSrc,
++ pixman_image_t *pMask,
++ pixman_image_t *pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height)
++{
++ pixman_vector_t v, unit;
++ int skipdst_x = 0, skipdst_y = 0;
++
++ /* Handle destination clipping */
++ if (xDst < pDst->common.clip_region.extents.x1) {
++ skipdst_x = pDst->common.clip_region.extents.x1 - xDst;
++ if (skipdst_x >= (int)width)
++ return 1;
++ xDst = pDst->common.clip_region.extents.x1;
++ width -= skipdst_x;
++ }
++
++ if (yDst < pDst->common.clip_region.extents.y1) {
++ skipdst_y = pDst->common.clip_region.extents.y1 - yDst;
++ if (skipdst_y >= (int)height)
++ return 1;
++ yDst = pDst->common.clip_region.extents.y1;
++ height -= skipdst_y;
++ }
++
++ if (xDst >= pDst->common.clip_region.extents.x2 ||
++ yDst >= pDst->common.clip_region.extents.y2)
++ {
++ return 1;
++ }
++
++ if (xDst + width > pDst->common.clip_region.extents.x2)
++ width = pDst->common.clip_region.extents.x2 - xDst;
++ if (yDst + height > pDst->common.clip_region.extents.y2)
++ height = pDst->common.clip_region.extents.y2 - yDst;
++
++ /* reference point is the center of the pixel */
++ v.vector[0] = pixman_int_to_fixed(xSrc) + pixman_fixed_1 / 2;
++ v.vector[1] = pixman_int_to_fixed(ySrc) + pixman_fixed_1 / 2;
++ v.vector[2] = pixman_fixed_1;
++
++ if (!pixman_transform_point_3d (pSrc->common.transform, &v))
++ return 0;
++
++ /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
++ v.vector[0] -= pixman_fixed_e;
++ v.vector[1] -= pixman_fixed_e;
++
++ unit.vector[0] = pSrc->common.transform->matrix[0][0];
++ unit.vector[1] = pSrc->common.transform->matrix[1][1];
++
++ v.vector[0] += unit.vector[0] * skipdst_x;
++ v.vector[1] += unit.vector[1] * skipdst_y;
++
++ /* Check for possible fixed point arithmetics problems/overflows */
++ if (unit.vector[0] <= 0 || unit.vector[1] <= 0)
++ return 0;
++ if (width == 0 || height == 0)
++ return 0;
++ if ((uint32_t)width + (unit.vector[0] >> 16) >= 0x7FFF)
++ return 0;
++ if ((uint32_t)height + (unit.vector[1] >> 16) >= 0x7FFF)
++ return 0;
++
++ /* Horizontal source clipping is only supported for NORMAL repeat */
++ if (pSrc->common.repeat != PIXMAN_REPEAT_NORMAL
++ && fbTransformVerifyHorizontalClipping(pSrc, width, v.vector[0], unit.vector[0])) {
++ return 0;
++ }
++
++ /* Vertical source clipping is only supported for NONE and NORMAL repeat */
++ if (pSrc->common.repeat != PIXMAN_REPEAT_NONE && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL
++ && fbTransformVerifyVerticalClipping(pSrc, height, v.vector[1], unit.vector[1])) {
++ return 0;
++ }
++
++ if (op == PIXMAN_OP_OVER && pSrc->bits.format == PIXMAN_a8r8g8b8
++ && (pDst->bits.format == PIXMAN_x8r8g8b8 || pDst->bits.format == PIXMAN_a8r8g8b8))
++ {
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x8888(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x8888(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ }
++
++ if (op == PIXMAN_OP_SRC && (pSrc->bits.format == PIXMAN_x8r8g8b8 || pSrc->bits.format == PIXMAN_a8r8g8b8)
++ && (pDst->bits.format == PIXMAN_x8r8g8b8 || pDst->bits.format == pSrc->bits.format))
++ {
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x8888(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x8888(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ }
++
++ if (op == PIXMAN_OP_OVER && pSrc->bits.format == PIXMAN_a8r8g8b8 && pDst->bits.format == PIXMAN_r5g6b5)
++ {
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x0565(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x0565(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ }
++
++ if (op == PIXMAN_OP_SRC && pSrc->bits.format == PIXMAN_r5g6b5 && pDst->bits.format == PIXMAN_r5g6b5)
++ {
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_0565x0565(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_0565x0565(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ }
++
++ if (op == PIXMAN_OP_SRC && (pSrc->bits.format == PIXMAN_x8r8g8b8 || pSrc->bits.format == PIXMAN_a8r8g8b8)
++ && pDst->bits.format == PIXMAN_r5g6b5)
++ {
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x0565(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) {
++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x0565(
++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height,
++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]);
++ return 1;
++ }
++ }
++
++ /* No fastpath scaling implemented for this case */
++ return 0;
++}
++
+ static void
+ fast_path_composite (pixman_implementation_t *imp,
+ pixman_op_t op,
+@@ -1279,6 +2266,30 @@ fast_path_composite (pixman_implementation_t *imp,
+ if (src->type == BITS
+ && src->common.transform
+ && !mask
++ && !src->common.alpha_map && !dest->common.alpha_map
++ && (src->common.filter == PIXMAN_FILTER_NEAREST)
++ && !src->bits.read_func && !src->bits.write_func
++ && !dest->bits.read_func && !dest->bits.write_func)
++ {
++ /* ensure that the transform matrix only has a scale */
++ if (src->common.transform->matrix[0][1] == 0 &&
++ src->common.transform->matrix[1][0] == 0 &&
++ src->common.transform->matrix[2][0] == 0 &&
++ src->common.transform->matrix[2][1] == 0 &&
++ src->common.transform->matrix[2][2] == pixman_fixed_1 &&
++ dest->common.clip_region.data == NULL)
++ {
++ if (fbCompositeTransformNonrotatedAffineTrivialclip (op, src, mask, dest,
++ src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
++ {
++ return;
++ }
++ }
++ }
++
++ if (src->type == BITS
++ && src->common.transform
++ && !mask
+ && op == PIXMAN_OP_SRC
+ && !src->common.alpha_map && !dest->common.alpha_map
+ && (src->common.filter == PIXMAN_FILTER_NEAREST)
diff --git a/recipes/xorg-lib/pixman/over-8888-0565.patch b/recipes/xorg-lib/pixman/over-8888-0565.patch
new file mode 100644
index 0000000000..3e27094022
--- /dev/null
+++ b/recipes/xorg-lib/pixman/over-8888-0565.patch
@@ -0,0 +1,296 @@
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 27 Jul 2009 04:48:04 +0000 (+0300)
+Subject: ARM: NEON optimized version of composite_over_8888_0565
+X-Git-Url: http://siarhei.siamashka.name/gitweb/?p=pixman.git;a=commitdiff_plain;h=17d8ab82858511f212dfb30c347255393eb12b0c
+
+ARM: NEON optimized version of composite_over_8888_0565
+---
+
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index 9404c70..f1dcf1f 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -1447,6 +1447,274 @@ neon_composite_src_16_16 (pixman_implementation_t * impl,
+ }
+ }
+
++static inline void
++neon_composite_over_8888_0565_internal (uint32_t *src,
++ uint16_t *dst,
++ int32_t w,
++ int32_t h,
++ int32_t src_stride,
++ int32_t dst_stride)
++{
++ int32_t dst_newline_delta = (dst_stride - w) * 2;
++ int32_t src_newline_delta = (src_stride - w) * 4;
++ asm volatile (
++
++ ".macro process_pixblock_head size\n"
++ /* load pixel data from memory */
++ " .if \\size == 8\n"
++ " vld1.32 {d0, d1, d2, d3}, [%[src]]!\n"
++ " vld1.16 {d4, d5}, [%[dst_r]]!\n"
++ " .elseif \\size == 4\n"
++ " vld1.32 {d0, d1}, [%[src]]!\n"
++ " vld1.16 {d4}, [%[dst_r]]!\n"
++ " .elseif \\size == 2\n"
++ " vld1.32 {d0}, [%[src]]!\n"
++ " vld1.16 {d4[0]}, [%[dst_r]]!\n"
++ " vld1.16 {d4[1]}, [%[dst_r]]!\n"
++ " .elseif \\size == 1\n"
++ " vld1.32 {d0[0]}, [%[src]]!\n"
++ " vld1.16 {d4[0]}, [%[dst_r]]!\n"
++ " .endif\n"
++ /* deinterleave and convert both source and destination
++ to "planar" 8-bit format */
++ " vshrn.u16 d16, q2, #8\n"
++ " vuzp.8 d0, d1\n"
++ " vshrn.u16 d17, q2, #3\n"
++ " vuzp.8 d2, d3\n"
++ " vsli.u16 q2, q2, #5\n"
++ " vuzp.8 d1, d3\n"
++ " vsri.u8 d16, d16, #5\n"
++ " vuzp.8 d0, d2\n"
++ " vmvn.8 d3, d3\n"
++ " vsri.u8 d17, d17, #6\n"
++ " vshrn.u16 d18, q2, #2\n"
++ /* source: d0 - blue, d1 - green, d2 - red, d3 - alpha */
++ /* destination: d16 - red, d17 - green, d18 - blue */
++ /* now do alpha blending */
++ " vmull.u8 q10, d3, d16\n"
++ "pld [%[src], #128]\n"
++ " vmull.u8 q11, d3, d17\n"
++ "pld [%[dst_r], #64]\n"
++ " vmull.u8 q12, d3, d18\n"
++ " vrshr.u16 q13, q10, #8\n"
++ " vrshr.u16 q8, q11, #8\n"
++ " vrshr.u16 q9, q12, #8\n"
++ " vraddhn.u16 d20, q10, q13\n"
++ " vraddhn.u16 d21, q11, q8\n"
++ " vraddhn.u16 d22, q12, q9\n"
++ ".endm\n"
++
++ ".macro process_pixblock_tail size\n"
++ /* result is ready in d28, d29, d30 (R, G, B) */
++ " vqadd.u8 d28, d2, d20\n"
++ " vqadd.u8 d29, d1, d21\n"
++ " vqadd.u8 d30, d0, d22\n"
++ /* convert it to r5g6b5 */
++ " vshll.u8 q3, d28, #8\n"
++ " vshll.u8 q14, d29, #8\n"
++ " vshll.u8 q15, d30, #8\n"
++ " vsri.u16 q3, q14, #5\n"
++ " vsri.u16 q3, q15, #11\n"
++ /* store pixel data to memory */
++ " .if \\size == 8\n"
++ " vst1.16 {d6, d7}, [%[dst_w], :128]!\n"
++ " .elseif \\size == 4\n"
++ " vst1.16 {d6}, [%[dst_w]]!\n"
++ " .elseif \\size == 2\n"
++ " vst1.16 {d6[0]}, [%[dst_w]]!\n"
++ " vst1.16 {d6[1]}, [%[dst_w]]!\n"
++ " .elseif \\size == 1\n"
++ " vst1.16 {d6[0]}, [%[dst_w]]!\n"
++ " .endif\n"
++ ".endm\n"
++
++ /* "tail" of the previous block and "head" of the next block
++ are merged and interleaved for better instructions scheduling */
++ ".macro process_pixblock_tail_head_8\n"
++ " vqadd.u8 d28, d2, d20\n"
++ " vld1.16 {d4, d5}, [%[dst_r], :128]!\n"
++ " vqadd.u8 d29, d1, d21\n" /* TODO: try to join these into a */
++ " vqadd.u8 d30, d0, d22\n" /* single 128-bit operation */
++ " vshrn.u16 d16, q2, #8\n"
++ " vld1.32 {d0, d1, d2, d3}, [%[src]]!\n" /* TODO: maybe split */
++ " vshrn.u16 d17, q2, #3\n"
++ " vsli.u16 q2, q2, #5\n"
++ " vuzp.8 d0, d1\n"
++ " vshll.u8 q3, d28, #8\n"
++ " vuzp.8 d2, d3\n"
++ " vshll.u8 q14, d29, #8\n"
++ " vuzp.8 d1, d3\n"
++ " vsri.u8 d16, d16, #5\n"
++ " vuzp.8 d0, d2\n"
++ " vmvn.8 d3, d3\n"
++ " vsri.u8 d17, d17, #6\n"
++ " vshrn.u16 d18, q2, #2\n"
++ " vmull.u8 q10, d3, d16\n"
++ "pld [%[src], #128]\n"
++ " vmull.u8 q11, d3, d17\n"
++ "pld [%[dst_r], #64]\n"
++ " vmull.u8 q12, d3, d18\n"
++ " vsri.u16 d6, d28, #5\n"
++ " vsri.u16 d7, d29, #5\n"
++ " vshll.u8 q15, d30, #8\n"
++ " vrshr.u16 q13, q10, #8\n"
++ " vrshr.u16 q8, q11, #8\n"
++ " vrshr.u16 q9, q12, #8\n"
++ " vsri.u16 d6, d30, #11\n"
++ " vsri.u16 d7, d31, #11\n"
++ " vraddhn.u16 d20, q10, q13\n"
++ " vraddhn.u16 d21, q11, q8\n"
++ " vraddhn.u16 d22, q12, q9\n"
++ " vst1.16 {d6, d7}, [%[dst_w], :128]!\n"
++ ".endm\n"
++
++ "subs %[h], %[h], #1\n"
++ "blt 9f\n"
++ "0:\n"
++ "cmp %[w], #8\n"
++ "blt 8f\n"
++
++ /* ensure 16 byte alignment of the destination buffer */
++ "tst %[dst_r], #0xF\n"
++ "beq 2f\n"
++ "tst %[dst_r], #2\n"
++ "beq 1f\n"
++ "vld1.32 {d3[0]}, [%[src]]!\n"
++ "vld1.16 {d5[2]}, [%[dst_r]]!\n"
++ "sub %[w], %[w], #1\n"
++ "1:\n"
++ "tst %[dst_r], #4\n"
++ "beq 1f\n"
++ "vld1.32 {d2}, [%[src]]!\n"
++ "vld1.16 {d5[0]}, [%[dst_r]]!\n"
++ "vld1.16 {d5[1]}, [%[dst_r]]!\n"
++ "sub %[w], %[w], #2\n"
++ "1:\n"
++ "tst %[dst_r], #8\n"
++ "beq 1f\n"
++ "vld1.32 {d0, d1}, [%[src]]!\n"
++ "vld1.16 {d4}, [%[dst_r]]!\n"
++ "sub %[w], %[w], #4\n"
++ "1:\n"
++ "process_pixblock_head -1\n"
++ "process_pixblock_tail -1\n"
++ "tst %[dst_w], #2\n"
++ "beq 1f\n"
++ "vst1.16 {d7[2]}, [%[dst_w]]!\n"
++ "1:\n"
++ "tst %[dst_w], #4\n"
++ "beq 1f\n"
++ "vst1.16 {d7[0]}, [%[dst_w]]!\n"
++ "vst1.16 {d7[1]}, [%[dst_w]]!\n"
++ "1:\n"
++ "tst %[dst_w], #8\n"
++ "beq 2f\n"
++ "vst1.16 {d6}, [%[dst_w]]!\n"
++ "2:\n"
++
++ "subs %[w], %[w], #8\n"
++ "blt 8f\n"
++ "process_pixblock_head 8\n"
++ "subs %[w], %[w], #8\n"
++ "blt 2f\n"
++ "1:\n" /* innermost pipelined loop */
++ "process_pixblock_tail_head_8\n"
++ "subs %[w], %[w], #8\n"
++ "bge 1b\n"
++ "2:\n"
++ "process_pixblock_tail 8\n"
++
++ "8:\n"
++ /* process up to 7 remaining pixels */
++ "tst %[w], #7\n"
++ "beq 2f\n"
++ "tst %[w], #4\n"
++ "beq 1f\n"
++ "vld1.32 {d0, d1}, [%[src]]!\n"
++ "vld1.16 {d4}, [%[dst_r]]!\n"
++ "1:\n"
++ "tst %[w], #2\n"
++ "beq 1f\n"
++ "vld1.32 {d2}, [%[src]]!\n"
++ "vld1.16 {d5[0]}, [%[dst_r]]!\n"
++ "vld1.16 {d5[1]}, [%[dst_r]]!\n"
++ "1:\n"
++ "tst %[w], #1\n"
++ "beq 1f\n"
++ "vld1.32 {d3[0]}, [%[src]]!\n"
++ "vld1.16 {d5[2]}, [%[dst_r]]!\n"
++ "1:\n"
++
++ "process_pixblock_head -1\n"
++ "process_pixblock_tail -1\n"
++
++ "tst %[w], #4\n"
++ "beq 1f\n"
++ "vst1.16 {d6}, [%[dst_w]]!\n"
++ "1:\n"
++ "tst %[w], #2\n"
++ "beq 1f\n"
++ "vst1.16 {d7[0]}, [%[dst_w]]!\n"
++ "vst1.16 {d7[1]}, [%[dst_w]]!\n"
++ "1:\n"
++ "tst %[w], #1\n"
++ "beq 2f\n"
++ "vst1.16 {d7[2]}, [%[dst_w]]!\n"
++ "2:\n"
++
++ "add %[src], %[src], %[src_newline_delta]\n"
++ "add %[dst_r], %[dst_r], %[dst_newline_delta]\n"
++ "add %[dst_w], %[dst_w], %[dst_newline_delta]\n"
++ "mov %[w], %[orig_w]\n"
++ "subs %[h], %[h], #1\n"
++ "bge 0b\n"
++ "9:\n"
++ ".purgem process_pixblock_head\n"
++ ".purgem process_pixblock_tail\n"
++ ".purgem process_pixblock_tail_head_8\n"
++
++ : [src] "+&r" (src), [dst_r] "+&r" (dst), [dst_w] "+&r" (dst),
++ [w] "+&r" (w), [h] "+&r" (h)
++ : [dst_newline_delta] "r" (dst_newline_delta),
++ [src_newline_delta] "r" (src_newline_delta), [orig_w] "r" (w)
++ : "cc", "memory",
++ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
++ /* "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", */
++ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
++ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
++ );
++}
++
++static void
++neon_composite_over_8888_0565 (pixman_implementation_t *imp,
++ pixman_op_t op,
++ pixman_image_t * src_image,
++ pixman_image_t * mask_image,
++ pixman_image_t * dst_image,
++ int32_t src_x,
++ int32_t src_y,
++ int32_t mask_x,
++ int32_t mask_y,
++ int32_t dest_x,
++ int32_t dest_y,
++ int32_t width,
++ int32_t height)
++{
++ uint16_t *dst_line;
++ uint32_t *src_line;
++ int32_t dst_stride, src_stride;
++
++ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
++ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
++
++ neon_composite_over_8888_0565_internal (src_line,
++ dst_line,
++ width,
++ height,
++ src_stride,
++ dst_stride);
++}
++
+ #endif /* USE_GCC_INLINE_ASM */
+
+ static void
+@@ -1908,6 +2176,8 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ #ifdef USE_GCC_INLINE_ASM
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_16_16, 0 },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_16_16, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 },
+ #endif
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888, 0 },
diff --git a/recipes/xorg-lib/pixman/pixman-28986.patch b/recipes/xorg-lib/pixman/pixman-28986.patch
new file mode 100644
index 0000000000..f5ba4c302e
--- /dev/null
+++ b/recipes/xorg-lib/pixman/pixman-28986.patch
@@ -0,0 +1,32 @@
+From 7b7860d61fb1526acdf010dd8fd644bbf1396b9e Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Fri, 28 Aug 2009 22:34:21 +0300
+Subject: [PATCH] ARM: workaround for gcc bug in vshll_n_u8 intrinsic
+
+Some versions of gcc (cs2009q1, 4.4.1) incorrectly reject
+shift operand having value >= 8, claiming that it is out of
+range. So inline assembly is used as a workaround.
+---
+ pixman/pixman-arm-neon.c | 6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index 4125d1b..3e7f566 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -64,6 +64,12 @@ unpack0565 (uint16x8_t rgb)
+ return res;
+ }
+
++#ifdef USE_GCC_INLINE_ASM
++/* Some versions of gcc have problems with vshll_n_u8 intrinsic (Bug 23576) */
++#define vshll_n_u8(a, n) ({ uint16x8_t r; \
++ asm ("vshll.u8 %q0, %P1, %2\n" : "=w" (r) : "w" (a), "i" (n)); r; })
++#endif
++
+ static force_inline uint16x8_t
+ pack0565 (uint8x8x4_t s)
+ {
+--
+1.5.4.3
+
diff --git a/recipes/xorg-lib/pixman/remove-broken.patch b/recipes/xorg-lib/pixman/remove-broken.patch
new file mode 100644
index 0000000000..fd025b4bbd
--- /dev/null
+++ b/recipes/xorg-lib/pixman/remove-broken.patch
@@ -0,0 +1,826 @@
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Sun, 26 Jul 2009 22:21:26 +0000 (+0300)
+Subject: ARM: Removal of unused/broken NEON code
+X-Git-Url: http://siarhei.siamashka.name/gitweb/?p=pixman.git;a=commitdiff_plain;h=7ef2322eefcccc28a2d45c0da22c0fee88b8f464
+
+ARM: Removal of unused/broken NEON code
+---
+
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index 4125d1b..9404c70 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -1895,710 +1895,6 @@ pixman_fill_neon (uint32_t *bits,
+ #endif
+ }
+
+-/* TODO: is there a more generic way of doing this being introduced? */
+-#define NEON_SCANLINE_BUFFER_PIXELS (1024)
+-
+-static inline void
+-neon_quadword_copy (void * dst,
+- void * src,
+- uint32_t count, /* of quadwords */
+- uint32_t trailer_count /* of bytes */)
+-{
+- uint8_t *t_dst = dst, *t_src = src;
+-
+- /* Uses aligned multi-register loads to maximise read bandwidth
+- * on uncached memory such as framebuffers
+- * The accesses do not have the aligned qualifiers, so that the copy
+- * may convert between aligned-uncached and unaligned-cached memory.
+- * It is assumed that the CPU can infer alignedness from the address.
+- */
+-
+-#ifdef USE_GCC_INLINE_ASM
+-
+- asm volatile (
+- " cmp %[count], #8 \n"
+- " blt 1f @ skip oversized fragments \n"
+- "0: @ start with eight quadwords at a time \n"
+- " sub %[count], %[count], #8 \n"
+- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n"
+- " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n"
+- " vld1.8 {d24, d25, d26, d27}, [%[src]]! \n"
+- " vld1.8 {d28, d29, d30, d31}, [%[src]]! \n"
+- " cmp %[count], #8 \n"
+- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n"
+- " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n"
+- " vst1.8 {d24, d25, d26, d27}, [%[dst]]! \n"
+- " vst1.8 {d28, d29, d30, d31}, [%[dst]]! \n"
+- " bge 0b \n"
+- "1: @ four quadwords \n"
+- " tst %[count], #4 \n"
+- " beq 2f @ skip oversized fragment \n"
+- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n"
+- " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n"
+- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n"
+- " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n"
+- "2: @ two quadwords \n"
+- " tst %[count], #2 \n"
+- " beq 3f @ skip oversized fragment \n"
+- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n"
+- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n"
+- "3: @ one quadword \n"
+- " tst %[count], #1 \n"
+- " beq 4f @ skip oversized fragment \n"
+- " vld1.8 {d16, d17}, [%[src]]! \n"
+- " vst1.8 {d16, d17}, [%[dst]]! \n"
+- "4: @ end \n"
+-
+- /* Clobbered input registers marked as input/outputs */
+- : [dst] "+r" (t_dst), [src] "+r" (t_src), [count] "+r" (count)
+-
+- /* No unclobbered inputs */
+- :
+-
+- /* Clobbered vector registers */
+- : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25",
+- "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory");
+-
+-#else
+-
+- while (count >= 8)
+- {
+- uint8x16x4_t t1 = vld4q_u8 (t_src);
+- uint8x16x4_t t2 = vld4q_u8 (t_src + sizeof(uint8x16x4_t));
+-
+- t_src += sizeof(uint8x16x4_t) * 2;
+- vst4q_u8 (t_dst, t1);
+- vst4q_u8 (t_dst + sizeof(uint8x16x4_t), t2);
+- t_dst += sizeof(uint8x16x4_t) * 2;
+- count -= 8;
+- }
+-
+- if (count & 4)
+- {
+- uint8x16x4_t t1 = vld4q_u8 (t_src);
+-
+- t_src += sizeof(uint8x16x4_t);
+- vst4q_u8 (t_dst, t1);
+- t_dst += sizeof(uint8x16x4_t);
+- }
+-
+- if (count & 2)
+- {
+- uint8x8x4_t t1 = vld4_u8 (t_src);
+-
+- t_src += sizeof(uint8x8x4_t);
+- vst4_u8 (t_dst, t1);
+- t_dst += sizeof(uint8x8x4_t);
+- }
+-
+- if (count & 1)
+- {
+- uint8x16_t t1 = vld1q_u8 (t_src);
+-
+- t_src += sizeof(uint8x16_t);
+- vst1q_u8 (t_dst, t1);
+- t_dst += sizeof(uint8x16_t);
+- }
+-
+-#endif /* !USE_GCC_INLINE_ASM */
+-
+- if (trailer_count)
+- {
+- if (trailer_count & 8)
+- {
+- uint8x8_t t1 = vld1_u8 (t_src);
+-
+- t_src += sizeof(uint8x8_t);
+- vst1_u8 (t_dst, t1);
+- t_dst += sizeof(uint8x8_t);
+- }
+-
+- if (trailer_count & 4)
+- {
+- *((uint32_t*) t_dst) = *((uint32_t*) t_src);
+-
+- t_dst += 4;
+- t_src += 4;
+- }
+-
+- if (trailer_count & 2)
+- {
+- *((uint16_t*) t_dst) = *((uint16_t*) t_src);
+-
+- t_dst += 2;
+- t_src += 2;
+- }
+-
+- if (trailer_count & 1)
+- {
+- *t_dst++ = *t_src++;
+- }
+- }
+-}
+-
+-static inline void
+-solid_over_565_8_pix_neon (uint32_t glyph_colour,
+- uint16_t *dest,
+- uint8_t * in_mask,
+- uint32_t dest_stride, /* bytes, not elements */
+- uint32_t mask_stride,
+- uint32_t count /* 8-pixel groups */)
+-{
+- /* Inner loop of glyph blitter (solid colour, alpha mask) */
+-
+-#ifdef USE_GCC_INLINE_ASM
+-
+- asm volatile (
+- " vld4.8 {d20[], d21[], d22[], d23[]}, [%[glyph_colour]] @ splat solid colour components \n"
+- "0: @ loop \n"
+- " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n"
+- " vld1.8 {d17}, [%[in_mask]] @ load alpha mask of glyph \n"
+- " vmull.u8 q9, d17, d23 @ apply glyph colour alpha to mask \n"
+- " vshrn.u16 d17, q9, #8 @ reformat it to match original mask \n"
+- " vmvn d18, d17 @ we need the inverse mask for the background \n"
+- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
+- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
+- " vshrn.u16 d4, q0, #3 @ unpack green \n"
+- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
+- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
+- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
+- " vmull.u8 q1, d2, d18 @ apply inverse mask to background red... \n"
+- " vmull.u8 q2, d4, d18 @ ...green... \n"
+- " vmull.u8 q3, d6, d18 @ ...blue \n"
+- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
+- " vmlal.u8 q1, d17, d22 @ add masked foreground red... \n"
+- " vmlal.u8 q2, d17, d21 @ ...green... \n"
+- " vmlal.u8 q3, d17, d20 @ ...blue \n"
+- " add %[in_mask], %[in_mask], %[mask_stride] @ advance mask pointer, while we wait \n"
+- " vsri.16 q1, q2, #5 @ pack green behind red \n"
+- " vsri.16 q1, q3, #11 @ pack blue into pixels \n"
+- " vst1.16 {d2, d3}, [%[dest]] @ store composited pixels \n"
+- " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n"
+- " bne 0b @ next please \n"
+-
+- /* Clobbered registers marked as input/outputs */
+- : [dest] "+r" (dest), [in_mask] "+r" (in_mask), [count] "+r" (count)
+-
+- /* Inputs */
+- : [dest_stride] "r" (dest_stride), [mask_stride] "r" (mask_stride), [glyph_colour] "r" (&glyph_colour)
+-
+- /* Clobbers, including the inputs we modify, and potentially lots of memory */
+- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d19",
+- "d20", "d21", "d22", "d23", "d24", "d25", "cc", "memory"
+- );
+-
+-#else
+-
+- uint8x8x4_t solid_colour = vld4_dup_u8 ((uint8_t*) &glyph_colour);
+-
+- while (count--)
+- {
+- uint16x8_t pixels = vld1q_u16 (dest);
+- uint8x8_t mask = vshrn_n_u16 (vmull_u8 (solid_colour.val[3], vld1_u8 (in_mask)), 8);
+- uint8x8_t mask_image = vmvn_u8 (mask);
+-
+- uint8x8_t t_red = vshrn_n_u16 (pixels, 8);
+- uint8x8_t t_green = vshrn_n_u16 (pixels, 3);
+- uint8x8_t t_blue = vshrn_n_u16 (vsli_n_u8 (pixels, pixels, 5), 2);
+-
+- uint16x8_t s_red = vmull_u8 (vsri_n_u8 (t_red, t_red, 5), mask_image);
+- uint16x8_t s_green = vmull_u8 (vsri_n_u8 (t_green, t_green, 6), mask_image);
+- uint16x8_t s_blue = vmull_u8 (t_blue, mask_image);
+-
+- s_red = vmlal (s_red, mask, solid_colour.val[2]);
+- s_green = vmlal (s_green, mask, solid_colour.val[1]);
+- s_blue = vmlal (s_blue, mask, solid_colour.val[0]);
+-
+- pixels = vsri_n_u16 (s_red, s_green, 5);
+- pixels = vsri_n_u16 (pixels, s_blue, 11);
+- vst1q_u16 (dest, pixels);
+-
+- dest += dest_stride;
+- mask += mask_stride;
+- }
+-
+-#endif
+-}
+-
+-#if 0 /* this is broken currently */
+-static void
+-neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
+- pixman_op_t op,
+- pixman_image_t * src_image,
+- pixman_image_t * mask_image,
+- pixman_image_t * dst_image,
+- int32_t src_x,
+- int32_t src_y,
+- int32_t mask_x,
+- int32_t mask_y,
+- int32_t dest_x,
+- int32_t dest_y,
+- int32_t width,
+- int32_t height)
+-{
+- uint32_t src, srca;
+- uint16_t *dst_line, *aligned_line;
+- uint8_t *mask_line;
+- uint32_t dst_stride, mask_stride;
+- uint32_t kernel_count, copy_count, copy_tail;
+- uint8_t kernel_offset, copy_offset;
+-
+- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+-
+- /* bail out if fully transparent or degenerate */
+- srca = src >> 24;
+- if (src == 0)
+- return;
+-
+- if (width == 0 || height == 0)
+- return;
+-
+- if (width > NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- /* split the blit, so we can use a fixed-size scanline buffer
+- * TODO: there must be a more elegant way of doing this.
+- */
+- int x;
+- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- neon_composite_over_n_8_0565 (
+- impl, op,
+- src_image, mask_image, dst_image,
+- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+- }
+-
+- return;
+- }
+-
+- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+-
+- /* keep within minimum number of aligned quadwords on width
+- * while also keeping the minimum number of columns to process
+- */
+- {
+- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+- /* the fast copy should be quadword aligned */
+- copy_offset = dst_line - ((uint16_t*) aligned_left);
+- aligned_line = dst_line - copy_offset;
+- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+- copy_tail = 0;
+-
+- if (aligned_right - aligned_left > ceiling_length)
+- {
+- /* unaligned routine is tightest */
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- kernel_offset = copy_offset;
+- }
+- else
+- {
+- /* aligned routine is equally tight, so it is safer to align */
+- kernel_count = copy_count;
+- kernel_offset = 0;
+- }
+-
+- /* We should avoid reading beyond scanline ends for safety */
+- if (aligned_line < (dst_line - dest_x) ||
+- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+- {
+- /* switch to precise read */
+- copy_offset = kernel_offset = 0;
+- aligned_line = dst_line;
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- copy_count = (width * sizeof(*dst_line)) >> 4;
+- copy_tail = (width * sizeof(*dst_line)) & 0xF;
+- }
+- }
+-
+- {
+- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
+- uint8_t glyph_line[NEON_SCANLINE_BUFFER_PIXELS + 8];
+- int y = height;
+-
+- /* row-major order */
+- /* left edge, middle block, right edge */
+- for ( ; y--; mask_line += mask_stride, aligned_line += dst_stride, dst_line += dst_stride)
+- {
+- /* We don't want to overrun the edges of the glyph,
+- * so realign the edge data into known buffers
+- */
+- neon_quadword_copy (glyph_line + copy_offset, mask_line, width >> 4, width & 0xF);
+-
+- /* Uncached framebuffer access is really, really slow
+- * if we do it piecemeal. It should be much faster if we
+- * grab it all at once. One scanline should easily fit in
+- * L1 cache, so this should not waste RAM bandwidth.
+- */
+- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+- /* Apply the actual filter */
+- solid_over_565_8_pix_neon (
+- src, scan_line + kernel_offset,
+- glyph_line + kernel_offset, 8 * sizeof(*dst_line),
+- 8, kernel_count);
+-
+- /* Copy the modified scanline back */
+- neon_quadword_copy (dst_line, scan_line + copy_offset,
+- width >> 3, (width & 7) * 2);
+- }
+- }
+-}
+-#endif
+-
+-#ifdef USE_GCC_INLINE_ASM
+-
+-static inline void
+-plain_over_565_8_pix_neon (uint32_t colour,
+- uint16_t *dest,
+- uint32_t dest_stride, /* bytes, not elements */
+- uint32_t count /* 8-pixel groups */)
+-{
+- /* Inner loop for plain translucent rects
+- * (solid colour without alpha mask)
+- */
+- asm volatile (
+- " vld4.8 {d20[], d21[], d22[], d23[]}, [%[colour]] @ solid colour load/splat \n"
+- " vmull.u8 q12, d23, d22 @ premultiply alpha red \n"
+- " vmull.u8 q13, d23, d21 @ premultiply alpha green \n"
+- " vmull.u8 q14, d23, d20 @ premultiply alpha blue \n"
+- " vmvn d18, d23 @ inverse alpha for background \n"
+- "0: @ loop\n"
+- " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n"
+- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
+- " vshrn.u16 d4, q0, #3 @ unpack green \n"
+- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
+- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
+- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
+- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
+- " vmov q0, q12 @ retrieve foreground red \n"
+- " vmlal.u8 q0, d2, d18 @ blend red - my kingdom for a four-operand MLA \n"
+- " vmov q1, q13 @ retrieve foreground green \n"
+- " vmlal.u8 q1, d4, d18 @ blend green \n"
+- " vmov q2, q14 @ retrieve foreground blue \n"
+- " vmlal.u8 q2, d6, d18 @ blend blue \n"
+- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
+- " vsri.16 q0, q1, #5 @ pack green behind red \n"
+- " vsri.16 q0, q2, #11 @ pack blue into pixels \n"
+- " vst1.16 {d0, d1}, [%[dest]] @ store composited pixels \n"
+- " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n"
+- " bne 0b @ next please \n"
+-
+- /* Clobbered registers marked as input/outputs */
+- : [dest] "+r" (dest), [count] "+r" (count)
+-
+- /* Inputs */
+- : [dest_stride] "r" (dest_stride), [colour] "r" (&colour)
+-
+- /* Clobbers, including the inputs we modify, and
+- * potentially lots of memory
+- */
+- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d18", "d19",
+- "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
+- "cc", "memory"
+- );
+-}
+-
+-static void
+-neon_composite_over_n_0565 (pixman_implementation_t * impl,
+- pixman_op_t op,
+- pixman_image_t * src_image,
+- pixman_image_t * mask_image,
+- pixman_image_t * dst_image,
+- int32_t src_x,
+- int32_t src_y,
+- int32_t mask_x,
+- int32_t mask_y,
+- int32_t dest_x,
+- int32_t dest_y,
+- int32_t width,
+- int32_t height)
+-{
+- uint32_t src, srca;
+- uint16_t *dst_line, *aligned_line;
+- uint32_t dst_stride;
+- uint32_t kernel_count, copy_count, copy_tail;
+- uint8_t kernel_offset, copy_offset;
+-
+- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+-
+- /* bail out if fully transparent */
+- srca = src >> 24;
+- if (src == 0)
+- return;
+-
+- if (width == 0 || height == 0)
+- return;
+-
+- if (width > NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- /* split the blit, so we can use a fixed-size scanline buffer *
+- * TODO: there must be a more elegant way of doing this.
+- */
+- int x;
+-
+- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- neon_composite_over_n_0565 (
+- impl, op,
+- src_image, mask_image, dst_image,
+- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+- }
+- return;
+- }
+-
+- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+-
+- /* keep within minimum number of aligned quadwords on width
+- * while also keeping the minimum number of columns to process
+- */
+- {
+- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+- /* the fast copy should be quadword aligned */
+- copy_offset = dst_line - ((uint16_t*) aligned_left);
+- aligned_line = dst_line - copy_offset;
+- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+- copy_tail = 0;
+-
+- if (aligned_right - aligned_left > ceiling_length)
+- {
+- /* unaligned routine is tightest */
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- kernel_offset = copy_offset;
+- }
+- else
+- {
+- /* aligned routine is equally tight, so it is safer to align */
+- kernel_count = copy_count;
+- kernel_offset = 0;
+- }
+-
+- /* We should avoid reading beyond scanline ends for safety */
+- if (aligned_line < (dst_line - dest_x) ||
+- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+- {
+- /* switch to precise read */
+- copy_offset = kernel_offset = 0;
+- aligned_line = dst_line;
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- copy_count = (width * sizeof(*dst_line)) >> 4;
+- copy_tail = (width * sizeof(*dst_line)) & 0xF;
+- }
+- }
+-
+- {
+- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
+-
+- /* row-major order */
+- /* left edge, middle block, right edge */
+- for ( ; height--; aligned_line += dst_stride, dst_line += dst_stride)
+- {
+- /* Uncached framebuffer access is really, really slow if we do it piecemeal.
+- * It should be much faster if we grab it all at once.
+- * One scanline should easily fit in L1 cache, so this should
+- * not waste RAM bandwidth.
+- */
+- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+- /* Apply the actual filter */
+- plain_over_565_8_pix_neon (
+- src, scan_line + kernel_offset, 8 * sizeof(*dst_line), kernel_count);
+-
+- /* Copy the modified scanline back */
+- neon_quadword_copy (
+- dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
+- }
+- }
+-}
+-
+-static inline void
+-ARGB8_over_565_8_pix_neon (uint32_t *src,
+- uint16_t *dest,
+- uint32_t src_stride, /* bytes, not elements */
+- uint32_t count /* 8-pixel groups */)
+-{
+- asm volatile (
+- "0: @ loop\n"
+- " pld [%[src], %[src_stride]] @ preload from next scanline \n"
+- " vld1.16 {d0, d1}, [%[dest]] @ load pixels from framebuffer \n"
+- " vld4.8 {d20, d21, d22, d23},[%[src]]! @ load source image pixels \n"
+- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
+- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
+- " vshrn.u16 d4, q0, #3 @ unpack green \n"
+- " vmvn d18, d23 @ we need the inverse alpha for the background \n"
+- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
+- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
+- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
+- " vmull.u8 q1, d2, d18 @ apply inverse alpha to background red... \n"
+- " vmull.u8 q2, d4, d18 @ ...green... \n"
+- " vmull.u8 q3, d6, d18 @ ...blue \n"
+- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
+- " vmlal.u8 q1, d23, d22 @ add blended foreground red... \n"
+- " vmlal.u8 q2, d23, d21 @ ...green... \n"
+- " vmlal.u8 q3, d23, d20 @ ...blue \n"
+- " vsri.16 q1, q2, #5 @ pack green behind red \n"
+- " vsri.16 q1, q3, #11 @ pack blue into pixels \n"
+- " vst1.16 {d2, d3}, [%[dest]]! @ store composited pixels \n"
+- " bne 0b @ next please \n"
+-
+- /* Clobbered registers marked as input/outputs */
+- : [dest] "+r" (dest), [src] "+r" (src), [count] "+r" (count)
+-
+- /* Inputs */
+- : [src_stride] "r" (src_stride)
+-
+- /* Clobbers, including the inputs we modify, and potentially lots of memory */
+- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d20",
+- "d21", "d22", "d23", "cc", "memory"
+- );
+-}
+-
+-static void
+-neon_composite_over_8888_0565 (pixman_implementation_t * impl,
+- pixman_op_t op,
+- pixman_image_t * src_image,
+- pixman_image_t * mask_image,
+- pixman_image_t * dst_image,
+- int32_t src_x,
+- int32_t src_y,
+- int32_t mask_x,
+- int32_t mask_y,
+- int32_t dest_x,
+- int32_t dest_y,
+- int32_t width,
+- int32_t height)
+-{
+- uint32_t *src_line;
+- uint16_t *dst_line, *aligned_line;
+- uint32_t dst_stride, src_stride;
+- uint32_t kernel_count, copy_count, copy_tail;
+- uint8_t kernel_offset, copy_offset;
+-
+- /* we assume mask is opaque
+- * so the only alpha to deal with is embedded in src
+- */
+- if (width > NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- /* split the blit, so we can use a fixed-size scanline buffer */
+- int x;
+- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- neon_composite_over_8888_0565 (
+- impl, op,
+- src_image, mask_image, dst_image,
+- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+- }
+- return;
+- }
+-
+- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+-
+- /* keep within minimum number of aligned quadwords on width
+- * while also keeping the minimum number of columns to process
+- */
+- {
+- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+- /* the fast copy should be quadword aligned */
+- copy_offset = dst_line - ((uint16_t*) aligned_left);
+- aligned_line = dst_line - copy_offset;
+- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+- copy_tail = 0;
+-
+- if (aligned_right - aligned_left > ceiling_length)
+- {
+- /* unaligned routine is tightest */
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- kernel_offset = copy_offset;
+- }
+- else
+- {
+- /* aligned routine is equally tight, so it is safer to align */
+- kernel_count = copy_count;
+- kernel_offset = 0;
+- }
+-
+- /* We should avoid reading beyond scanline ends for safety */
+- if (aligned_line < (dst_line - dest_x) ||
+- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+- {
+- /* switch to precise read */
+- copy_offset = kernel_offset = 0;
+- aligned_line = dst_line;
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- copy_count = (width * sizeof(*dst_line)) >> 4;
+- copy_tail = (width * sizeof(*dst_line)) & 0xF;
+- }
+- }
+-
+- /* Preload the first input scanline */
+- {
+- uint8_t *src_ptr = (uint8_t*) src_line;
+- uint32_t count = (width + 15) / 16;
+-
+-#ifdef USE_GCC_INLINE_ASM
+- asm volatile (
+- "0: @ loop \n"
+- " subs %[count], %[count], #1 \n"
+- " pld [%[src]] \n"
+- " add %[src], %[src], #64 \n"
+- " bgt 0b \n"
+-
+- /* Clobbered input registers marked as input/outputs */
+- : [src] "+r" (src_ptr), [count] "+r" (count)
+- : /* no unclobbered inputs */
+- : "cc"
+- );
+-#else
+- do
+- {
+- __pld (src_ptr);
+- src_ptr += 64;
+- }
+- while (--count);
+-#endif
+- }
+-
+- {
+- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
+-
+- /* row-major order */
+- /* left edge, middle block, right edge */
+- for ( ; height--; src_line += src_stride, aligned_line += dst_stride)
+- {
+- /* Uncached framebuffer access is really, really slow if we do
+- * it piecemeal. It should be much faster if we grab it all at
+- * once. One scanline should easily fit in L1 cache, so this
+- * should not waste RAM bandwidth.
+- */
+- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+- /* Apply the actual filter */
+- ARGB8_over_565_8_pix_neon (
+- src_line, scan_line + kernel_offset,
+- src_stride * sizeof(*src_line), kernel_count);
+-
+- /* Copy the modified scanline back */
+- neon_quadword_copy (dst_line,
+- scan_line + copy_offset,
+- width >> 3, (width & 7) * 2);
+- }
+- }
+-}
+-
+-#endif /* USE_GCC_INLINE_ASM */
+-
+ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ {
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_8888_8_8, 0 },
+@@ -2612,12 +1908,6 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ #ifdef USE_GCC_INLINE_ASM
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_16_16, 0 },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_16_16, 0 },
+-#if 0 /* this code has some bugs */
+- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_n_0565, 0 },
+- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_n_0565, 0 },
+- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 },
+- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 },
+-#endif
+ #endif
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888, 0 },
+@@ -2668,79 +1958,6 @@ arm_neon_composite (pixman_implementation_t *imp,
+ }
+
+ static pixman_bool_t
+-pixman_blt_neon (void *src_bits,
+- void *dst_bits,
+- int src_stride,
+- int dst_stride,
+- int src_bpp,
+- int dst_bpp,
+- int src_x,
+- int src_y,
+- int dst_x,
+- int dst_y,
+- int width,
+- int height)
+-{
+- if (!width || !height)
+- return TRUE;
+-
+- /* accelerate only straight copies involving complete bytes */
+- if (src_bpp != dst_bpp || (src_bpp & 7))
+- return FALSE;
+-
+- {
+- uint32_t bytes_per_pixel = src_bpp >> 3;
+- uint32_t byte_width = width * bytes_per_pixel;
+- /* parameter is in words for some reason */
+- int32_t src_stride_bytes = src_stride * 4;
+- int32_t dst_stride_bytes = dst_stride * 4;
+- uint8_t *src_bytes = ((uint8_t*) src_bits) +
+- src_y * src_stride_bytes + src_x * bytes_per_pixel;
+- uint8_t *dst_bytes = ((uint8_t*) dst_bits) +
+- dst_y * dst_stride_bytes + dst_x * bytes_per_pixel;
+- uint32_t quadword_count = byte_width / 16;
+- uint32_t offset = byte_width % 16;
+-
+- while (height--)
+- {
+- neon_quadword_copy (dst_bytes, src_bytes, quadword_count, offset);
+- src_bytes += src_stride_bytes;
+- dst_bytes += dst_stride_bytes;
+- }
+- }
+-
+- return TRUE;
+-}
+-
+-static pixman_bool_t
+-arm_neon_blt (pixman_implementation_t *imp,
+- uint32_t * src_bits,
+- uint32_t * dst_bits,
+- int src_stride,
+- int dst_stride,
+- int src_bpp,
+- int dst_bpp,
+- int src_x,
+- int src_y,
+- int dst_x,
+- int dst_y,
+- int width,
+- int height)
+-{
+- if (pixman_blt_neon (
+- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+- src_x, src_y, dst_x, dst_y, width, height))
+- {
+- return TRUE;
+- }
+-
+- return _pixman_implementation_blt (
+- imp->delegate,
+- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+- src_x, src_y, dst_x, dst_y, width, height);
+-}
+-
+-static pixman_bool_t
+ arm_neon_fill (pixman_implementation_t *imp,
+ uint32_t * bits,
+ int stride,
+@@ -2765,9 +1982,6 @@ _pixman_implementation_create_arm_neon (void)
+ pixman_implementation_t *imp = _pixman_implementation_create (simd);
+
+ imp->composite = arm_neon_composite;
+-#if 0 /* this code has some bugs */
+- imp->blt = arm_neon_blt;
+-#endif
+ imp->fill = arm_neon_fill;
+
+ return imp;
diff --git a/recipes/xorg-lib/pixman_git.bb b/recipes/xorg-lib/pixman_git.bb
index 2ba06af67e..b8aa2a43c6 100644
--- a/recipes/xorg-lib/pixman_git.bb
+++ b/recipes/xorg-lib/pixman_git.bb
@@ -3,16 +3,20 @@ PRIORITY = "optional"
DESCRIPTION = "Low-level pixel manipulation library."
LICENSE = "X11"
-PV = "0.15.16"
-PR = "r1"
+PV = "0.17.1"
+PR = "r2"
PR_append = "+gitr${SRCREV}"
-SRCREV = "f9660ce29ed072c6cbaec711c5d18b9f0ba113ae"
+SRCREV = "7af985a69a9147e54dd5946a8062dbc2e534b735"
DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_angstrom = "1"
SRC_URI = "git://anongit.freedesktop.org/pixman;protocol=git \
+ file://pixman-28986.patch;patch=1 \
+ file://nearest-neighbour.patch;patch=1 \
+ file://remove-broken.patch;patch=1 \
+ file://over-8888-0565.patch;patch=1 \
"
S = "${WORKDIR}/git"