author    Khem Raj <raj.khem@gmail.com>  2009-10-06 15:29:34 -0700
committer Khem Raj <raj.khem@gmail.com>  2009-10-06 15:31:34 -0700
commit    84011b0d22f56008f5fd7f9c53a0e34d5861e50d (patch)
tree      eb993a5c387fe0239a5a8f3befcdac9d7abbe365 /recipes/gcc/gcc-4.4.1/gcc-pr41175.patch
parent    44943f01becfb8e61bcaeb40f47e2fd64552b276 (diff)
gcc-4.4.1: Backport powerpc patches to improve code generation at -Os
* This also fixes the u-boot build errors we get with gcc 4.4

Signed-off-by: Khem Raj <raj.khem@gmail.com>
Diffstat (limited to 'recipes/gcc/gcc-4.4.1/gcc-pr41175.patch')
-rw-r--r--  recipes/gcc/gcc-4.4.1/gcc-pr41175.patch  1172
1 file changed, 1172 insertions, 0 deletions
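
For context: PR target/41175 is a powerpc wrong-code bug in the out-of-line
register save/restore paths that -Os enables. The sketch below is hypothetical
and not part of the patch (the real regression test is the pr41175.c file
added by the first hunk); it only shows the shape of code that exercises those
paths. Keeping many call-saved GPRs live across a call forces the prologue and
epilogue to use the out-of-line _savegpr_* / _restgpr_* routines at -Os, and
__builtin_alloca adds the variable-size frame the failing cases involved:

/* Hypothetical sketch; compile with -Os for powerpc.  Not part of
   the patch -- see gcc.target/powerpc/pr41175.c below for the real,
   much more thorough test.  */
extern volatile int ll;
extern void consume (char *p);   /* hypothetical external function */

int
spill_many_gprs (void)
{
  int a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
  /* Pin the values in registers so the prologue must save the
     corresponding call-saved GPRs; at -Os GCC may emit a call to an
     out-of-line _savegpr_ routine instead of inline stores.  */
  asm volatile ("" : "+r" (a), "+r" (b), "+r" (c), "+r" (d),
                     "+r" (e), "+r" (f), "+r" (g), "+r" (h));
  char *p = __builtin_alloca (ll);   /* variable-size frame */
  consume (p);   /* real call: the values must survive it in
                    call-saved registers */
  /* The epilogue then restores them, via _restgpr_* at -Os.  */
  return a + b + c + d + e + f + g + h;
}
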
diff --git a/recipes/gcc/gcc-4.4.1/gcc-pr41175.patch b/recipes/gcc/gcc-4.4.1/gcc-pr41175.patch
new file mode 100644
index 0000000000..10da543958
--- /dev/null
+++ b/recipes/gcc/gcc-4.4.1/gcc-pr41175.patch
@@ -0,0 +1,1172 @@
+Index: gcc-4.4.1/gcc/testsuite/gcc.target/powerpc/pr41175.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.4.1/gcc/testsuite/gcc.target/powerpc/pr41175.c 2009-10-06 14:09:19.757404626 -0700
+@@ -0,0 +1,461 @@
++/* PR target/41175 */
++/* { dg-do run } */
++/* { dg-options "-Os" } */
++
++#define X2(n) X1(n##0) X1(n##1)
++#define X4(n) X2(n##0) X2(n##1)
++#define X8(n) X4(n##0) X4(n##1)
++
++#ifndef __SPE__
++#define FLOAT_REG_CONSTRAINT "f"
++#else
++#define FLOAT_REG_CONSTRAINT "r"
++#endif
++
++volatile int ll;
++
++__attribute__((noinline)) void
++foo (void)
++{
++ asm volatile ("" : : : "memory");
++}
++
++__attribute__((noinline)) void
++bar (char *p)
++{
++ asm volatile ("" : : "r" (p) : "memory");
++}
++
++__attribute__((noinline)) void
++f1 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++ foo ();
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f2 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++ char *pp = __builtin_alloca (ll);
++ bar (pp);
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f3 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++}
++
++#ifndef __NO_FPRS__
++__attribute__((noinline)) void
++f4 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X4(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X4(d) "=m" (mem) : : "memory");
++ foo ();
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X4(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f5 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X4(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X4(d) "=m" (mem) : : "memory");
++ char *pp = __builtin_alloca (ll);
++ bar (pp);
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X4(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f6 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X4(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X4(d) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X4(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f7 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X2(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X2(d) "=m" (mem) : : "memory");
++ foo ();
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X2(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f8 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X2(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X2(d) "=m" (mem) : : "memory");
++ char *pp = __builtin_alloca (ll);
++ bar (pp);
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X2(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f9 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X8(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X2(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X2(d) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X8(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X2(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f10 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X4(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X1(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X4(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X1(d) "=m" (mem) : : "memory");
++ foo ();
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X4(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X1(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f11 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X4(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X1(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X4(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X1(d) "=m" (mem) : : "memory");
++ char *pp = __builtin_alloca (ll);
++ bar (pp);
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X4(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X1(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f12 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X4(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X1(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X4(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X1(d) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X4(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X1(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f13 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X2(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X8(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X2(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X8(d) "=m" (mem) : : "memory");
++ foo ();
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X2(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X8(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f14 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X2(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X8(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X2(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X8(d) "=m" (mem) : : "memory");
++ char *pp = __builtin_alloca (ll);
++ bar (pp);
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X2(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X8(d) "m" (mem) : "memory");
++}
++
++__attribute__((noinline)) void
++f15 (void)
++{
++ int mem;
++#undef X1
++#define X1(n) int gpr##n = 0;
++ X8(a) X8(b) X2(c)
++#undef X1
++#define X1(n) double fpr##n = 0.0;
++ X8(d)
++#undef X1
++#define X1(n) "+r" (gpr##n),
++ asm volatile ("" : X8(a) "=m" (mem) : : "memory");
++ asm volatile ("" : X8(b) "=m" (mem) : : "memory");
++ asm volatile ("" : X2(c) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : X8(d) "=m" (mem) : : "memory");
++#undef X1
++#define X1(n) "r" (gpr##n),
++ asm volatile ("" : : X8(a) "m" (mem) : "memory");
++ asm volatile ("" : : X8(b) "m" (mem) : "memory");
++ asm volatile ("" : : X2(c) "m" (mem) : "memory");
++#undef X1
++#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
++ asm volatile ("" : : X8(d) "m" (mem) : "memory");
++}
++#endif
++
++int
++main ()
++{
++ ll = 60;
++ f1 ();
++ f2 ();
++ f3 ();
++#ifndef __NO_FPRS__
++ f4 ();
++ f5 ();
++ f6 ();
++ f7 ();
++ f8 ();
++ f9 ();
++ f10 ();
++ f11 ();
++ f12 ();
++ f13 ();
++ f14 ();
++ f15 ();
++#endif
++ return 0;
++}
+Index: gcc-4.4.1/gcc/config/rs6000/spe.md
+===================================================================
+--- gcc-4.4.1.orig/gcc/config/rs6000/spe.md 2009-10-06 13:51:50.387320717 -0700
++++ gcc-4.4.1/gcc/config/rs6000/spe.md 2009-10-06 14:09:19.767365878 -0700
+@@ -3156,9 +3156,9 @@
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+- (use (match_operand:P 2 "gpc_reg_operand" "r"))
+- (set (match_operand:V2SI 3 "memory_operand" "=m")
+- (match_operand:V2SI 4 "gpc_reg_operand" "r"))])]
++ (use (reg:P 11))
++ (set (match_operand:V2SI 2 "memory_operand" "=m")
++ (match_operand:V2SI 3 "gpc_reg_operand" "r"))])]
+ "TARGET_SPE_ABI"
+ "bl %z1"
+ [(set_attr "type" "branch")
+@@ -3168,9 +3168,9 @@
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+- (use (match_operand:P 2 "gpc_reg_operand" "r"))
+- (set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
+- (match_operand:V2SI 4 "memory_operand" "m"))])]
++ (use (reg:P 11))
++ (set (match_operand:V2SI 2 "gpc_reg_operand" "=r")
++ (match_operand:V2SI 3 "memory_operand" "m"))])]
+ "TARGET_SPE_ABI"
+ "bl %z1"
+ [(set_attr "type" "branch")
+@@ -3181,9 +3181,9 @@
+ [(return)
+ (clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+- (use (match_operand:P 2 "gpc_reg_operand" "r"))
+- (set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
+- (match_operand:V2SI 4 "memory_operand" "m"))])]
++ (use (reg:P 11))
++ (set (match_operand:V2SI 2 "gpc_reg_operand" "=r")
++ (match_operand:V2SI 3 "memory_operand" "m"))])]
+ "TARGET_SPE_ABI"
+ "b %z1"
+ [(set_attr "type" "branch")
+Index: gcc-4.4.1/gcc/config/rs6000/linux64.h
+===================================================================
+--- gcc-4.4.1.orig/gcc/config/rs6000/linux64.h 2009-10-06 13:51:50.347316402 -0700
++++ gcc-4.4.1/gcc/config/rs6000/linux64.h 2009-10-06 14:09:19.777366800 -0700
+@@ -433,11 +433,11 @@ extern int dot_symbols;
+ #undef SAVE_FP_PREFIX
+ #define SAVE_FP_PREFIX (TARGET_64BIT ? "._savef" : "_savefpr_")
+ #undef SAVE_FP_SUFFIX
+-#define SAVE_FP_SUFFIX (TARGET_64BIT ? "" : "_l")
++#define SAVE_FP_SUFFIX ""
+ #undef RESTORE_FP_PREFIX
+ #define RESTORE_FP_PREFIX (TARGET_64BIT ? "._restf" : "_restfpr_")
+ #undef RESTORE_FP_SUFFIX
+-#define RESTORE_FP_SUFFIX (TARGET_64BIT ? "" : "_l")
++#define RESTORE_FP_SUFFIX ""
+
+ /* Dwarf2 debugging. */
+ #undef PREFERRED_DEBUGGING_TYPE
+Index: gcc-4.4.1/gcc/config/rs6000/rs6000.c
+===================================================================
+--- gcc-4.4.1.orig/gcc/config/rs6000/rs6000.c 2009-10-06 14:09:04.737313663 -0700
++++ gcc-4.4.1/gcc/config/rs6000/rs6000.c 2009-10-06 14:09:19.797315286 -0700
+@@ -15843,7 +15843,8 @@ static bool
+ no_global_regs_above (int first, bool gpr)
+ {
+ int i;
+- for (i = first; i < gpr ? 32 : 64 ; i++)
++ int last = gpr ? 32 : 64;
++ for (i = first; i < last; i++)
+ if (global_regs[i])
+ return false;
+ return true;
+@@ -15860,54 +15861,136 @@ no_global_regs_above (int first, bool gp
+
+ static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
+
+-/* Return the symbol for an out-of-line register save/restore routine.
++/* Temporary holding space for an out-of-line register save/restore
++ routine name. */
++static char savres_routine_name[30];
++
++/* Return the name for an out-of-line register save/restore routine.
++ We are saving/restoring GPRs if GPR is true. */
++
++static char *
++rs6000_savres_routine_name (rs6000_stack_t *info, int regno,
++ bool savep, bool gpr, bool lr)
++{
++ const char *prefix = "";
++ const char *suffix = "";
++
++ /* Different targets are supposed to define
++ {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
++ routine name could be defined with:
++
++ sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
++
++ This is a nice idea in practice, but in reality, things are
++ complicated in several ways:
++
++ - ELF targets have save/restore routines for GPRs.
++
++ - SPE targets use different prefixes for 32/64-bit registers, and
++ neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
++
++ - PPC64 ELF targets have routines for save/restore of GPRs that
++ differ in what they do with the link register, so having a set
++ prefix doesn't work. (We only use one of the save routines at
++ the moment, though.)
++
++ - PPC32 elf targets have "exit" versions of the restore routines
++ that restore the link register and can save some extra space.
++ These require an extra suffix. (There are also "tail" versions
++ of the restore routines and "GOT" versions of the save routines,
++ but we don't generate those at present. Same problems apply,
++ though.)
++
++ We deal with all this by synthesizing our own prefix/suffix and
++ using that for the simple sprintf call shown above. */
++ if (TARGET_SPE)
++ {
++ /* No floating point saves on the SPE. */
++ gcc_assert (gpr);
++
++ if (savep)
++ prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
++ else
++ prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
++
++ if (lr)
++ suffix = "_x";
++ }
++ else if (DEFAULT_ABI == ABI_V4)
++ {
++ if (TARGET_64BIT)
++ goto aix_names;
++
++ if (gpr)
++ prefix = savep ? "_savegpr_" : "_restgpr_";
++ else
++ prefix = savep ? "_savefpr_" : "_restfpr_";
++
++ if (lr)
++ suffix = "_x";
++ }
++ else if (DEFAULT_ABI == ABI_AIX)
++ {
++#ifndef POWERPC_LINUX
++ /* No out-of-line save/restore routines for GPRs on AIX. */
++ gcc_assert (!TARGET_AIX || !gpr);
++#endif
++
++ aix_names:
++ if (gpr)
++ prefix = (savep
++ ? (lr ? "_savegpr0_" : "_savegpr1_")
++ : (lr ? "_restgpr0_" : "_restgpr1_"));
++#ifdef POWERPC_LINUX
++ else if (lr)
++ prefix = (savep ? "_savefpr_" : "_restfpr_");
++#endif
++ else
++ {
++ prefix = savep ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
++ suffix = savep ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
++ }
++ }
++ else if (DEFAULT_ABI == ABI_DARWIN)
++ sorry ("Out-of-line save/restore routines not supported on Darwin");
++
++ sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
++
++ return savres_routine_name;
++}
++
++/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
+ We are saving/restoring GPRs if GPR is true. */
+
+ static rtx
+-rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp)
++rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep,
++ bool gpr, bool lr)
+ {
+ int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
+ rtx sym;
+ int select = ((savep ? 1 : 0) << 2
+- | (gpr
+- /* On the SPE, we never have any FPRs, but we do have
+- 32/64-bit versions of the routines. */
+- ? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
+- : 0) << 1
+- | (exitp ? 1: 0));
++ | ((TARGET_SPE_ABI
++ /* On the SPE, we never have any FPRs, but we do have
++ 32/64-bit versions of the routines. */
++ ? (info->spe_64bit_regs_used ? 1 : 0)
++ : (gpr ? 1 : 0)) << 1)
++ | (lr ? 1: 0));
+
+ /* Don't generate bogus routine names. */
+- gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER);
++ gcc_assert (FIRST_SAVRES_REGISTER <= regno
++ && regno <= LAST_SAVRES_REGISTER);
+
+ sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
+
+ if (sym == NULL)
+ {
+- char name[30];
+- const char *action;
+- const char *regkind;
+- const char *exit_suffix;
+-
+- action = savep ? "save" : "rest";
+-
+- /* SPE has slightly different names for its routines depending on
+- whether we are saving 32-bit or 64-bit registers. */
+- if (TARGET_SPE_ABI)
+- {
+- /* No floating point saves on the SPE. */
+- gcc_assert (gpr);
+-
+- regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr";
+- }
+- else
+- regkind = gpr ? "gpr" : "fpr";
+-
+- exit_suffix = exitp ? "_x" : "";
++ char *name;
+
+- sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix);
++ name = rs6000_savres_routine_name (info, regno, savep, gpr, lr);
+
+ sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
+ = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
++ SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
+ }
+
+ return sym;
+@@ -15935,8 +16018,13 @@ rs6000_emit_stack_reset (rs6000_stack_t
+ {
+ rs6000_emit_stack_tie ();
+ if (sp_offset != 0)
+- emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
+- GEN_INT (sp_offset)));
++ {
++ rtx dest_reg = savres ? gen_rtx_REG (Pmode, 11) : sp_reg_rtx;
++
++ return emit_insn (gen_addsi3 (dest_reg, frame_reg_rtx,
++ GEN_INT (sp_offset)));
++ }
++
+ else if (!savres)
+ emit_move_insn (sp_reg_rtx, frame_reg_rtx);
+ }
+@@ -15965,7 +16053,7 @@ static rtx
+ rs6000_make_savres_rtx (rs6000_stack_t *info,
+ rtx frame_reg_rtx, int save_area_offset,
+ enum machine_mode reg_mode,
+- bool savep, bool gpr, bool exitp)
++ bool savep, bool gpr, bool lr)
+ {
+ int i;
+ int offset, start_reg, end_reg, n_regs;
+@@ -15979,20 +16067,21 @@ rs6000_make_savres_rtx (rs6000_stack_t *
+ : info->first_fp_reg_save);
+ end_reg = gpr ? 32 : 64;
+ n_regs = end_reg - start_reg;
+- p = rtvec_alloc ((exitp ? 4 : 3) + n_regs);
+-
+- /* If we're saving registers, then we should never say we're exiting. */
+- gcc_assert ((savep && !exitp) || !savep);
++ p = rtvec_alloc ((lr ? 4 : 3) + n_regs);
+
+- if (exitp)
++ if (!savep && lr)
+ RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
+
+ RTVEC_ELT (p, offset++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
+
+- sym = rs6000_savres_routine_sym (info, savep, gpr, exitp);
++ sym = rs6000_savres_routine_sym (info, savep, gpr, lr);
+ RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
+- RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11));
++ RTVEC_ELT (p, offset++)
++ = gen_rtx_USE (VOIDmode,
++ gen_rtx_REG (Pmode, DEFAULT_ABI != ABI_AIX ? 11
++ : gpr && !lr ? 12
++ : 1));
+
+ for (i = 0; i < end_reg - start_reg; i++)
+ {
+@@ -16007,6 +16096,16 @@ rs6000_make_savres_rtx (rs6000_stack_t *
+ savep ? reg : mem);
+ }
+
++ if (savep && lr)
++ {
++ rtx addr, reg, mem;
++ reg = gen_rtx_REG (Pmode, 0);
++ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
++ GEN_INT (info->lr_save_offset));
++ mem = gen_frame_mem (Pmode, addr);
++ RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode, mem, reg);
++ }
++
+ return gen_rtx_PARALLEL (VOIDmode, p);
+ }
+
+@@ -16027,7 +16126,10 @@ rs6000_reg_live_or_pic_offset_p (int reg
+ enum {
+ SAVRES_MULTIPLE = 0x1,
+ SAVRES_INLINE_FPRS = 0x2,
+- SAVRES_INLINE_GPRS = 0x4
++ SAVRES_INLINE_GPRS = 0x4,
++ SAVRES_NOINLINE_GPRS_SAVES_LR = 0x8,
++ SAVRES_NOINLINE_FPRS_SAVES_LR = 0x10,
++ SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x20
+ };
+
+ /* Determine the strategy for savings/restoring registers. */
+@@ -16042,6 +16144,7 @@ rs6000_savres_strategy (rs6000_stack_t *
+ bool savres_gprs_inline;
+ bool noclobber_global_gprs
+ = no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true);
++ int strategy;
+
+ using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64
+ && (!TARGET_SPE_ABI
+@@ -16061,6 +16164,10 @@ rs6000_savres_strategy (rs6000_stack_t *
+ || info->first_fp_reg_save == 64
+ || !no_global_regs_above (info->first_fp_reg_save,
+ /*gpr=*/false)
++ /* The out-of-line FP routines use
++ double-precision stores; we can't use those
++ routines if we don't have such stores. */
++ || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
+ || FP_SAVE_INLINE (info->first_fp_reg_save));
+ savres_gprs_inline = (common
+ /* Saving CR interferes with the exit routines
+@@ -16098,9 +16205,22 @@ rs6000_savres_strategy (rs6000_stack_t *
+ savres_gprs_inline = savres_gprs_inline || using_multiple_p;
+ }
+
+- return (using_multiple_p
+- | (savres_fprs_inline << 1)
+- | (savres_gprs_inline << 2));
++ strategy = (using_multiple_p
++ | (savres_fprs_inline << 1)
++ | (savres_gprs_inline << 2));
++#ifdef POWERPC_LINUX
++ if (TARGET_64BIT)
++ {
++ if (!savres_fprs_inline)
++ strategy |= SAVRES_NOINLINE_FPRS_SAVES_LR;
++ else if (!savres_gprs_inline && info->first_fp_reg_save == 64)
++ strategy |= SAVRES_NOINLINE_GPRS_SAVES_LR;
++ }
++#else
++ if (TARGET_AIX && !savres_fprs_inline)
++ strategy |= SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR;
++#endif
++ return strategy;
+ }
+
+ /* Emit function prologue as insns. */
+@@ -16122,7 +16242,7 @@ rs6000_emit_prologue (void)
+ int using_store_multiple;
+ int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
+ && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
+- && !call_used_regs[STATIC_CHAIN_REGNUM]);
++ && call_used_regs[STATIC_CHAIN_REGNUM]);
+ HOST_WIDE_INT sp_offset = 0;
+
+ if (TARGET_FIX_AND_CONTINUE)
+@@ -16307,24 +16427,30 @@ rs6000_emit_prologue (void)
+ gen_rtx_REG (Pmode, LR_REGNO));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
++ if (!(strategy & (SAVRES_NOINLINE_GPRS_SAVES_LR
++ | SAVRES_NOINLINE_FPRS_SAVES_LR)))
++ {
++ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+- reg = gen_rtx_REG (Pmode, 0);
+- mem = gen_rtx_MEM (Pmode, addr);
+- /* This should not be of rs6000_sr_alias_set, because of
+- __builtin_return_address. */
++ reg = gen_rtx_REG (Pmode, 0);
++ mem = gen_rtx_MEM (Pmode, addr);
++ /* This should not be of rs6000_sr_alias_set, because of
++ __builtin_return_address. */
+
+- insn = emit_move_insn (mem, reg);
+- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+- NULL_RTX, NULL_RTX);
++ insn = emit_move_insn (mem, reg);
++ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
++ NULL_RTX, NULL_RTX);
++ }
+ }
+
+- /* If we need to save CR, put it into r12. */
++ /* If we need to save CR, put it into r12 or r11. */
+ if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
+ {
+ rtx set;
+
+- cr_save_rtx = gen_rtx_REG (SImode, 12);
++ cr_save_rtx
++ = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_AIX && !saving_GPRs_inline
++ ? 11 : 12);
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ /* Now, there's no way that dwarf2out_frame_debug_expr is going
+@@ -16363,7 +16489,9 @@ rs6000_emit_prologue (void)
+ info->fp_save_offset + sp_offset,
+ DFmode,
+ /*savep=*/true, /*gpr=*/false,
+- /*exitp=*/false);
++ /*lr=*/(strategy
++ & SAVRES_NOINLINE_FPRS_SAVES_LR)
++ != 0);
+ insn = emit_insn (par);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+@@ -16459,7 +16587,7 @@ rs6000_emit_prologue (void)
+ par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
+ 0, reg_mode,
+ /*savep=*/true, /*gpr=*/true,
+- /*exitp=*/false);
++ /*lr=*/false);
+ insn = emit_insn (par);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+@@ -16474,25 +16602,24 @@ rs6000_emit_prologue (void)
+ {
+ rtx par;
+
+- /* Need to adjust r11 if we saved any FPRs. */
++ /* Need to adjust r11 (r12) if we saved any FPRs. */
+ if (info->first_fp_reg_save != 64)
+ {
+- rtx r11 = gen_rtx_REG (reg_mode, 11);
+- rtx offset = GEN_INT (info->total_size
+- + (-8 * (64-info->first_fp_reg_save)));
+- rtx ptr_reg = (sp_reg_rtx == frame_reg_rtx
+- ? sp_reg_rtx : r11);
+-
+- emit_insn (TARGET_32BIT
+- ? gen_addsi3 (r11, ptr_reg, offset)
+- : gen_adddi3 (r11, ptr_reg, offset));
++ rtx dest_reg = gen_rtx_REG (reg_mode, DEFAULT_ABI == ABI_AIX
++ ? 12 : 11);
++ rtx offset = GEN_INT (sp_offset
++ + (-8 * (64-info->first_fp_reg_save)));
++ emit_insn (gen_add3_insn (dest_reg, frame_reg_rtx, offset));
+ }
+
+ par = rs6000_make_savres_rtx (info, frame_reg_rtx,
+ info->gp_save_offset + sp_offset,
+ reg_mode,
+ /*savep=*/true, /*gpr=*/true,
+- /*exitp=*/false);
++ /*lr=*/(strategy
++ & SAVRES_NOINLINE_GPRS_SAVES_LR)
++ != 0);
++
+ insn = emit_insn (par);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+@@ -16772,9 +16899,18 @@ rs6000_output_function_prologue (FILE *f
+ fp values. */
+ if (info->first_fp_reg_save < 64
+ && !FP_SAVE_INLINE (info->first_fp_reg_save))
+- fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
+- SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
+- RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
++ {
++ char *name;
++ int regno = info->first_fp_reg_save - 32;
++
++ name = rs6000_savres_routine_name (info, regno, /*savep=*/true,
++ /*gpr=*/false, /*lr=*/false);
++ fprintf (file, "\t.extern %s\n", name);
++
++ name = rs6000_savres_routine_name (info, regno, /*savep=*/false,
++ /*gpr=*/false, /*lr=*/true);
++ fprintf (file, "\t.extern %s\n", name);
++ }
+
+ /* Write .extern for AIX common mode routines, if needed. */
+ if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
+@@ -16891,6 +17027,7 @@ rs6000_emit_epilogue (int sibcall)
+ int sp_offset = 0;
+ rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
+ rtx frame_reg_rtx = sp_reg_rtx;
++ rtx cr_save_reg = NULL_RTX;
+ enum machine_mode reg_mode = Pmode;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ int i;
+@@ -16924,8 +17061,10 @@ rs6000_emit_epilogue (int sibcall)
+ || (cfun->calls_alloca
+ && !frame_pointer_needed));
+ restore_lr = (info->lr_save_p
+- && restoring_GPRs_inline
+- && restoring_FPRs_inline);
++ && (restoring_FPRs_inline
++ || (strategy & SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR))
++ && (restoring_GPRs_inline
++ || info->first_fp_reg_save < 64));
+
+ if (WORLD_SAVE_P (info))
+ {
+@@ -17197,7 +17336,7 @@ rs6000_emit_epilogue (int sibcall)
+
+ /* Get the old lr if we saved it. If we are restoring registers
+ out-of-line, then the out-of-line routines can do this for us. */
+- if (restore_lr)
++ if (restore_lr && restoring_GPRs_inline)
+ {
+ rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
+ info->lr_save_offset + sp_offset);
+@@ -17211,12 +17350,17 @@ rs6000_emit_epilogue (int sibcall)
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset + sp_offset));
+ rtx mem = gen_frame_mem (SImode, addr);
++ cr_save_reg = gen_rtx_REG (SImode,
++ DEFAULT_ABI == ABI_AIX
++ && !restoring_GPRs_inline
++ && info->first_fp_reg_save < 64
++ ? 11 : 12);
++ emit_move_insn (cr_save_reg, mem);
+
+- emit_move_insn (gen_rtx_REG (SImode, 12), mem);
+ }
+
+ /* Set LR here to try to overlap restores below. */
+- if (restore_lr)
++ if (restore_lr && restoring_GPRs_inline)
+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
+ gen_rtx_REG (Pmode, 0));
+
+@@ -17318,7 +17462,7 @@ rs6000_emit_epilogue (int sibcall)
+ par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
+ 0, reg_mode,
+ /*savep=*/false, /*gpr=*/true,
+- /*exitp=*/true);
++ /*lr=*/true);
+ emit_jump_insn (par);
+
+ /* We don't want anybody else emitting things after we jumped
+@@ -17337,20 +17481,24 @@ rs6000_emit_epilogue (int sibcall)
+ rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
+ sp_offset, can_use_exit);
+ else
+- emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11),
+- sp_reg_rtx,
+- GEN_INT (sp_offset - info->fp_size)));
++ {
++ emit_insn (gen_add3_insn (gen_rtx_REG (Pmode, DEFAULT_ABI == ABI_AIX
++ ? 12 : 11),
++ frame_reg_rtx,
++ GEN_INT (sp_offset - info->fp_size)));
++ if (REGNO (frame_reg_rtx) == 11)
++ sp_offset += info->fp_size;
++ }
+
+ par = rs6000_make_savres_rtx (info, frame_reg_rtx,
+ info->gp_save_offset, reg_mode,
+ /*savep=*/false, /*gpr=*/true,
+- /*exitp=*/can_use_exit);
++ /*lr=*/can_use_exit);
+
+ if (can_use_exit)
+ {
+ if (info->cr_save_p)
+- rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12),
+- using_mtcr_multiple);
++ rs6000_restore_saved_cr (cr_save_reg, using_mtcr_multiple);
+
+ emit_jump_insn (par);
+
+@@ -17396,6 +17544,16 @@ rs6000_emit_epilogue (int sibcall)
+ }
+ }
+
++ if (restore_lr && !restoring_GPRs_inline)
++ {
++ rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
++ info->lr_save_offset + sp_offset);
++
++ emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
++ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
++ gen_rtx_REG (Pmode, 0));
++ }
++
+ /* Restore fpr's if we need to do it without calling a function. */
+ if (restoring_FPRs_inline)
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+@@ -17419,7 +17577,7 @@ rs6000_emit_epilogue (int sibcall)
+
+ /* If we saved cr, restore it here. Just those that were used. */
+ if (info->cr_save_p)
+- rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple);
++ rs6000_restore_saved_cr (cr_save_reg, using_mtcr_multiple);
+
+ /* If this is V.4, unwind the stack pointer after all of the loads
+ have been done. */
+@@ -17437,13 +17595,14 @@ rs6000_emit_epilogue (int sibcall)
+ if (!sibcall)
+ {
+ rtvec p;
++ bool lr = (strategy & SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
+ if (! restoring_FPRs_inline)
+ p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
+ else
+ p = rtvec_alloc (2);
+
+ RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
+- RTVEC_ELT (p, 1) = (restoring_FPRs_inline
++ RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
+ ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
+ : gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 65)));
+@@ -17458,10 +17617,12 @@ rs6000_emit_epilogue (int sibcall)
+ sym = rs6000_savres_routine_sym (info,
+ /*savep=*/false,
+ /*gpr=*/false,
+- /*exitp=*/true);
++ /*lr=*/lr);
+ RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
+ RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
+- gen_rtx_REG (Pmode, 11));
++ gen_rtx_REG (Pmode,
++ DEFAULT_ABI == ABI_AIX
++ ? 1 : 11));
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx addr, mem;
+Index: gcc-4.4.1/gcc/config/rs6000/rs6000.md
+===================================================================
+--- gcc-4.4.1.orig/gcc/config/rs6000/rs6000.md 2009-10-06 14:09:12.087313658 -0700
++++ gcc-4.4.1/gcc/config/rs6000/rs6000.md 2009-10-06 14:09:19.797315286 -0700
+@@ -14868,6 +14868,19 @@
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
++(define_insn "*return_and_restore_fpregs_aix_<mode>"
++ [(match_parallel 0 "any_parallel_operand"
++ [(return)
++ (use (match_operand:P 1 "register_operand" "l"))
++ (use (match_operand:P 2 "symbol_ref_operand" "s"))
++ (use (match_operand:P 3 "gpc_reg_operand" "r"))
++ (set (match_operand:DF 4 "gpc_reg_operand" "=d")
++ (match_operand:DF 5 "memory_operand" "m"))])]
++ ""
++ "b %z2"
++ [(set_attr "type" "branch")
++ (set_attr "length" "4")])
++
+ ; This is used in compiling the unwind routines.
+ (define_expand "eh_return"
+ [(use (match_operand 0 "general_operand" ""))]
+Index: gcc-4.4.1/gcc/config/rs6000/sysv4.h
+===================================================================
+--- gcc-4.4.1.orig/gcc/config/rs6000/sysv4.h 2009-10-06 13:51:50.447316469 -0700
++++ gcc-4.4.1/gcc/config/rs6000/sysv4.h 2009-10-06 14:09:19.807390239 -0700
+@@ -271,27 +271,25 @@ do { \
+ #endif
+
+ /* Define cutoff for using external functions to save floating point.
+- Currently on 64-bit V.4, always use inline stores. When optimizing
+- for size on 32-bit targets, use external functions when
+- profitable. */
+-#define FP_SAVE_INLINE(FIRST_REG) (optimize_size && !TARGET_64BIT \
++ When optimizing for size, use external functions when profitable. */
++#define FP_SAVE_INLINE(FIRST_REG) (optimize_size \
+ ? ((FIRST_REG) == 62 \
+ || (FIRST_REG) == 63) \
+ : (FIRST_REG) < 64)
+ /* And similarly for general purpose registers. */
+ #define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32 \
+- && (TARGET_64BIT || !optimize_size))
++ && !optimize_size)
+
+ /* Put jump tables in read-only memory, rather than in .text. */
+ #define JUMP_TABLES_IN_TEXT_SECTION 0
+
+ /* Prefix and suffix to use to saving floating point. */
+ #define SAVE_FP_PREFIX "_savefpr_"
+-#define SAVE_FP_SUFFIX (TARGET_64BIT ? "_l" : "")
++#define SAVE_FP_SUFFIX ""
+
+ /* Prefix and suffix to use to restoring floating point. */
+ #define RESTORE_FP_PREFIX "_restfpr_"
+-#define RESTORE_FP_SUFFIX (TARGET_64BIT ? "_l" : "")
++#define RESTORE_FP_SUFFIX ""
+
+ /* Type used for ptrdiff_t, as a string used in a declaration. */
+ #define PTRDIFF_TYPE "int"
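
The sysv4.h hunk above drops the 64-bit special case from the save/restore
cutoffs. As a reading aid, here is a hypothetical plain-C restatement of the
two macros as they stand after the patch (helper names invented; the macros
themselves are authoritative):

#include <stdbool.h>

/* Hypothetical restatement of FP_SAVE_INLINE / GP_SAVE_INLINE after
   this patch.  first_reg is the first register that needs saving:
   FPRs are registers 32..63, so 62/63 mean "only f30/f31", and 64
   means no FPRs need saving at all.  */

static bool
fp_save_inline (int first_reg, bool optimize_size)
{
  /* At -Os, stay inline only for the cheapest cases (one or two
     FPRs); otherwise call the out-of-line _savefpr_<n> and
     _restfpr_<n> routines.  When not optimizing for size, always
     save inline.  */
  return optimize_size ? (first_reg == 62 || first_reg == 63)
                       : first_reg < 64;
}

static bool
gp_save_inline (int first_reg, bool optimize_size)
{
  /* GPRs are registers 0..31 (32 means none need saving); saves go
     out of line whenever optimizing for size.  */
  return first_reg < 32 && !optimize_size;
}

Before the patch, TARGET_64BIT forced the inline path and appended an "_l"
suffix to the routine names; dropping both (here and in linux64.h) is what
lets 64-bit -Os code share the out-of-line routines, with
rs6000_savres_routine_name above synthesizing the ABI-appropriate
_savegpr0_/_savegpr1_ style names.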