diff -Nrup --ignore-space-change gcc-4.0.2/config.sub gcc-4.0.2-atmel.0.99.2/config.sub
--- gcc-4.0.2/config.sub	2005-04-25 12:36:56.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/config.sub	2005-06-07 14:59:22.000000000 +0200
@@ -1,9 +1,9 @@
 #! /bin/sh
 # Configuration validation subroutine script.
 #   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-#   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+#   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
 
-timestamp='2005-04-22'
+timestamp='2005-06-07'
 
 # This file is (in principle) common to ALL GNU software.
 # The presence of a machine in this file suggests that SOME GNU software
@@ -70,7 +70,7 @@ Report bugs and patches to <config-patch
 version="\
 GNU config.sub ($timestamp)
 
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
@@ -230,8 +230,7 @@ case $basic_machine in
 	| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
 	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
 	| am33_2.0 \
-	| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
-	| bfin \
+	| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
 	| c4x | clipper \
 	| d10v | d30v | dlx | dsp16xx \
 	| fr30 | frv \
@@ -263,8 +262,7 @@ case $basic_machine in
 	| pyramid \
 	| sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
 	| sh64 | sh64le \
-	| sparc | sparc64 | sparc64b | sparc86x | sparclet | sparclite \
-	| sparcv8 | sparcv9 | sparcv9b \
+	| sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv8 | sparcv9 | sparcv9b \
 	| strongarm \
 	| tahoe | thumb | tic4x | tic80 | tron \
 	| v850 | v850e \
@@ -299,8 +297,8 @@ case $basic_machine in
 	| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
 	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
 	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
-	| avr-* \
-	| bfin-* | bs2000-* \
+	| avr-* | avr32-* \
+	| bs2000-* \
 	| c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
 	| clipper-* | craynv-* | cydra-* \
 	| d10v-* | d30v-* | dlx-* \
@@ -338,8 +336,7 @@ case $basic_machine in
 	| romp-* | rs6000-* \
 	| sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \
 	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
-	| sparc-* | sparc64-* | sparc64b-* | sparc86x-* | sparclet-* \
-	| sparclite-* \
+	| sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
 	| sparcv8-* | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
 	| tahoe-* | thumb-* \
 	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
diff -Nrup --ignore-space-change gcc-4.0.2/configure gcc-4.0.2-atmel.0.99.2/configure
--- gcc-4.0.2/configure	2005-09-13 09:01:28.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/configure	2006-01-20 14:31:15.000000000 +0100
@@ -1284,6 +1284,9 @@ case "${target}" in
   arm-*-riscix*)
     noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
     ;;
+  avr32-*-*)
+    noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}"
+    ;;
   avr-*-*)
     noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
     ;;
@@ -1804,7 +1807,7 @@ else
   # Extract the first word of "gcc", so it can be a program name with args.
 set dummy gcc; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1808: checking for $ac_word" >&5
+echo "configure:1811: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -1834,7 +1837,7 @@ if test -z "$CC"; then
   # Extract the first word of "cc", so it can be a program name with args.
 set dummy cc; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1838: checking for $ac_word" >&5
+echo "configure:1841: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -1885,7 +1888,7 @@ fi
       # Extract the first word of "cl", so it can be a program name with args.
 set dummy cl; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1889: checking for $ac_word" >&5
+echo "configure:1892: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -1917,7 +1920,7 @@ fi
 fi
 
 echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works""... $ac_c" 1>&6
-echo "configure:1921: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
+echo "configure:1924: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
 
 ac_ext=c
 # CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
@@ -1928,12 +1931,12 @@ cross_compiling=$ac_cv_prog_cc_cross
 
 cat > conftest.$ac_ext << EOF
 
-#line 1932 "configure"
+#line 1935 "configure"
 #include "confdefs.h"
 
 main(){return(0);}
 EOF
-if { (eval echo configure:1937: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:1940: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
   ac_cv_prog_cc_works=yes
   # If we can't run a trivial program, we are probably using a cross compiler.
   if (./conftest; exit) 2>/dev/null; then
@@ -1959,12 +1962,12 @@ if test $ac_cv_prog_cc_works = no; then
   { echo "configure: error: installation or configuration problem: C compiler cannot create executables." 1>&2; exit 1; }
 fi
 echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler""... $ac_c" 1>&6
-echo "configure:1963: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
+echo "configure:1966: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
 echo "$ac_t""$ac_cv_prog_cc_cross" 1>&6
 cross_compiling=$ac_cv_prog_cc_cross
 
 echo $ac_n "checking whether we are using GNU C""... $ac_c" 1>&6
-echo "configure:1968: checking whether we are using GNU C" >&5
+echo "configure:1971: checking whether we are using GNU C" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_gcc'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -1973,7 +1976,7 @@ else
   yes;
 #endif
 EOF
-if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:1977: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
+if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:1980: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
   ac_cv_prog_gcc=yes
 else
   ac_cv_prog_gcc=no
@@ -1992,7 +1995,7 @@ ac_test_CFLAGS="${CFLAGS+set}"
 ac_save_CFLAGS="$CFLAGS"
 CFLAGS=
 echo $ac_n "checking whether ${CC-cc} accepts -g""... $ac_c" 1>&6
-echo "configure:1996: checking whether ${CC-cc} accepts -g" >&5
+echo "configure:1999: checking whether ${CC-cc} accepts -g" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_cc_g'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2059,7 +2062,7 @@ fi
 # Extract the first word of "${ac_tool_prefix}gnatbind", so it can be a program name with args.
 set dummy ${ac_tool_prefix}gnatbind; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2063: checking for $ac_word" >&5
+echo "configure:2066: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_GNATBIND'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2091,7 +2094,7 @@ if test -n "$ac_tool_prefix"; then
   # Extract the first word of "gnatbind", so it can be a program name with args.
 set dummy gnatbind; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2095: checking for $ac_word" >&5
+echo "configure:2098: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_GNATBIND'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2124,7 +2127,7 @@ fi
 fi
 
 echo $ac_n "checking whether compiler driver understands Ada""... $ac_c" 1>&6
-echo "configure:2128: checking whether compiler driver understands Ada" >&5
+echo "configure:2131: checking whether compiler driver understands Ada" >&5
 if eval "test \"`echo '$''{'acx_cv_cc_gcc_supports_ada'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2156,7 +2159,7 @@ else
 fi
 
 echo $ac_n "checking how to compare bootstrapped objects""... $ac_c" 1>&6
-echo "configure:2160: checking how to compare bootstrapped objects" >&5
+echo "configure:2163: checking how to compare bootstrapped objects" >&5
 if eval "test \"`echo '$''{'gcc_cv_prog_cmp_skip'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2254,9 +2257,9 @@ saved_CFLAGS="$CFLAGS"
 CFLAGS="$CFLAGS $gmpinc"
 # Check GMP actually works
 echo $ac_n "checking for correct version of gmp.h""... $ac_c" 1>&6
-echo "configure:2258: checking for correct version of gmp.h" >&5
+echo "configure:2261: checking for correct version of gmp.h" >&5
 cat > conftest.$ac_ext <<EOF
-#line 2260 "configure"
+#line 2263 "configure"
 #include "confdefs.h"
 #include "gmp.h"
 int main() {
@@ -2267,7 +2270,7 @@ choke me
 
 ; return 0; }
 EOF
-if { (eval echo configure:2271: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+if { (eval echo configure:2274: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
   rm -rf conftest*
   echo "$ac_t""yes" 1>&6
 else
@@ -2280,12 +2283,12 @@ rm -f conftest*
 
 if test x"$have_gmp" = xyes; then
   echo $ac_n "checking for MPFR""... $ac_c" 1>&6
-echo "configure:2284: checking for MPFR" >&5
+echo "configure:2287: checking for MPFR" >&5
 
   saved_LIBS="$LIBS"
   LIBS="$LIBS $gmplibs"
   cat > conftest.$ac_ext <<EOF
-#line 2289 "configure"
+#line 2292 "configure"
 #include "confdefs.h"
 #include <gmp.h>
 #include <mpfr.h>
@@ -2293,7 +2296,7 @@ int main() {
 mpfr_t n; mpfr_init(n);
 ; return 0; }
 EOF
-if { (eval echo configure:2297: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:2300: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
   rm -rf conftest*
   echo "$ac_t""yes" 1>&6
 else
@@ -2789,7 +2792,7 @@ do
 # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2793: checking for $ac_word" >&5
+echo "configure:2796: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_BISON'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2824,7 +2827,7 @@ do
 # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2828: checking for $ac_word" >&5
+echo "configure:2831: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_YACC'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2859,7 +2862,7 @@ do
 # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2863: checking for $ac_word" >&5
+echo "configure:2866: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_M4'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2894,7 +2897,7 @@ do
 # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2898: checking for $ac_word" >&5
+echo "configure:2901: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_FLEX'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2929,7 +2932,7 @@ do
 # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2933: checking for $ac_word" >&5
+echo "configure:2936: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_LEX'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -2964,7 +2967,7 @@ do
 # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:2968: checking for $ac_word" >&5
+echo "configure:2971: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_MAKEINFO'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3611,7 +3614,7 @@ test -n "$target_alias" && ncn_target_to
   # Extract the first word of "${ncn_tool_prefix}ar", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}ar; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3615: checking for $ac_word" >&5
+echo "configure:3618: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_AR'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3644,7 +3647,7 @@ if test -z "$ac_cv_prog_AR" ; then
     # Extract the first word of "ar", so it can be a program name with args.
 set dummy ar; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3648: checking for $ac_word" >&5
+echo "configure:3651: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_AR'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3683,7 +3686,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}as", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}as; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3687: checking for $ac_word" >&5
+echo "configure:3690: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_AS'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3716,7 +3719,7 @@ if test -z "$ac_cv_prog_AS" ; then
     # Extract the first word of "as", so it can be a program name with args.
 set dummy as; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3720: checking for $ac_word" >&5
+echo "configure:3723: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_AS'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3755,7 +3758,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}dlltool", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}dlltool; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3759: checking for $ac_word" >&5
+echo "configure:3762: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_DLLTOOL'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3788,7 +3791,7 @@ if test -z "$ac_cv_prog_DLLTOOL" ; then
     # Extract the first word of "dlltool", so it can be a program name with args.
 set dummy dlltool; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3792: checking for $ac_word" >&5
+echo "configure:3795: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_DLLTOOL'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3827,7 +3830,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}ld", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}ld; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3831: checking for $ac_word" >&5
+echo "configure:3834: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_LD'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3860,7 +3863,7 @@ if test -z "$ac_cv_prog_LD" ; then
     # Extract the first word of "ld", so it can be a program name with args.
 set dummy ld; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3864: checking for $ac_word" >&5
+echo "configure:3867: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_LD'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3899,7 +3902,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}nm", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}nm; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3903: checking for $ac_word" >&5
+echo "configure:3906: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_NM'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3932,7 +3935,7 @@ if test -z "$ac_cv_prog_NM" ; then
     # Extract the first word of "nm", so it can be a program name with args.
 set dummy nm; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3936: checking for $ac_word" >&5
+echo "configure:3939: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_NM'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -3971,7 +3974,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}ranlib", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}ranlib; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:3975: checking for $ac_word" >&5
+echo "configure:3978: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_RANLIB'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4004,7 +4007,7 @@ if test -z "$ac_cv_prog_RANLIB" ; then
     # Extract the first word of "ranlib", so it can be a program name with args.
 set dummy ranlib; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4008: checking for $ac_word" >&5
+echo "configure:4011: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_RANLIB'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4043,7 +4046,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}windres", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}windres; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4047: checking for $ac_word" >&5
+echo "configure:4050: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_WINDRES'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4076,7 +4079,7 @@ if test -z "$ac_cv_prog_WINDRES" ; then
     # Extract the first word of "windres", so it can be a program name with args.
 set dummy windres; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4080: checking for $ac_word" >&5
+echo "configure:4083: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_WINDRES'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4115,7 +4118,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}objcopy", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}objcopy; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4119: checking for $ac_word" >&5
+echo "configure:4122: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_OBJCOPY'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4148,7 +4151,7 @@ if test -z "$ac_cv_prog_OBJCOPY" ; then
     # Extract the first word of "objcopy", so it can be a program name with args.
 set dummy objcopy; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4152: checking for $ac_word" >&5
+echo "configure:4155: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_OBJCOPY'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4187,7 +4190,7 @@ fi
   # Extract the first word of "${ncn_tool_prefix}objdump", so it can be a program name with args.
 set dummy ${ncn_tool_prefix}objdump; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4191: checking for $ac_word" >&5
+echo "configure:4194: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_OBJDUMP'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4220,7 +4223,7 @@ if test -z "$ac_cv_prog_OBJDUMP" ; then
     # Extract the first word of "objdump", so it can be a program name with args.
 set dummy objdump; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4224: checking for $ac_word" >&5
+echo "configure:4227: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_OBJDUMP'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4266,7 +4269,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}ar", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}ar; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4270: checking for $ac_word" >&5
+echo "configure:4273: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_AR_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4299,7 +4302,7 @@ if test -z "$ac_cv_prog_CONFIGURED_AR_FO
     # Extract the first word of "ar", so it can be a program name with args.
 set dummy ar; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4303: checking for $ac_word" >&5
+echo "configure:4306: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_AR_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4338,7 +4341,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}as", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}as; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4342: checking for $ac_word" >&5
+echo "configure:4345: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_AS_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4371,7 +4374,7 @@ if test -z "$ac_cv_prog_CONFIGURED_AS_FO
     # Extract the first word of "as", so it can be a program name with args.
 set dummy as; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4375: checking for $ac_word" >&5
+echo "configure:4378: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_AS_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4410,7 +4413,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}dlltool", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}dlltool; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4414: checking for $ac_word" >&5
+echo "configure:4417: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_DLLTOOL_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4443,7 +4446,7 @@ if test -z "$ac_cv_prog_CONFIGURED_DLLTO
     # Extract the first word of "dlltool", so it can be a program name with args.
 set dummy dlltool; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4447: checking for $ac_word" >&5
+echo "configure:4450: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_DLLTOOL_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4482,7 +4485,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}ld", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}ld; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4486: checking for $ac_word" >&5
+echo "configure:4489: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_LD_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4515,7 +4518,7 @@ if test -z "$ac_cv_prog_CONFIGURED_LD_FO
     # Extract the first word of "ld", so it can be a program name with args.
 set dummy ld; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4519: checking for $ac_word" >&5
+echo "configure:4522: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_LD_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4554,7 +4557,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}nm", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}nm; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4558: checking for $ac_word" >&5
+echo "configure:4561: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_NM_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4587,7 +4590,7 @@ if test -z "$ac_cv_prog_CONFIGURED_NM_FO
     # Extract the first word of "nm", so it can be a program name with args.
 set dummy nm; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4591: checking for $ac_word" >&5
+echo "configure:4594: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_NM_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4626,7 +4629,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}ranlib", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}ranlib; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4630: checking for $ac_word" >&5
+echo "configure:4633: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_RANLIB_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4659,7 +4662,7 @@ if test -z "$ac_cv_prog_CONFIGURED_RANLI
     # Extract the first word of "ranlib", so it can be a program name with args.
 set dummy ranlib; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4663: checking for $ac_word" >&5
+echo "configure:4666: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_RANLIB_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4698,7 +4701,7 @@ fi
   # Extract the first word of "${ncn_target_tool_prefix}windres", so it can be a program name with args.
 set dummy ${ncn_target_tool_prefix}windres; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4702: checking for $ac_word" >&5
+echo "configure:4705: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_CONFIGURED_WINDRES_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4731,7 +4734,7 @@ if test -z "$ac_cv_prog_CONFIGURED_WINDR
     # Extract the first word of "windres", so it can be a program name with args.
 set dummy windres; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:4735: checking for $ac_word" >&5
+echo "configure:4738: checking for $ac_word" >&5
 if eval "test \"`echo '$''{'ac_cv_prog_ncn_cv_CONFIGURED_WINDRES_FOR_TARGET'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4816,7 +4819,7 @@ RANLIB_FOR_TARGET=${RANLIB_FOR_TARGET}${
 NM_FOR_TARGET=${NM_FOR_TARGET}${extra_nmflags_for_target}
 
 echo $ac_n "checking whether to enable maintainer-specific portions of Makefiles""... $ac_c" 1>&6
-echo "configure:4820: checking whether to enable maintainer-specific portions of Makefiles" >&5
+echo "configure:4823: checking whether to enable maintainer-specific portions of Makefiles" >&5
 # Check whether --enable-maintainer-mode or --disable-maintainer-mode was given.
 if test "${enable_maintainer_mode+set}" = set; then
   enableval="$enable_maintainer_mode"
@@ -4863,7 +4866,7 @@ esac
 # gcc for stageN-gcc and stagePREV-gcc for stage(N-1).  In case this is not
 # possible, however, we can resort to mv.
 echo $ac_n "checking if symbolic links between directories work""... $ac_c" 1>&6
-echo "configure:4867: checking if symbolic links between directories work" >&5
+echo "configure:4870: checking if symbolic links between directories work" >&5
 if eval "test \"`echo '$''{'gcc_cv_prog_ln_s_dir'+set}'`\" = set"; then
   echo $ac_n "(cached) $ac_c" 1>&6
 else
@@ -4997,15 +5000,34 @@ trap 'rm -f $CONFIG_STATUS conftest*; ex
 # Transform confdefs.h into DEFS.
 # Protect against shell expansion while executing Makefile rules.
 # Protect against Makefile macro expansion.
-cat > conftest.defs <<\EOF
-s%#define \([A-Za-z_][A-Za-z0-9_]*\) *\(.*\)%-D\1=\2%g
-s%[ 	`~#$^&*(){}\\|;'"<>?]%\\&%g
-s%\[%\\&%g
-s%\]%\\&%g
-s%\$%$$%g
-EOF
-DEFS=`sed -f conftest.defs confdefs.h | tr '\012' ' '`
-rm -f conftest.defs
+#
+# If the first sed substitution is executed (which looks for macros that
+# take arguments), then we branch to the quote section.  Otherwise,
+# look for a macro that doesn't take arguments.
+cat >confdef2opt.sed <<\_ACEOF
+t clear
+: clear
+s,^[ 	]*#[ 	]*define[ 	][ 	]*\([^ 	(][^ 	(]*([^)]*)\)[ 	]*\(.*\),-D\1=\2,g
+t quote
+s,^[ 	]*#[ 	]*define[ 	][ 	]*\([^ 	][^ 	]*\)[ 	]*\(.*\),-D\1=\2,g
+t quote
+d
+: quote
+s,[ 	`~#$^&*(){}\\|;'"<>?],\\&,g
+s,\[,\\&,g
+s,\],\\&,g
+s,\$,$$,g
+p
+_ACEOF
+# We use echo to avoid assuming a particular line-breaking character.
+# The extra dot is to prevent the shell from consuming trailing
+# line-breaks from the sub-command output.  A line-break within
+# single-quotes doesn't work because, if this script is created in a
+# platform that uses two characters for line-breaks (e.g., DOS), tr
+# would break.
+ac_LF_and_DOT=`echo; echo .`
+DEFS=`sed -n -f confdef2opt.sed confdefs.h | tr "$ac_LF_and_DOT" ' .'`
+rm -f confdef2opt.sed
 
 
 # Without the "./", some shells look in PATH for config.status.
diff -Nrup --ignore-space-change gcc-4.0.2/configure.in gcc-4.0.2-atmel.0.99.2/configure.in
--- gcc-4.0.2/configure.in	2005-09-13 09:01:28.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/configure.in	2006-01-20 14:31:15.000000000 +0100
@@ -493,6 +493,9 @@ case "${target}" in
   arm-*-riscix*)
     noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
     ;;
+  avr32-*-*)
+    noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}"
+    ;;
   avr-*-*)
     noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
     ;;
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/ada/s-tpopsp-rtems.adb gcc-4.0.2-atmel.0.99.2/gcc/ada/s-tpopsp-rtems.adb
--- gcc-4.0.2/gcc/ada/s-tpopsp-rtems.adb	2005-01-27 12:56:57.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/ada/s-tpopsp-rtems.adb	2005-07-15 16:26:03.000000000 +0200
@@ -7,7 +7,7 @@
 --                                                                          --
 --                                  B o d y                                 --
 --                                                                          --
---                             $Revision: 1.2 $
+--                             $Revision: 3484 $
 --                                                                          --
 --            Copyright (C) 1991-2003, Florida State University             --
 --                                                                          --
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/builtins.c gcc-4.0.2-atmel.0.99.2/gcc/builtins.c
--- gcc-4.0.2/gcc/builtins.c	2005-08-28 13:08:55.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/builtins.c	2006-03-23 13:29:00.000000000 +0100
@@ -8387,7 +8387,7 @@ validate_arglist (tree arglist, ...)
 
   do
     {
-      code = va_arg (ap, enum tree_code);
+      code = va_arg (ap, int);
       switch (code)
 	{
 	case 0:
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/calls.c gcc-4.0.2-atmel.0.99.2/gcc/calls.c
--- gcc-4.0.2/gcc/calls.c	2005-07-25 18:36:33.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/calls.c	2006-03-23 13:29:00.000000000 +0100
@@ -3353,7 +3353,7 @@ emit_library_call_value_1 (int retval, r
   for (; count < nargs; count++)
     {
       rtx val = va_arg (p, rtx);
-      enum machine_mode mode = va_arg (p, enum machine_mode);
+      enum machine_mode mode = va_arg (p, int);
 
       /* We cannot convert the arg value to the mode the library wants here;
 	 must do it earlier where we know the signedness of the arg.  */
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/avr32.c gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32.c
--- gcc-4.0.2/gcc/config/avr32/avr32.c	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32.c	2006-11-24 17:13:09.000000000 +0100
@@ -0,0 +1,7276 @@
+/*
+   Target hooks and helper functions for AVR32.
+   Copyright 2003-2006 Atmel Corporation.
+
+   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+   Initial porting by Anders Ødland.
+
+   This file is part of GCC.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "obstack.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "toplev.h"
+#include "recog.h"
+#include "ggc.h"
+#include "except.h"
+#include "c-pragma.h"
+#include "integrate.h"
+#include "tm_p.h"
+#include "langhooks.h"
+
+#include "target.h"
+#include "target-def.h"
+
+#include <ctype.h>
+
+/* Forward definitions of types.  */
+typedef struct minipool_node Mnode;
+typedef struct minipool_fixup Mfix;
+
+/* Obstack for minipool constant handling.  */
+static struct obstack minipool_obstack;
+static char *minipool_startobj;
+static rtx minipool_vector_label;
+
+/* True if we are currently building a constant table.  */
+int making_const_table;
+
+/* Some forward function declarations */
+static unsigned long avr32_isr_value (tree);
+static unsigned long avr32_compute_func_type (void);
+static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
+static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
+static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
+					   int flags, bool * no_add_attrs);
+static void avr32_reorg (void);
+bool avr32_return_in_msb (tree type);
+bool avr32_vector_mode_supported (enum machine_mode mode);
+static void avr32_init_libfuncs (void);
+void avr32_load_pic_register (void);
+
+
+static void
+avr32_add_gc_roots (void)
+{
+  gcc_obstack_init (&minipool_obstack);
+  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
+}
+
+
+/* List of all known AVR32 parts  */
+static const struct part_type_s avr32_part_types[] = {
+  /* name, part_type, architecture type, macro */
+  {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
+  {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
+  {"ap7010", PART_TYPE_AVR32_AP7010, ARCH_TYPE_AVR32_AP, "__AVR32_AP7010__"},
+  {"ap7020", PART_TYPE_AVR32_AP7020, ARCH_TYPE_AVR32_AP, "__AVR32_AP7020__"},
+  {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A0256__"},
+  {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A0512__"},
+  {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1128__"},
+  {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1256__"},
+  {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1512__"},
+  {NULL, 0, 0, NULL}
+};
+
+/* List of all known AVR32 architectures  */
+static const struct arch_type_s avr32_arch_types[] = {
+  /* name, architecture type, microarchitecture type, feature flags, macro */
+  {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B, FLAG_AVR32_HAS_DSP |
+   FLAG_AVR32_HAS_SIMD | FLAG_AVR32_HAS_UNALIGNED_WORD |
+   FLAG_AVR32_HAS_BRANCH_PRED, "__AVR32_AP__"},
+  {"uc", ARCH_TYPE_AVR32_UC, UARCH_TYPE_AVR32A,
+   FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW, "__AVR32_UC__"},
+  {NULL, 0, 0, 0, NULL}
+};
+
+/* Default arch name */
+const char *avr32_arch_name = "ap";
+const char *avr32_part_name = "none";
+
+const struct part_type_s *avr32_part;
+const struct arch_type_s *avr32_arch;
+
+
+/* Override command line options */
+void
+avr32_override_options (void)
+{
+  const struct part_type_s *part;
+  const struct arch_type_s *arch;
+
+  /* Check if part type is set. */
+  for (part = avr32_part_types; part->name; part++)
+    if (strcmp (part->name, avr32_part_name) == 0)
+      break;
+
+  avr32_part = part;
+
+  if (!part->name)
+    {
+      fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
+	       avr32_part_name);
+      for (part = avr32_part_types; part->name; part++)
+	fprintf (stderr, "\t%s\n", part->name);
+      avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
+    }
+
+  avr32_arch = &avr32_arch_types[avr32_part->arch_type];
+
+  /* If part was set to "none" then check if arch was set. */
+  if (strcmp (avr32_part->name, "none") == 0)
+    {
+      /* Check if arch type is set. */
+      for (arch = avr32_arch_types; arch->name; arch++)
+	if (strcmp (arch->name, avr32_arch_name) == 0)
+	  break;
+
+      avr32_arch = arch;
+
+      if (!arch->name)
+	{
+	  fprintf (stderr, "Unknown arch `%s' specified\nKnown arch names:\n",
+		   avr32_arch_name);
+	  for (arch = avr32_arch_types; arch->name; arch++)
+	    fprintf (stderr, "\t%s\n", arch->name);
+	  avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
+	}
+    }
+
+  /* If optimization level is two or greater, then align start of loops to a
+     word boundary since this will allow folding the first insn of the loop.
+     Do this only for targets supporting branch prediction. */
+  if (optimize >= 2 && TARGET_BRANCH_PRED)
+    align_loops = 2;
+
+  if (AVR32_ALWAYS_PIC)
+    flag_pic = 1;
+
+  if (target_flags & AVR32_FLAG_NO_PIC)
+    flag_pic = 0;
+
+  avr32_add_gc_roots ();
+}
+
+
+/*
+If defined, a function that outputs the assembler code for entry to a
+function.  The prologue is responsible for setting up the stack frame,
+initializing the frame pointer register, saving registers that must be
+saved, and allocating size additional bytes of storage for the
+local variables.  size is an integer.  file is a stdio
+stream to which the assembler code should be output.
+
+The label for the beginning of the function need not be output by this
+macro.  That has already been done when the macro is run.
+
+To determine which registers to save, the macro can refer to the array
+regs_ever_live: element r is nonzero if hard register
+r is used anywhere within the function.  This implies the function
+prologue should save register r, provided it is not one of the
+call-used registers.  (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
+regs_ever_live.)
+
+On machines that have ``register windows'', the function entry code does
+not save on the stack the registers that are in the windows, even if
+they are supposed to be preserved by function calls; instead it takes
+appropriate steps to ``push'' the register stack, if any non-call-used
+registers are used in the function.
+
+On machines where functions may or may not have frame-pointers, the
+function entry code must vary accordingly; it must set up the frame
+pointer if one is wanted, and not otherwise.  To determine whether a
+frame pointer is wanted, the macro can refer to the variable
+frame_pointer_needed.  The variable's value will be 1 at run
+time in a function that needs a frame pointer.  (see Elimination).
+
+The function entry code is responsible for allocating any stack space
+required for the function.  This stack space consists of the regions
+listed below.  In most cases, these regions are allocated in the
+order listed, with the last listed region closest to the top of the
+stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
+the highest address if it is not defined).  You can use a different order
+for a machine if doing so is more convenient or required for
+compatibility reasons.  Except in cases where required by standard
+or by a debugger, there is no reason why the stack layout used by GCC
+need agree with that used by other compilers for a machine.
+*/
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
+
+
+#undef TARGET_DEFAULT_SHORT_ENUMS
+#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
+
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
+
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
+
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
+
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB avr32_return_in_msb
+
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
+
+#undef TARGET_STRIP_NAME_ENCODING
+#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
+
+#define streq(string1, string2) (strcmp (string1, string2) == 0)
+
+#undef  TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
+
+#undef  TARGET_COMP_TYPE_ATTRIBUTES
+#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
+
+
+#undef  TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS avr32_rtx_costs
+
+#undef  TARGET_CANNOT_FORCE_CONST_MEM
+#define  TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
+
+#undef  TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER avr32_assemble_integer
+
+/*
+ * Switches to the appropriate section for output of constant pool
+ * entry x in mode. You can assume that x is some kind of constant in
+ * RTL. The argument mode is redundant except in the case of a
+ * const_int rtx. Select the section by calling readonly_data_section
+ * or one of the alternatives for other sections. align is the
+ * constant alignment in bits.
+ *
+ * The default version of this function takes care of putting symbolic
+ * constants in flag_pic mode in data_section and everything else in
+ * readonly_data_section.
+ */
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
+
+
+/*
+ * If non-null, this hook performs a target-specific pass over the
+ * instruction stream. The compiler will run it at all optimization
+ * levels, just before the point at which it normally does
+ * delayed-branch scheduling.
+ *
+ * The exact purpose of the hook varies from target to target. Some
+ * use it to do transformations that are necessary for correctness,
+ * such as laying out in-function constant pools or avoiding hardware
+ * hazards. Others use it as an opportunity to do some
+ * machine-dependent optimizations.
+ *
+ * You need not implement the hook if it has nothing to do. The
+ * default definition is null.
+ */
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
+
+/* Target hook for assembling integer objects.
+   Need to handle integer vectors */
+static bool
+avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+  if (avr32_vector_mode_supported (GET_MODE (x)))
+    {
+      int i, units;
+
+      if (GET_CODE (x) != CONST_VECTOR)
+	abort ();
+
+      units = CONST_VECTOR_NUNITS (x);
+
+      switch (GET_MODE (x))
+	{
+	case V2HImode:
+	  size = 2;
+	  break;
+	case V4QImode:
+	  size = 1;
+	  break;
+	default:
+	  abort ();
+	}
+
+      for (i = 0; i < units; i++)
+	{
+	  rtx elt;
+
+	  elt = CONST_VECTOR_ELT (x, i);
+	  assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
+	}
+
+      return true;
+    }
+
+  return default_assemble_integer (x, size, aligned_p);
+}
+
+/*
+ * This target hook describes the relative costs of RTL expressions.
+ *
+ * The cost may depend on the precise form of the expression, which is
+ * available for examination in x, and the rtx code of the expression
+ * in which it is contained, found in outer_code. code is the
+ * expression code--redundant, since it can be obtained with GET_CODE
+ * (x).
+ *
+ * In implementing this hook, you can use the construct COSTS_N_INSNS
+ * (n) to specify a cost equal to n fast instructions.
+ *
+ * On entry to the hook, *total contains a default estimate for the
+ * cost of the expression. The hook should modify this value as
+ * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
+ * for multiplications, COSTS_N_INSNS (7) for division and modulus
+ * operations, and COSTS_N_INSNS (1) for all other operations.
+ *
+ * When optimizing for code size, i.e. when optimize_size is non-zero,
+ * this target hook should be used to estimate the relative size cost
+ * of an expression, again relative to COSTS_N_INSNS.
+ *
+ * The hook returns true when all subexpressions of x have been
+ * processed, and false when rtx_cost should recurse.
+ */
+
+/* Worker routine for avr32_rtx_costs.  */
+static inline int
+avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
+		   enum rtx_code outer ATTRIBUTE_UNUSED)
+{
+  enum machine_mode mode = GET_MODE (x);
+
+  switch (GET_CODE (x))
+    {
+    case MEM:
+      /* Using pre-decrement / post-increment memory operations on the
+         avr32_uc architecture means that two writebacks must be performed
+         and hence two cycles are needed. */
+      if (!optimize_size
+	  && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
+	  && avr32_arch->arch_type == ARCH_TYPE_AVR32_UC
+	  && (GET_CODE (XEXP (x, 0)) == PRE_DEC
+	      || GET_CODE (XEXP (x, 0)) == POST_INC))
+	return COSTS_N_INSNS (4);
+
+      /* Memory costs quite a lot for the first word, but subsequent words
+         load at the equivalent of a single insn each.  */
+      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+	return COSTS_N_INSNS (2 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
+
+      return COSTS_N_INSNS (3);
+    case SYMBOL_REF:
+    case CONST:
+      /* These are valid for the pseudo insns: lda.w and call, which operate
+         on direct addresses. We assume that the cost of a lda.w is the same
+         as the cost of a ld.w insn. */
+      return (outer == SET) ? COSTS_N_INSNS (3) : COSTS_N_INSNS (1);
+    case DIV:
+    case MOD:
+    case UDIV:
+    case UMOD:
+      return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
+
+    case ROTATE:
+    case ROTATERT:
+      if (mode == TImode)
+	return COSTS_N_INSNS (100);
+
+      if (mode == DImode)
+	return COSTS_N_INSNS (10);
+      return COSTS_N_INSNS (4);
+    case ASHIFT:
+    case LSHIFTRT:
+    case ASHIFTRT:
+    case NOT:
+      if (mode == TImode)
+	return COSTS_N_INSNS (10);
+
+      if (mode == DImode)
+	return COSTS_N_INSNS (4);
+      return COSTS_N_INSNS (1);
+    case PLUS:
+    case MINUS:
+    case NEG:
+    case COMPARE:
+    case ABS:
+      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+	return COSTS_N_INSNS (100);
+
+      if (mode == TImode)
+	return COSTS_N_INSNS (50);
+
+      if (mode == DImode)
+	return COSTS_N_INSNS (2);
+      return COSTS_N_INSNS (1);
+
+    case MULT:
+      {
+	if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+	  return COSTS_N_INSNS (300);
+
+	if (mode == TImode)
+	  return COSTS_N_INSNS (16);
+
+	if (mode == DImode)
+	  return COSTS_N_INSNS (4);
+
+	if (mode == HImode)
+	  return COSTS_N_INSNS (2);
+
+	return COSTS_N_INSNS (3);
+      }
+    case IF_THEN_ELSE:
+      if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+	return COSTS_N_INSNS (4);
+      return COSTS_N_INSNS (1);
+    case SIGN_EXTEND:
+    case ZERO_EXTEND:
+      /* Sign/Zero extensions of registers are quite costly since these
+         instructions only take one register operand, which means that gcc
+         often must insert some move instructions. */
+      if (mode == QImode || mode == HImode)
+	return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
+      return COSTS_N_INSNS (4);
+    case UNSPEC:
+      /* divmod operations */
+      if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
+	  || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
+	{
+	  return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
+	}
+      /* Fallthrough */
+    default:
+      return COSTS_N_INSNS (1);
+    }
+}
+
+static bool
+avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+  *total = avr32_rtx_costs_1 (x, code, outer_code);
+  return true;
+}
+
+
+bool
+avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
+{
+  /* Do not want symbols in the constant pool when compiling pic or if using
+     address pseudo instructions. */
+  return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
+	  && avr32_find_symbol (x) != NULL_RTX);
+}
+
+
+/* Table of machine attributes.  */
+const struct attribute_spec avr32_attribute_table[] = {
+  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+  /* Interrupt Service Routines have special prologue and epilogue
+     requirements.  */
+  {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
+  {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
+  {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
+  {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
+  {NULL, 0, 0, false, false, false, NULL}
+};
+
+
+typedef struct
+{
+  const char *const arg;
+  const unsigned long return_value;
+}
+isr_attribute_arg;
+
+static const isr_attribute_arg isr_attribute_args[] = {
+  {"FULL", AVR32_FT_ISR_FULL},
+  {"full", AVR32_FT_ISR_FULL},
+  {"HALF", AVR32_FT_ISR_HALF},
+  {"half", AVR32_FT_ISR_HALF},
+  {"NONE", AVR32_FT_ISR_NONE},
+  {"none", AVR32_FT_ISR_NONE},
+  {"UNDEF", AVR32_FT_ISR_NONE},
+  {"undef", AVR32_FT_ISR_NONE},
+  {"SWI", AVR32_FT_ISR_NONE},
+  {"swi", AVR32_FT_ISR_NONE},
+  {NULL, AVR32_FT_ISR_NONE}
+};
+
+/* Returns the (interrupt) function type of the current
+   function, or AVR32_FT_UNKNOWN if the type cannot be determined.  */
+
+static unsigned long
+avr32_isr_value (tree argument)
+{
+  const isr_attribute_arg *ptr;
+  const char *arg;
+
+  /* No argument - default to ISR_NONE.  */
+  if (argument == NULL_TREE)
+    return AVR32_FT_ISR_NONE;
+
+  /* Get the value of the argument.  */
+  if (TREE_VALUE (argument) == NULL_TREE
+      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
+    return AVR32_FT_UNKNOWN;
+
+  arg = TREE_STRING_POINTER (TREE_VALUE (argument));
+
+  /* Check it against the list of known arguments.  */
+  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
+    if (streq (arg, ptr->arg))
+      return ptr->return_value;
+
+  /* An unrecognized interrupt type.  */
+  return AVR32_FT_UNKNOWN;
+}
+
+
+
+/*
+These hooks specify assembly directives for creating certain kinds
+of integer object.  The TARGET_ASM_BYTE_OP directive creates a
+byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
+aligned two-byte object, and so on.  Any of the hooks may be
+NULL, indicating that no suitable directive is available.
+
+The compiler will print these strings at the start of a new line,
+followed immediately by the object's initial value.  In most cases,
+the string should contain a tab, a pseudo-op, and then another tab.
+*/
+#undef  TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\t.byte\t"
+#undef  TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
+#undef  TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
+#undef  TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP NULL
+#undef  TARGET_ASM_ALIGNED_TI_OP
+#define TARGET_ASM_ALIGNED_TI_OP NULL
+#undef  TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
+#undef  TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
+#undef  TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP NULL
+#undef  TARGET_ASM_UNALIGNED_TI_OP
+#define TARGET_ASM_UNALIGNED_TI_OP NULL
+
+#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
+#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE avr32_sched_use_dfa_pipeline_interface
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
+
+
+static void
+avr32_output_mi_thunk (FILE * file,
+		       tree thunk ATTRIBUTE_UNUSED,
+		       HOST_WIDE_INT delta,
+		       HOST_WIDE_INT vcall_offset, tree function)
+{
+  int mi_delta = delta;
+  int this_regno =
+    (avr32_return_in_memory (DECL_RESULT (function), TREE_TYPE (function)) ?
+     INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
+
+
+  if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
+      || vcall_offset)
+    {
+      fprintf (file, "\tpushm\tr10\n");
+    }
+
+
+  if (mi_delta != 0)
+    {
+      if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
+	{
+	  fprintf (file, "\tsub\t%s, -0x%x\n", reg_names[this_regno],
+		   mi_delta);
+	}
+      else
+	{
+	  /* The immediate is larger than k21, so we must free up a temporary
+	     register by pushing a register to the stack. */
+	  fprintf (file, "\tmov\tr10, lo(%x)\n", mi_delta);
+	  fprintf (file, "\torh\tr10, hi(%x)\n", mi_delta);
+	  fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]);
+	}
+    }
+
+
+  if (vcall_offset != 0)
+    {
+      fprintf (file, "\tld.w\tr10, %s[0]\n", reg_names[this_regno]);
+      fprintf (file, "\tld.w\tr10, r10[%i]\n", (int) vcall_offset);
+      fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]);
+    }
+
+
+  if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
+      || vcall_offset)
+    {
+      fprintf (file, "\tpopm\tr10\n");
+    }
+
+  if (flag_pic)
+    {
+      /* Don't know how we should do this!!! For now we'll just use an
+         extended branch instruction and hope that the function will be
+         reached. */
+      fprintf (file, "\tbral\t");
+      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
+      fputc ('\n', file);
+    }
+  else
+    {
+      fprintf (file, "\tlddpc\tpc, 0f\n");
+      fprintf (file, "\t.align 2\n");
+      fputs ("0:\t.long\t", file);
+      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
+      fputc ('\n', file);
+    }
+}
+
+/* Implements target hook vector_mode_supported.  */
+bool
+avr32_vector_mode_supported (enum machine_mode mode)
+{
+  if ((mode == V2HImode) || (mode == V4QImode))
+    return true;
+
+  return false;
+}
+
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
+
+#undef  TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS avr32_init_builtins
+
+#undef  TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
+
+tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
+  void_ftype_ptr_int;
+tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
+tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
+  short_ftype_short_short;
+tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
+tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
+tree longlong_ftype_int_int, void_ftype_int_int_longlong;
+tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
+tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
+
+#define def_builtin(NAME, TYPE, CODE)					\
+  lang_hooks.builtin_function ((NAME), (TYPE), (CODE),			\
+		  	       BUILT_IN_MD, NULL, NULL_TREE)
+
+#define def_mbuiltin(MASK, NAME, TYPE, CODE)				\
+  do									\
+    {									\
+      if ((MASK))							\
+	lang_hooks.builtin_function ((NAME), (TYPE), (CODE),		\
+				     BUILT_IN_MD, NULL, NULL_TREE);	\
+    }									\
+  while (0)
+
+struct builtin_description
+{
+  const unsigned int mask;
+  const enum insn_code icode;
+  const char *const name;
+  const int code;
+  const enum rtx_code comparison;
+  const unsigned int flag;
+  const tree *ftype;
+};
+
+static const struct builtin_description bdesc_2arg[] = {
+#define DSP_BUILTIN(code, builtin, ftype) \
+  { 1, CODE_FOR_##code, "__builtin_" #code , \
+    AVR32_BUILTIN_##builtin, 0, 0, ftype }
+
+  DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
+  DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
+  DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
+  DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
+  DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
+  DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
+  DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
+  DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
+  DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
+  DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
+  DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
+};
+
+
+void
+avr32_init_builtins (void)
+{
+  unsigned int i;
+  const struct builtin_description *d;
+  tree endlink = void_list_node;
+  tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
+  tree longlong_endlink =
+    tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
+  tree short_endlink =
+    tree_cons (NULL_TREE, short_integer_type_node, endlink);
+  tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
+
+  /* int func (int) */
+  int_ftype_int = build_function_type (integer_type_node, int_endlink);
+
+  /* short func (short) */
+  short_ftype_short
+    = build_function_type (short_integer_type_node, short_endlink);
+
+  /* short func (short, short) */
+  short_ftype_short_short
+    = build_function_type (short_integer_type_node,
+			   tree_cons (NULL_TREE, short_integer_type_node,
+				      short_endlink));
+
+  /* long long func (long long, short, short) */
+  longlong_ftype_longlong_short_short
+    = build_function_type (long_long_integer_type_node,
+			   tree_cons (NULL_TREE, long_long_integer_type_node,
+				      tree_cons (NULL_TREE,
+						 short_integer_type_node,
+						 short_endlink)));
+
+  /* long long func (short, short) */
+  longlong_ftype_short_short
+    = build_function_type (long_long_integer_type_node,
+			   tree_cons (NULL_TREE, short_integer_type_node,
+				      short_endlink));
+
+  /* int func (int, int) */
+  int_ftype_int_int
+    = build_function_type (integer_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      int_endlink));
+
+  /* long long func (int, int) */
+  longlong_ftype_int_int
+    = build_function_type (long_long_integer_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      int_endlink));
+
+  /* long long int func (long long, int, short) */
+  longlong_ftype_longlong_int_short
+    = build_function_type (long_long_integer_type_node,
+			   tree_cons (NULL_TREE, long_long_integer_type_node,
+				      tree_cons (NULL_TREE, integer_type_node,
+						 short_endlink)));
+
+  /* long long int func (int, short) */
+  longlong_ftype_int_short
+    = build_function_type (long_long_integer_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      short_endlink));
+
+  /* int func (int, short, short) */
+  int_ftype_int_short_short
+    = build_function_type (integer_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      tree_cons (NULL_TREE,
+						 short_integer_type_node,
+						 short_endlink)));
+
+  /* int func (short, short) */
+  int_ftype_short_short
+    = build_function_type (integer_type_node,
+			   tree_cons (NULL_TREE, short_integer_type_node,
+				      short_endlink));
+
+  /* int func (int, short) */
+  int_ftype_int_short
+    = build_function_type (integer_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      short_endlink));
+
+  /* void func (int, int) */
+  void_ftype_int_int
+    = build_function_type (void_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      int_endlink));
+
+  /* void func (int, int, int) */
+  void_ftype_int_int_int
+    = build_function_type (void_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      tree_cons (NULL_TREE, integer_type_node,
+						 int_endlink)));
+
+  /* void func (int, int, long long) */
+  void_ftype_int_int_longlong
+    = build_function_type (void_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      tree_cons (NULL_TREE, integer_type_node,
+						 longlong_endlink)));
+
+  /* void func (int, int, int, int, int) */
+  void_ftype_int_int_int_int_int
+    = build_function_type (void_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      tree_cons (NULL_TREE, integer_type_node,
+						 tree_cons (NULL_TREE,
+							    integer_type_node,
+							    tree_cons
+							    (NULL_TREE,
+							     integer_type_node,
+							     int_endlink)))));
+
+  /* void func (void *, int) */
+  void_ftype_ptr_int
+    = build_function_type (void_type_node,
+			   tree_cons (NULL_TREE, ptr_type_node, int_endlink));
+
+  /* void func (int) */
+  void_ftype_int = build_function_type (void_type_node, int_endlink);
+
+  /* void func (void) */
+  void_ftype_void = build_function_type (void_type_node, void_endlink);
+
+  /* int func (void) */
+  int_ftype_void = build_function_type (integer_type_node, void_endlink);
+
+  /* int func (void *, int) */
+  int_ftype_ptr_int
+    = build_function_type (integer_type_node,
+			   tree_cons (NULL_TREE, ptr_type_node, int_endlink));
+
+  /* int func (int, int, int) */
+  int_ftype_int_int_int
+    = build_function_type (integer_type_node,
+			   tree_cons (NULL_TREE, integer_type_node,
+				      tree_cons (NULL_TREE, integer_type_node,
+						 int_endlink)));
+
+  /* Initialize avr32 builtins.  */
+  def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
+  def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
+  def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
+  def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
+  def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
+  def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
+  def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
+  def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
+  def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
+  def_builtin ("__builtin_breakpoint", void_ftype_void,
+	       AVR32_BUILTIN_BREAKPOINT);
+  def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
+  def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
+  def_builtin ("__builtin_bswap_16", short_ftype_short,
+	       AVR32_BUILTIN_BSWAP16);
+  def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
+  def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
+	       AVR32_BUILTIN_COP);
+  def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
+  def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
+	       AVR32_BUILTIN_MVRC_W);
+  def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
+	       AVR32_BUILTIN_MVCR_D);
+  def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
+	       AVR32_BUILTIN_MVRC_D);
+  def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
+  def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
+  def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
+	       AVR32_BUILTIN_SATRNDS);
+  def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
+	       AVR32_BUILTIN_SATRNDU);
+  def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
+  def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
+  def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
+	       AVR32_BUILTIN_MACSATHH_W);
+  def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
+	       AVR32_BUILTIN_MACWH_D);
+  def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
+	       AVR32_BUILTIN_MACHH_D);
+
+  /* Add all builtins that are more or less simple operations on two
+     operands.  */
+  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+    {
+      /* Use one of the operands; the target can have a different mode for
+         mask-generating compares.  */
+
+      if (d->name == 0)
+	continue;
+
+      def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
+    }
+}
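+
+/* From user code these builtins are called like ordinary functions.  A
+   purely illustrative sketch (register number and bit value chosen
+   arbitrarily):
+
+     int sr = __builtin_mfsr (0);
+     __builtin_mtsr (0, sr | 0x1);
+
+   Note that the system-register argument must be a compile-time constant;
+   avr32_expand_builtin below rejects anything else.  */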
+
+
+/* Subroutine of avr32_expand_builtin to take care of binop insns.  */
+
+static rtx
+avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+  rtx pat;
+  tree arg0 = TREE_VALUE (arglist);
+  tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+  enum machine_mode tmode = insn_data[icode].operand[0].mode;
+  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+  if (!target
+      || GET_MODE (target) != tmode
+      || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+    target = gen_reg_rtx (tmode);
+
+  /* If the insn wants its input operands in modes different from what we
+     have, convert or copy them into registers of the expected mode.  */
+  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+    {
+      /* If op0 is already a reg we must cast it to the correct mode. */
+      if (REG_P (op0))
+	op0 = convert_to_mode (mode0, op0, 1);
+      else
+	op0 = copy_to_mode_reg (mode0, op0);
+    }
+  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
+    {
+      /* If op1 is already a reg we must cast it to the correct mode. */
+      if (REG_P (op1))
+	op1 = convert_to_mode (mode1, op1, 1);
+      else
+	op1 = copy_to_mode_reg (mode1, op1);
+    }
+  pat = GEN_FCN (icode) (target, op0, op1);
+  if (!pat)
+    return 0;
+  emit_insn (pat);
+  return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+   with result going to TARGET if that's convenient
+   (and in mode MODE if that's convenient).
+   SUBTARGET may be used as the target for computing one of EXP's operands.
+   IGNORE is nonzero if the value is to be ignored.  */
+
+rtx
+avr32_expand_builtin (tree exp,
+		      rtx target,
+		      rtx subtarget ATTRIBUTE_UNUSED,
+		      enum machine_mode mode ATTRIBUTE_UNUSED,
+		      int ignore ATTRIBUTE_UNUSED)
+{
+  const struct builtin_description *d;
+  unsigned int i;
+  enum insn_code icode;
+  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+  tree arglist = TREE_OPERAND (exp, 1);
+  tree arg0, arg1, arg2;
+  rtx op0, op1, op2, pat;
+  enum machine_mode tmode, mode0, mode1;
+  enum machine_mode arg0_mode;
+  int fcode = DECL_FUNCTION_CODE (fndecl);
+
+  switch (fcode)
+    {
+    default:
+      break;
+
+    case AVR32_BUILTIN_SATS:
+    case AVR32_BUILTIN_SATU:
+    case AVR32_BUILTIN_SATRNDS:
+    case AVR32_BUILTIN_SATRNDU:
+      {
+	const char *fname;
+	switch (fcode)
+	  {
+	  default:
+	  case AVR32_BUILTIN_SATS:
+	    icode = CODE_FOR_sats;
+	    fname = "sats";
+	    break;
+	  case AVR32_BUILTIN_SATU:
+	    icode = CODE_FOR_satu;
+	    fname = "satu";
+	    break;
+	  case AVR32_BUILTIN_SATRNDS:
+	    icode = CODE_FOR_satrnds;
+	    fname = "satrnds";
+	    break;
+	  case AVR32_BUILTIN_SATRNDU:
+	    icode = CODE_FOR_satrndu;
+	    fname = "satrndu";
+	    break;
+	  }
+
+	arg0 = TREE_VALUE (arglist);
+	arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+	arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+	op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+
+	tmode = insn_data[icode].operand[0].mode;
+
+
+	if (target == 0
+	    || GET_MODE (target) != tmode
+	    || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	  target = gen_reg_rtx (tmode);
+
+
+	if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
+	  {
+	    op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
+	  }
+
+	if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
+	  {
+	    error ("Parameter 2 to __builtin_%s should be a constant number.",
+		   fname);
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
+	  {
+	    error ("Parameter 3 to __builtin_%s should be a constant number.",
+		   fname);
+	    return NULL_RTX;
+	  }
+
+	emit_move_insn (target, op0);
+	pat = GEN_FCN (icode) (target, op1, op2);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+
+	return target;
+      }
+    case AVR32_BUILTIN_MUSTR:
+      icode = CODE_FOR_mustr;
+      tmode = insn_data[icode].operand[0].mode;
+
+      if (target == 0
+	  || GET_MODE (target) != tmode
+	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	target = gen_reg_rtx (tmode);
+      pat = GEN_FCN (icode) (target);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return target;
+
+    case AVR32_BUILTIN_MFSR:
+      icode = CODE_FOR_mfsr;
+      arg0 = TREE_VALUE (arglist);
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      tmode = insn_data[icode].operand[0].mode;
+      mode0 = insn_data[icode].operand[1].mode;
+
+      if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+	{
+	  error ("Parameter 1 to __builtin_mfsr must be a constant number");
+	}
+
+      if (target == 0
+	  || GET_MODE (target) != tmode
+	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	target = gen_reg_rtx (tmode);
+      pat = GEN_FCN (icode) (target, op0);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return target;
+    case AVR32_BUILTIN_MTSR:
+      icode = CODE_FOR_mtsr;
+      arg0 = TREE_VALUE (arglist);
+      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+      mode0 = insn_data[icode].operand[0].mode;
+      mode1 = insn_data[icode].operand[1].mode;
+
+      if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
+	{
+	  error ("Parameter 1 to __builtin_mtsr must be a constant number");
+	  return gen_reg_rtx (mode0);
+	}
+      if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
+	op1 = copy_to_mode_reg (mode1, op1);
+      pat = GEN_FCN (icode) (op0, op1);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_MFDR:
+      icode = CODE_FOR_mfdr;
+      arg0 = TREE_VALUE (arglist);
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      tmode = insn_data[icode].operand[0].mode;
+      mode0 = insn_data[icode].operand[1].mode;
+
+      if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+	{
+	  error ("Parameter 1 to __builtin_mfdr must be a constant number");
+	}
+
+      if (target == 0
+	  || GET_MODE (target) != tmode
+	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	target = gen_reg_rtx (tmode);
+      pat = GEN_FCN (icode) (target, op0);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return target;
+    case AVR32_BUILTIN_MTDR:
+      icode = CODE_FOR_mtdr;
+      arg0 = TREE_VALUE (arglist);
+      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+      mode0 = insn_data[icode].operand[0].mode;
+      mode1 = insn_data[icode].operand[1].mode;
+
+      if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
+	{
+	  error ("Parameter 1 to __builtin_mtdr must be a constant number");
+	  return gen_reg_rtx (mode0);
+	}
+      if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
+	op1 = copy_to_mode_reg (mode1, op1);
+      pat = GEN_FCN (icode) (op0, op1);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_CACHE:
+      icode = CODE_FOR_cache;
+      arg0 = TREE_VALUE (arglist);
+      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+      mode0 = insn_data[icode].operand[0].mode;
+      mode1 = insn_data[icode].operand[1].mode;
+
+      if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
+	{
+	  error ("Parameter 2 to __builtin_cache must be a constant number");
+	  return gen_reg_rtx (mode1);
+	}
+
+      if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
+	op0 = copy_to_mode_reg (mode0, op0);
+
+      pat = GEN_FCN (icode) (op0, op1);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_SYNC:
+    case AVR32_BUILTIN_MUSFR:
+      {
+	const char *fname;
+	switch (fcode)
+	  {
+	  default:
+	  case AVR32_BUILTIN_SYNC:
+	    icode = CODE_FOR_sync;
+	    fname = "sync";
+	    break;
+	  case AVR32_BUILTIN_MUSFR:
+	    icode = CODE_FOR_musfr;
+	    fname = "musfr";
+	    break;
+	  }
+
+	arg0 = TREE_VALUE (arglist);
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	mode0 = insn_data[icode].operand[0].mode;
+
+	if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
+	  {
+	    if (icode == CODE_FOR_musfr)
+	      op0 = copy_to_mode_reg (mode0, op0);
+	    else
+	      {
+		error ("Parameter to __builtin_%s is illegal.", fname);
+		return gen_reg_rtx (mode0);
+	      }
+	  }
+	pat = GEN_FCN (icode) (op0);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+	return NULL_RTX;
+      }
+    case AVR32_BUILTIN_TLBR:
+      icode = CODE_FOR_tlbr;
+      pat = GEN_FCN (icode) (NULL_RTX);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_TLBS:
+      icode = CODE_FOR_tlbs;
+      pat = GEN_FCN (icode) (NULL_RTX);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_TLBW:
+      icode = CODE_FOR_tlbw;
+      pat = GEN_FCN (icode) (NULL_RTX);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_BREAKPOINT:
+      icode = CODE_FOR_breakpoint;
+      pat = GEN_FCN (icode) (NULL_RTX);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return NULL_RTX;
+    case AVR32_BUILTIN_XCHG:
+      icode = CODE_FOR_xchg;
+      arg0 = TREE_VALUE (arglist);
+      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+      tmode = insn_data[icode].operand[0].mode;
+      mode0 = insn_data[icode].operand[1].mode;
+      mode1 = insn_data[icode].operand[3].mode;
+
+      if (!(*insn_data[icode].operand[3].predicate) (op1, mode1))
+	{
+	  op1 = copy_to_mode_reg (mode1, op1);
+	}
+
+      if (!(*insn_data[icode].operand[2].predicate) (op0, mode0))
+	{
+	  op0 = copy_to_mode_reg (mode0, op0);
+	}
+
+      if (target == 0
+	  || GET_MODE (target) != tmode
+	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	target = gen_reg_rtx (tmode);
+      pat = GEN_FCN (icode) (target, op0, op0, op1);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return target;
+    case AVR32_BUILTIN_LDXI:
+      icode = CODE_FOR_ldxi;
+      arg0 = TREE_VALUE (arglist);
+      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+      arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+      op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+      tmode = insn_data[icode].operand[0].mode;
+      mode0 = insn_data[icode].operand[1].mode;
+      mode1 = insn_data[icode].operand[2].mode;
+
+      if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+	{
+	  op0 = copy_to_mode_reg (mode0, op0);
+	}
+
+      if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
+	{
+	  op1 = copy_to_mode_reg (mode1, op1);
+	}
+
+      if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
+	{
+	  error
+	    ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
+	  return gen_reg_rtx (mode0);
+	}
+
+      if (target == 0
+	  || GET_MODE (target) != tmode
+	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	target = gen_reg_rtx (tmode);
+      pat = GEN_FCN (icode) (target, op0, op1, op2);
+      if (!pat)
+	return 0;
+      emit_insn (pat);
+      return target;
+    case AVR32_BUILTIN_BSWAP16:
+      {
+	icode = CODE_FOR_bswap_16;
+	arg0 = TREE_VALUE (arglist);
+	arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
+	mode0 = insn_data[icode].operand[1].mode;
+	if (arg0_mode != mode0)
+	  arg0 = build1 (NOP_EXPR,
+			 (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
+
+	op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
+	tmode = insn_data[icode].operand[0].mode;
+
+
+	if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+	  {
+	    op0 = copy_to_mode_reg (mode0, op0);
+	  }
+
+	if (target == 0
+	    || GET_MODE (target) != tmode
+	    || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	  {
+	    target = gen_reg_rtx (tmode);
+	  }
+
+
+	pat = GEN_FCN (icode) (target, op0);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+
+	return target;
+      }
+    case AVR32_BUILTIN_BSWAP32:
+      {
+	icode = CODE_FOR_bswap_32;
+	arg0 = TREE_VALUE (arglist);
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	tmode = insn_data[icode].operand[0].mode;
+	mode0 = insn_data[icode].operand[1].mode;
+
+	if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+	  {
+	    op0 = copy_to_mode_reg (mode0, op0);
+	  }
+
+	if (target == 0
+	    || GET_MODE (target) != tmode
+	    || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	  target = gen_reg_rtx (tmode);
+
+
+	pat = GEN_FCN (icode) (target, op0);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+
+	return target;
+      }
+    case AVR32_BUILTIN_MVCR_W:
+    case AVR32_BUILTIN_MVCR_D:
+      {
+	arg0 = TREE_VALUE (arglist);
+	arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
+	if (fcode == AVR32_BUILTIN_MVCR_W)
+	  icode = CODE_FOR_mvcrsi;
+	else
+	  icode = CODE_FOR_mvcrdi;
+
+	tmode = insn_data[icode].operand[0].mode;
+
+	if (target == 0
+	    || GET_MODE (target) != tmode
+	    || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	  target = gen_reg_rtx (tmode);
+
+	if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
+	  {
+	    error
+	      ("Parameter 1 to __builtin_mvcr is not a valid coprocessor number.");
+	    error ("Number should be between 0 and 7.");
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
+	  {
+	    error
+	      ("Parameter 2 to __builtin_mvcr is not a valid coprocessor register number.");
+	    error ("Number should be between 0 and 15.");
+	    return NULL_RTX;
+	  }
+
+	pat = GEN_FCN (icode) (target, op0, op1);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+
+	return target;
+      }
+    case AVR32_BUILTIN_MACSATHH_W:
+    case AVR32_BUILTIN_MACWH_D:
+    case AVR32_BUILTIN_MACHH_D:
+      {
+	arg0 = TREE_VALUE (arglist);
+	arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+	arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+	op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+
+	icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
+		 (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
+		 CODE_FOR_machh_d);
+
+	tmode = insn_data[icode].operand[0].mode;
+	mode0 = insn_data[icode].operand[1].mode;
+	mode1 = insn_data[icode].operand[2].mode;
+
+
+	if (!target
+	    || GET_MODE (target) != tmode
+	    || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+	  target = gen_reg_rtx (tmode);
+
+	if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
+	  {
+	    /* If op0 is already a reg we must cast it to the correct mode. */
+	    if (REG_P (op0))
+	      op0 = convert_to_mode (tmode, op0, 1);
+	    else
+	      op0 = copy_to_mode_reg (tmode, op0);
+	  }
+
+	if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
+	  {
+	    /* If op1 is already a reg we must cast it to the correct mode. */
+	    if (REG_P (op1))
+	      op1 = convert_to_mode (mode0, op1, 1);
+	    else
+	      op1 = copy_to_mode_reg (mode0, op1);
+	  }
+
+	if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
+	  {
+	    /* If op2 is already a reg we must cast it to the correct mode. */
+	    if (REG_P (op2))
+	      op2 = convert_to_mode (mode1, op2, 1);
+	    else
+	      op2 = copy_to_mode_reg (mode1, op2);
+	  }
+
+	emit_move_insn (target, op0);
+
+	pat = GEN_FCN (icode) (target, op1, op2);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+	return target;
+      }
+    case AVR32_BUILTIN_MVRC_W:
+    case AVR32_BUILTIN_MVRC_D:
+      {
+	arg0 = TREE_VALUE (arglist);
+	arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+	arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+	op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+
+	if (fcode == AVR32_BUILTIN_MVRC_W)
+	  icode = CODE_FOR_mvrcsi;
+	else
+	  icode = CODE_FOR_mvrcdi;
+
+	if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
+	  {
+	    error ("Parameter 1 is not a valid coprocessor number.");
+	    error ("Number should be between 0 and 7.");
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
+	  {
+	    error ("Parameter 2 is not a valid coprocessor register number.");
+	    error ("Number should be between 0 and 15.");
+	    return NULL_RTX;
+	  }
+
+	if (GET_CODE (op2) == CONST_INT
+	    || GET_CODE (op2) == CONST
+	    || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
+	  {
+	    op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
+	  }
+
+	if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
+	  op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
+
+
+	pat = GEN_FCN (icode) (op0, op1, op2);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+
+	return NULL_RTX;
+      }
+    case AVR32_BUILTIN_COP:
+      {
+	rtx op3, op4;
+	tree arg3, arg4;
+	icode = CODE_FOR_cop;
+	arg0 = TREE_VALUE (arglist);
+	arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+	arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+	arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
+	arg4 =
+	  TREE_VALUE (TREE_CHAIN
+		      (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
+	op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+	op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+	op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+	op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
+	op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
+
+	if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
+	  {
+	    error
+	      ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
+	    error ("Number should be between 0 and 7.");
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
+	  {
+	    error
+	      ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
+	    error ("Number should be between 0 and 15.");
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
+	  {
+	    error
+	      ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
+	    error ("Number should be between 0 and 15.");
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
+	  {
+	    error
+	      ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
+	    error ("Number should be between 0 and 15.");
+	    return NULL_RTX;
+	  }
+
+	if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
+	  {
+	    error
+	      ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
+	    error ("Number should be between 0 and 127.");
+	    return NULL_RTX;
+	  }
+
+	pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
+	if (!pat)
+	  return 0;
+	emit_insn (pat);
+
+	return target;
+      }
+    }
+
+  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+    if (d->code == fcode)
+      return avr32_expand_binop_builtin (d->icode, arglist, target);
+
+
+  /* @@@ Should really do something sensible here.  */
+  return NULL_RTX;
+}
+
+
+/* Handle an "interrupt" or "isr" attribute;
+   arguments as in struct attribute_spec.handler.  */
+
+static tree
+avr32_handle_isr_attribute (tree * node, tree name, tree args,
+			    int flags, bool * no_add_attrs)
+{
+  if (DECL_P (*node))
+    {
+      if (TREE_CODE (*node) != FUNCTION_DECL)
+	{
+	  warning ("`%s' attribute only applies to functions",
+		   IDENTIFIER_POINTER (name));
+	  *no_add_attrs = true;
+	}
+      /* FIXME: the argument if any is checked for type attributes; should it
+         be checked for decl ones? */
+    }
+  else
+    {
+      if (TREE_CODE (*node) == FUNCTION_TYPE
+	  || TREE_CODE (*node) == METHOD_TYPE)
+	{
+	  if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
+	    {
+	      warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+	      *no_add_attrs = true;
+	    }
+	}
+      else if (TREE_CODE (*node) == POINTER_TYPE
+	       && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
+		   || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
+	       && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
+	{
+	  *node = build_variant_type_copy (*node);
+	  TREE_TYPE (*node) = build_type_attribute_variant
+	    (TREE_TYPE (*node),
+	     tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
+	  *no_add_attrs = true;
+	}
+      else
+	{
+	  /* Possibly pass this attribute on from the type to a decl.  */
+	  if (flags & ((int) ATTR_FLAG_DECL_NEXT
+		       | (int) ATTR_FLAG_FUNCTION_NEXT
+		       | (int) ATTR_FLAG_ARRAY_NEXT))
+	    {
+	      *no_add_attrs = true;
+	      return tree_cons (name, args, NULL_TREE);
+	    }
+	  else
+	    {
+	      warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+	    }
+	}
+    }
+
+  return NULL_TREE;
+}
+
+/* Handle an attribute requiring a FUNCTION_DECL;
+   arguments as in struct attribute_spec.handler.  */
+static tree
+avr32_handle_fndecl_attribute (tree * node, tree name,
+			       tree args ATTRIBUTE_UNUSED,
+			       int flags ATTRIBUTE_UNUSED,
+			       bool * no_add_attrs)
+{
+  if (TREE_CODE (*node) != FUNCTION_DECL)
+    {
+      warning ("%qs attribute only applies to functions",
+	       IDENTIFIER_POINTER (name));
+      *no_add_attrs = true;
+    }
+
+  return NULL_TREE;
+}
+
+
+/* Handle an acall attribute;
+   arguments as in struct attribute_spec.handler.  */
+
+static tree
+avr32_handle_acall_attribute (tree * node, tree name,
+			      tree args ATTRIBUTE_UNUSED,
+			      int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
+{
+  if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
+    {
+      warning ("`%s' attribute not yet supported...",
+	       IDENTIFIER_POINTER (name));
+      *no_add_attrs = true;
+      return NULL_TREE;
+    }
+
+  warning ("`%s' attribute only applies to functions",
+	   IDENTIFIER_POINTER (name));
+  *no_add_attrs = true;
+  return NULL_TREE;
+}
+
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+   are compatible, and 2 if they are nearly compatible (which causes a
+   warning to be generated).  */
+
+static int
+avr32_comp_type_attributes (tree type1, tree type2)
+{
+  int acall1, acall2, isr1, isr2, naked1, naked2;
+
+  /* Check for mismatch of non-default calling convention.  */
+  if (TREE_CODE (type1) != FUNCTION_TYPE)
+    return 1;
+
+  /* Check for mismatched call attributes.  */
+  acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
+  acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
+  naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
+  naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
+  isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
+  if (!isr1)
+    isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
+
+  isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
+  if (!isr2)
+    isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
+
+  if ((acall1 && isr2)
+      || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
+    return 0;
+
+  return 1;
+}
+
+
+/* Computes the type of the current function.  */
+
+static unsigned long
+avr32_compute_func_type (void)
+{
+  unsigned long type = AVR32_FT_UNKNOWN;
+  tree a;
+  tree attr;
+
+  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
+    abort ();
+
+  /* Decide if the current function is volatile.  Such functions never
+     return, and many memory cycles can be saved by not storing register
+     values that will never be needed again.  This optimization was added to
+     speed up context switching in a kernel application.  */
+  if (optimize > 0
+      && TREE_NOTHROW (current_function_decl)
+      && TREE_THIS_VOLATILE (current_function_decl))
+    type |= AVR32_FT_VOLATILE;
+
+  if (cfun->static_chain_decl != NULL)
+    type |= AVR32_FT_NESTED;
+
+  attr = DECL_ATTRIBUTES (current_function_decl);
+
+  a = lookup_attribute ("isr", attr);
+  if (a == NULL_TREE)
+    a = lookup_attribute ("interrupt", attr);
+
+  if (a == NULL_TREE)
+    type |= AVR32_FT_NORMAL;
+  else
+    type |= avr32_isr_value (TREE_VALUE (a));
+
+
+  a = lookup_attribute ("acall", attr);
+  if (a != NULL_TREE)
+    type |= AVR32_FT_ACALL;
+
+  a = lookup_attribute ("naked", attr);
+  if (a != NULL_TREE)
+    type |= AVR32_FT_NAKED;
+
+  return type;
+}
+
+/* Returns the type of the current function.  */
+
+static unsigned long
+avr32_current_func_type (void)
+{
+  if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
+    cfun->machine->func_type = avr32_compute_func_type ();
+
+  return cfun->machine->func_type;
+}
+
+/*
+   This target hook should return true if TYPE should not be passed solely
+   in registers.  The file expr.h provides a default definition that is
+   usually appropriate; refer to expr.h for additional documentation.
+*/
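+/* For example, a 12-byte struct whose alignment is only one byte fails the
+   alignment test below and is passed on the stack, while an 8-byte struct
+   with word alignment may still be passed in registers.  */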
+bool
+avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
+{
+  if (type && AGGREGATE_TYPE_P (type)
+      /* If the alignment is less than the size then pass the struct on
+         the stack. */
+      && ((unsigned int) TYPE_ALIGN_UNIT (type) <
+	  (unsigned int) int_size_in_bytes (type))
+      /* If we support unaligned word accesses then structs of size 4 and 8
+         can have any alignment and still be passed in registers. */
+      && !(TARGET_UNALIGNED_WORD
+	   && (int_size_in_bytes (type) == 4
+	       || int_size_in_bytes (type) == 8))
+      /* Double word structs need only a word alignment. */
+      && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
+    return true;
+
+  if (type && AGGREGATE_TYPE_P (type)
+      /* Structs of size 3, 5, 6 and 7 are always passed on the stack. */
+      && (int_size_in_bytes (type) == 3
+	  || int_size_in_bytes (type) == 5
+	  || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
+    return true;
+
+
+  return (type && TREE_ADDRESSABLE (type));
+}
+
+
+bool
+avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
+{
+  return true;
+}
+
+/*
+   This target hook should return true if an argument at the position indicated
+   by cum should be passed by reference. This predicate is queried after target
+   independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
+
+   If the hook returns true, a copy of that argument is made in memory and a
+   pointer to the argument is passed instead of the argument itself. The pointer
+   is passed in whatever way is appropriate for passing a pointer to that type.
+*/
+bool
+avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
+			 enum machine_mode mode ATTRIBUTE_UNUSED,
+			 tree type, bool named ATTRIBUTE_UNUSED)
+{
+  return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
+}
+
+static int
+avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
+			 enum machine_mode mode ATTRIBUTE_UNUSED,
+			 tree type ATTRIBUTE_UNUSED,
+			 bool named ATTRIBUTE_UNUSED)
+{
+  return 0;
+}
+
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/*
+  Registers used for passing function arguments, listed in the order in
+  which they are allocated (r12 first, then r11 down to r8), given as
+  internal gcc register numbers.
+*/
+const int avr32_function_arg_reglist[] =
+{
+  INTERNAL_REGNUM (12),
+  INTERNAL_REGNUM (11),
+  INTERNAL_REGNUM (10),
+  INTERNAL_REGNUM (9),
+  INTERNAL_REGNUM (8)
+};
+
+rtx avr32_compare_op0 = NULL_RTX;
+rtx avr32_compare_op1 = NULL_RTX;
+rtx avr32_compare_operator = NULL_RTX;
+rtx avr32_acc_cache = NULL_RTX;
+
+/*
+  Returns nonzero if it is allowed to store a value of mode mode in hard
+  register number regno.
+*/
+int
+avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
+{
+  /* We allow only float modes in the fp-registers */
+  if (regnr >= FIRST_FP_REGNUM
+      && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
+    {
+      return 0;
+    }
+
+  switch (mode)
+    {
+    case DImode:		/* long long */
+    case DFmode:		/* double */
+    case SCmode:		/* __complex__ float */
+    case CSImode:		/* __complex__ int */
+      if (regnr < 4)
+	{			/* long long int not supported in r12, sp, lr
+				   or pc. */
+	  return 0;
+	}
+      else
+	{
+	  if (regnr % 2)	/* long long int must be placed in an
+				   even-numbered register. */
+	    return 0;
+	  else
+	    return 1;
+	}
+    case CDImode:		/* __complex__ long long */
+    case DCmode:		/* __complex__ double */
+    case TImode:		/* 16 bytes */
+      if (regnr < 7)
+	return 0;
+      else if (regnr % 2)
+	return 0;
+      else
+	return 1;
+    default:
+      return 1;
+    }
+}
+
+
+int
+avr32_rnd_operands (rtx add, rtx shift)
+{
+  if (GET_CODE (shift) == CONST_INT &&
+      GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
+    {
+      if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
+	return TRUE;
+    }
+
+  return FALSE;
+}
+
+
+
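+/* The constraint string encodes the accepted immediate range: str[1] is 's'
+   or 'u' for a signed or unsigned field and str[2..3] give the field width
+   in bits, so "Ks21" accepts any value representable as a signed 21-bit
+   immediate.  For the 'I' constraint the negated value is range-checked,
+   matching instructions such as sub which encode the negated immediate.  */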
+int
+avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
+{
+  switch (c)
+    {
+    case 'K':
+    case 'I':
+      {
+	HOST_WIDE_INT min_value = 0, max_value = 0;
+	char size_str[3];
+	int const_size;
+
+	size_str[0] = str[2];
+	size_str[1] = str[3];
+	size_str[2] = '\0';
+	const_size = atoi (size_str);
+
+	if (toupper (str[1]) == 'U')
+	  {
+	    min_value = 0;
+	    max_value = (1 << const_size) - 1;
+	  }
+	else if (toupper (str[1]) == 'S')
+	  {
+	    min_value = -(1 << (const_size - 1));
+	    max_value = (1 << (const_size - 1)) - 1;
+	  }
+
+	if (c == 'I')
+	  {
+	    value = -value;
+	  }
+
+	if (value >= min_value && value <= max_value)
+	  {
+	    return 1;
+	  }
+	break;
+      }
+    case 'M':
+      return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
+    }
+
+  return 0;
+}
+
+
+/* Compute the mask of floating-point registers which need saving upon
+   entry to this function.  */
+static unsigned long
+avr32_compute_save_fp_reg_mask (void)
+{
+  unsigned long func_type = avr32_current_func_type ();
+  unsigned int save_reg_mask = 0;
+  unsigned int reg;
+  unsigned int max_reg = 7;
+  int save_all_call_used_regs = FALSE;
+
+  /* This only applies for hardware floating-point implementation. */
+  if (!TARGET_HARD_FLOAT)
+    return 0;
+
+  if (IS_INTERRUPT (func_type))
+    {
+
+      /* Interrupt functions must not corrupt any registers, even call
+         clobbered ones.  If this is a leaf function we can just examine the
+         registers used by the RTL, but otherwise we have to assume that
+         whatever function is called might clobber anything, and so we have
+         to save all the call-clobbered registers as well.  */
+      max_reg = 13;
+      save_all_call_used_regs = !current_function_is_leaf;
+    }
+
+  /* All registers that are used must be saved.  */
+  for (reg = 0; reg <= max_reg; reg++)
+    if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
+	|| (save_all_call_used_regs
+	    && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
+      save_reg_mask |= (1 << reg);
+
+  return save_reg_mask;
+}
+
+/* Compute the mask of registers which need saving upon function entry.  */
+static unsigned long
+avr32_compute_save_reg_mask (int push)
+{
+  unsigned long func_type;
+  unsigned int save_reg_mask = 0;
+  unsigned int reg;
+
+  func_type = avr32_current_func_type ();
+
+  if (IS_INTERRUPT (func_type))
+    {
+      unsigned int max_reg = 12;
+
+
+      /* Get the banking scheme for the interrupt */
+      switch (func_type)
+	{
+	case AVR32_FT_ISR_FULL:
+	  max_reg = 0;
+	  break;
+	case AVR32_FT_ISR_HALF:
+	  max_reg = 7;
+	  break;
+	case AVR32_FT_ISR_NONE:
+	  max_reg = 12;
+	  break;
+	}
+
+      /* Interrupt functions must not corrupt any registers, even call
+         clobbered ones.  If this is a leaf function we can just examine the
+         registers used by the RTL, but otherwise we have to assume that
+         whatever function is called might clobber anything, and so we have
+         to save all the call-clobbered registers as well.  */
+
+      /* We need not push the registers r8-r12 for AVR32A architectures, as
+         this is done automatically in hardware.  We also do not have any
+         shadow registers. */
+      if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
+	{
+	  max_reg = 7;
+	  func_type = AVR32_FT_ISR_NONE;
+	}
+
+      /* All registers which are used and are not shadowed must be saved.  */
+      for (reg = 0; reg <= max_reg; reg++)
+	if (regs_ever_live[INTERNAL_REGNUM (reg)]
+	    || (!current_function_is_leaf
+		&& call_used_regs[INTERNAL_REGNUM (reg)]))
+	  save_reg_mask |= (1 << reg);
+
+      /* Check LR */
+      if ((regs_ever_live[LR_REGNUM] || !current_function_is_leaf
+	   || frame_pointer_needed)
+	  /* Only non-shadowed register models need to save LR.  */
+	  && (func_type == AVR32_FT_ISR_NONE))
+	save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
+
+      /* Make sure that the GOT register is pushed. */
+      if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
+	  && current_function_uses_pic_offset_table)
+	save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
+
+    }
+  else
+    {
+      int use_pushm = optimize_size;
+
+      /* In the normal case we only need to save those registers which are
+         call saved and which are used by this function.  */
+      for (reg = 0; reg <= 7; reg++)
+	if (regs_ever_live[INTERNAL_REGNUM (reg)]
+	    && !call_used_regs[INTERNAL_REGNUM (reg)])
+	  save_reg_mask |= (1 << reg);
+
+      /* Make sure that the GOT register is pushed. */
+      if (current_function_uses_pic_offset_table)
+	save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
+
+
+      /* If we optimize for size and do not have anonymous arguments, always
+         use pushm/popm.  */
+      if (use_pushm)
+	{
+	  if ((save_reg_mask & (1 << 0))
+	      || (save_reg_mask & (1 << 1))
+	      || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
+	    save_reg_mask |= 0xf;
+
+	  if ((save_reg_mask & (1 << 4))
+	      || (save_reg_mask & (1 << 5))
+	      || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
+	    save_reg_mask |= 0xf0;
+
+	  if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
+	    save_reg_mask |= 0x300;
+	}
+
+
+      /* Check LR */
+      if ((regs_ever_live[LR_REGNUM] || !current_function_is_leaf ||
+	   (optimize_size && save_reg_mask) || frame_pointer_needed))
+	{
+	  if (push)
+	    {
+	      /* Push/Pop LR */
+	      save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
+	    }
+	  else
+	    {
+	      /* Pop PC */
+	      save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
+	    }
+	}
+    }
+
+  return save_reg_mask;
+}
+
+/* Compute the total size in bytes of all saved registers.  */
+static int
+avr32_get_reg_mask_size (int reg_mask)
+{
+  int reg, size;
+  size = 0;
+
+  for (reg = 0; reg <= 15; reg++)
+    if (reg_mask & (1 << reg))
+      size += 4;
+
+  return size;
+}
+
+/* Get one of the registers which are saved onto the stack upon function
+   entry.  */
+
+static int
+avr32_get_saved_reg (int save_reg_mask)
+{
+  unsigned int reg;
+
+  /* Find the first register which is saved in the saved_reg_mask */
+  for (reg = 0; reg <= 15; reg++)
+    if (save_reg_mask & (1 << reg))
+      return reg;
+
+  return -1;
+}
+
+/* Return 1 if it is possible to return using a single instruction.  */
+int
+avr32_use_return_insn (int iscond)
+{
+  unsigned int func_type = avr32_current_func_type ();
+  unsigned long saved_int_regs;
+  unsigned long saved_fp_regs;
+
+  /* Never use a return instruction before reload has run.  */
+  if (!reload_completed)
+    return 0;
+
+  /* Must adjust the stack for vararg functions. */
+  if (current_function_args_info.uses_anonymous_args)
+    return 0;
+
+  /* If there is a stack adjustment.  */
+  if (get_frame_size ())
+    return 0;
+
+  saved_int_regs = avr32_compute_save_reg_mask (TRUE);
+  saved_fp_regs = avr32_compute_save_fp_reg_mask ();
+
+  /* A return from a function which has saved fp-regs on the stack cannot be
+     done in one instruction.  */
+  if (saved_fp_regs)
+    return 0;
+
+  /* Conditional returns cannot be performed in one instruction if we need
+     to restore registers from the stack.  */
+  if (iscond && saved_int_regs)
+    return 0;
+
+  /* Conditional returns cannot be used for interrupt handlers. */
+  if (iscond && IS_INTERRUPT (func_type))
+    return 0;
+
+  /* Interrupt handlers which need to pop registers cannot use a single
+     return instruction.  */
+  if (saved_int_regs && IS_INTERRUPT (func_type))
+    return 0;
+
+
+  /* If there are saved registers but the LR isn't saved, then we need two
+     instructions for the return.  */
+  if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
+    return 0;
+
+
+  return 1;
+}
+
+
+/* Generate some function prologue info in the assembly file.  */
+
+void
+avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
+{
+  if (IS_NAKED (avr32_current_func_type ()))
+    fprintf (f,
+	     "\t# Function is naked: Prologue and epilogue provided by programmer\n");
+
+  if (IS_INTERRUPT (avr32_current_func_type ()))
+    {
+      switch (avr32_current_func_type ())
+	{
+	case AVR32_FT_ISR_FULL:
+	  fprintf (f,
+		   "\t# Interrupt Function: Fully shadowed register file\n");
+	  break;
+	case AVR32_FT_ISR_HALF:
+	  fprintf (f,
+		   "\t# Interrupt Function: Half shadowed register file\n");
+	  break;
+	default:
+	case AVR32_FT_ISR_NONE:
+	  fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
+	  break;
+	}
+    }
+
+
+  fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
+	   current_function_args_size, frame_size,
+	   current_function_pretend_args_size);
+
+  fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
+	   frame_pointer_needed, current_function_is_leaf);
+
+  fprintf (f, "\t# uses_anonymous_args = %i\n",
+	   current_function_args_info.uses_anonymous_args);
+}
+
+
+/* Generate and emit an insn that we will recognize as a pushm or stm.
+   Unfortunately, since this insn does not reflect very well the actual
+   semantics of the operation, we need to annotate the insn for the benefit
+   of DWARF2 frame unwind information.  */
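+
+/* The dwarf SEQUENCE built below records the stack pointer adjustment plus
+   one SET per saved register, so that the unwinder can locate each saved
+   register even though the pushm/stm insn itself looks like a single opaque
+   operation.  */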
+
+int avr32_convert_to_reglist16 (int reglist8_vect);
+
+static rtx
+emit_multi_reg_push (int reglist, int usePUSHM)
+{
+  rtx insn;
+  rtx dwarf;
+  rtx tmp;
+  rtx reg;
+  int i;
+  int nr_regs;
+  int index = 0;
+
+  if (usePUSHM)
+    {
+      insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
+      reglist = avr32_convert_to_reglist16 (reglist);
+    }
+  else
+    {
+      insn = emit_insn (gen_stm (stack_pointer_rtx,
+				 gen_rtx_CONST_INT (SImode, reglist),
+				 gen_rtx_CONST_INT (SImode, 1)));
+    }
+
+  nr_regs = avr32_get_reg_mask_size (reglist) / 4;
+  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
+
+  for (i = 15; i >= 0; i--)
+    {
+      if (reglist & (1 << i))
+	{
+	  reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
+	  tmp = gen_rtx_SET (VOIDmode,
+			     gen_rtx_MEM (SImode,
+					  plus_constant (stack_pointer_rtx,
+							 4 * index)), reg);
+	  RTX_FRAME_RELATED_P (tmp) = 1;
+	  XVECEXP (dwarf, 0, 1 + index++) = tmp;
+	}
+    }
+
+  tmp = gen_rtx_SET (SImode,
+		     stack_pointer_rtx,
+		     gen_rtx_PLUS (SImode,
+				   stack_pointer_rtx,
+				   GEN_INT (-4 * nr_regs)));
+  RTX_FRAME_RELATED_P (tmp) = 1;
+  XVECEXP (dwarf, 0, 0) = tmp;
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+					REG_NOTES (insn));
+  return insn;
+}
+
+
+static rtx
+emit_multi_fp_reg_push (int reglist)
+{
+  rtx insn;
+  rtx dwarf;
+  rtx tmp;
+  rtx reg;
+  int i;
+  int nr_regs;
+  int index = 0;
+
+  insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
+				gen_rtx_CONST_INT (SImode, reglist),
+				gen_rtx_CONST_INT (SImode, 1)));
+
+  nr_regs = avr32_get_reg_mask_size (reglist) / 4;
+  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
+
+  for (i = 15; i >= 0; i--)
+    {
+      if (reglist & (1 << i))
+	{
+	  reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
+	  tmp = gen_rtx_SET (VOIDmode,
+			     gen_rtx_MEM (SImode,
+					  plus_constant (stack_pointer_rtx,
+							 4 * index)), reg);
+	  RTX_FRAME_RELATED_P (tmp) = 1;
+	  XVECEXP (dwarf, 0, 1 + index++) = tmp;
+	}
+    }
+
+  tmp = gen_rtx_SET (SImode,
+		     stack_pointer_rtx,
+		     gen_rtx_PLUS (SImode,
+				   stack_pointer_rtx,
+				   GEN_INT (-4 * nr_regs)));
+  RTX_FRAME_RELATED_P (tmp) = 1;
+  XVECEXP (dwarf, 0, 0) = tmp;
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+					REG_NOTES (insn));
+  return insn;
+}
+
+rtx
+avr32_gen_load_multiple (rtx * regs, int count, rtx from,
+			 int write_back, int in_struct_p, int scalar_p)
+{
+
+  rtx result;
+  int i = 0, j;
+
+  result =
+    gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
+
+  if (write_back)
+    {
+      XVECEXP (result, 0, 0)
+	= gen_rtx_SET (GET_MODE (from), from,
+		       plus_constant (from, count * 4));
+      i = 1;
+      count++;
+    }
+
+
+  for (j = 0; i < count; i++, j++)
+    {
+      rtx unspec;
+      rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
+      MEM_IN_STRUCT_P (mem) = in_struct_p;
+      MEM_SCALAR_P (mem) = scalar_p;
+      unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
+      XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
+    }
+
+  return result;
+}
+
+
+rtx
+avr32_gen_store_multiple (rtx * regs, int count, rtx to,
+			  int in_struct_p, int scalar_p)
+{
+  rtx result;
+  int i = 0, j;
+
+  result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
+  for (j = 0; i < count; i++, j++)
+    {
+      rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
+      MEM_IN_STRUCT_P (mem) = in_struct_p;
+      MEM_SCALAR_P (mem) = scalar_p;
+      XVECEXP (result, 0, i)
+	= gen_rtx_SET (VOIDmode, mem,
+		       gen_rtx_UNSPEC (VOIDmode,
+				       gen_rtvec (1, regs[j]),
+				       UNSPEC_STORE_MULTIPLE));
+    }
+
+  return result;
+}
+
+
+/* Move a block of memory if it is word aligned or if we support unaligned
+   word memory accesses.  The size must be at most 64 bytes. */
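+
+/* For example, a 10-byte word-aligned copy is expanded into one doubleword
+   move followed by two byte moves, while an unaligned copy is done with
+   word and byte moves only.  */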
+
+int
+avr32_gen_movmemsi (rtx * operands)
+{
+  HOST_WIDE_INT bytes_to_go;
+  rtx src, dst;
+  rtx st_src, st_dst;
+  int ptr_offset = 0;
+  int block_size;
+  int dst_in_struct_p, src_in_struct_p;
+  int dst_scalar_p, src_scalar_p;
+  int unaligned;
+
+  if (GET_CODE (operands[2]) != CONST_INT
+      || GET_CODE (operands[3]) != CONST_INT
+      || INTVAL (operands[2]) > 64
+      || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
+    return 0;
+  
+  unaligned = (INTVAL (operands[3]) & 3) != 0;
+
+  block_size = 4;
+
+  st_dst = XEXP (operands[0], 0);
+  st_src = XEXP (operands[1], 0);
+
+  dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
+  dst_scalar_p = MEM_SCALAR_P (operands[0]);
+  src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
+  src_scalar_p = MEM_SCALAR_P (operands[1]);
+
+  dst = copy_to_mode_reg (SImode, st_dst);
+  src = copy_to_mode_reg (SImode, st_src);
+
+  bytes_to_go = INTVAL (operands[2]);
+
+  while (bytes_to_go)
+    {
+      enum machine_mode move_mode;
+      /* There seems to be a problem with reloads for the movti pattern, so
+         this is disabled until that problem is resolved. */
+
+      /* if ( bytes_to_go >= GET_MODE_SIZE(TImode) ) move_mode = TImode; else
+       */
+      if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
+	move_mode = DImode;
+      else if (bytes_to_go >= GET_MODE_SIZE (SImode))
+	move_mode = SImode;
+      else
+	move_mode = QImode;
+
+      {
+	rtx dst_mem = gen_rtx_MEM (move_mode,
+				   gen_rtx_PLUS (SImode, dst,
+						 GEN_INT (ptr_offset)));
+	rtx src_mem = gen_rtx_MEM (move_mode,
+				   gen_rtx_PLUS (SImode, src,
+						 GEN_INT (ptr_offset)));
+	ptr_offset += GET_MODE_SIZE (move_mode);
+	bytes_to_go -= GET_MODE_SIZE (move_mode);
+
+	MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
+	MEM_SCALAR_P (dst_mem) = dst_scalar_p;
+
+	MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
+	MEM_SCALAR_P (src_mem) = src_scalar_p;
+	emit_move_insn (dst_mem, src_mem);
+
+      }
+    }
+
+  return 1;
+}
+
+
+
+/* Expand the function prologue.  */
+void
+avr32_expand_prologue (void)
+{
+  rtx insn, dwarf;
+  unsigned long saved_reg_mask, saved_fp_reg_mask;
+  int reglist8 = 0;
+
+  /* Naked functions do not have a prologue.  */
+  if (IS_NAKED (avr32_current_func_type ()))
+    return;
+
+  saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
+
+  if (saved_reg_mask)
+    {
+      /* Must push used registers */
+
+      /* Should we use PUSHM or STM? */
+      int usePUSHM = TRUE;
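+
+      /* pushm takes a compressed 8-bit register list: bit 0 selects r0-r3
+         as a group, bit 1 r4-r7, bit 2 r8-r9, bits 3-5 r10, r11 and r12
+         individually, and bit 6 lr.  If a group is only partially live we
+         fall back to stm with the full 16-bit mask instead.  */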
+      reglist8 = 0;
+      if (((saved_reg_mask & (1 << 0)) ||
+	   (saved_reg_mask & (1 << 1)) ||
+	   (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
+	{
+	  /* At least one of R0-R3 should be pushed.  */
+	  if (((saved_reg_mask & (1 << 0)) &&
+	       (saved_reg_mask & (1 << 1)) &&
+	       (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
+	    {
+	      /* All should be pushed */
+	      reglist8 |= 0x01;
+	    }
+	  else
+	    {
+	      usePUSHM = FALSE;
+	    }
+	}
+
+      if (((saved_reg_mask & (1 << 4)) ||
+	   (saved_reg_mask & (1 << 5)) ||
+	   (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
+	{
+	  /* At least one of R4-R7 should be pushed.  */
+	  if (((saved_reg_mask & (1 << 4)) &&
+	       (saved_reg_mask & (1 << 5)) &&
+	       (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
+	    {
+	      if (usePUSHM)
+		/* All should be pushed */
+		reglist8 |= 0x02;
+	    }
+	  else
+	    {
+	      usePUSHM = FALSE;
+	    }
+	}
+
+      if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
+	{
+	  /* At least one of R8-R9 should be pushed.  */
+	  if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
+	    {
+	      if (usePUSHM)
+		/* All should be pushed */
+		reglist8 |= 0x04;
+	    }
+	  else
+	    {
+	      usePUSHM = FALSE;
+	    }
+	}
+
+      if (saved_reg_mask & (1 << 10))
+	reglist8 |= 0x08;
+
+      if (saved_reg_mask & (1 << 11))
+	reglist8 |= 0x10;
+
+      if (saved_reg_mask & (1 << 12))
+	reglist8 |= 0x20;
+
+      if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
+	{
+	  /* Push LR */
+	  reglist8 |= 0x40;
+	}
+
+      if (usePUSHM)
+	{
+	  insn = emit_multi_reg_push (reglist8, TRUE);
+	}
+      else
+	{
+	  insn = emit_multi_reg_push (saved_reg_mask, FALSE);
+	}
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      /* Prevent this instruction from being scheduled after any other
+         instructions.  */
+      emit_insn (gen_blockage ());
+    }
+
+  saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+  if (saved_fp_reg_mask)
+    {
+      insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      /* Prevent this instruction from being scheduled after any other
+         instructions.  */
+      emit_insn (gen_blockage ());
+    }
+
+  /* Set frame pointer */
+  if (frame_pointer_needed)
+    {
+      insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+      RTX_FRAME_RELATED_P (insn) = 1;
+    }
+
+  if (get_frame_size () > 0)
+    {
+      if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
+	{
+	  insn = emit_insn (gen_rtx_SET (SImode,
+					 stack_pointer_rtx,
+					 gen_rtx_PLUS (SImode,
+						       stack_pointer_rtx,
+						       gen_rtx_CONST_INT
+						       (SImode,
+							-get_frame_size
+							()))));
+	  RTX_FRAME_RELATED_P (insn) = 1;
+	}
+      else
+	{
+	  /* The immediate is larger than 21 bits.  We must either use one of
+	     the pushed registers as temporary storage, or free up a temporary
+	     register by pushing one onto the stack. */
+	  rtx temp_reg, const_pool_entry, insn;
+	  if (saved_reg_mask)
+	    {
+	      temp_reg =
+		gen_rtx_REG (SImode,
+			     INTERNAL_REGNUM (avr32_get_saved_reg
+					      (saved_reg_mask)));
+	    }
+	  else
+	    {
+	      temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
+	      emit_move_insn (gen_rtx_MEM
+			      (SImode,
+			       gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
+			      temp_reg);
+	    }
+
+	  const_pool_entry =
+	    force_const_mem (SImode,
+			     gen_rtx_CONST_INT (SImode, get_frame_size ()));
+	  emit_move_insn (temp_reg, const_pool_entry);
+
+	  insn = emit_insn (gen_rtx_SET (SImode,
+					 stack_pointer_rtx,
+					 gen_rtx_MINUS (SImode,
+							stack_pointer_rtx,
+							temp_reg)));
+
+	  dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+			       gen_rtx_PLUS (SImode, stack_pointer_rtx,
+					     GEN_INT (-get_frame_size ())));
+	  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+						dwarf, REG_NOTES (insn));
+	  RTX_FRAME_RELATED_P (insn) = 1;
+
+	  if (!saved_reg_mask)
+	    {
+	      insn =
+		emit_move_insn (temp_reg,
+				gen_rtx_MEM (SImode,
+					     gen_rtx_POST_INC (SImode,
+							       gen_rtx_REG
+							       (SImode,
+								13))));
+	    }
+
+	  /* Mark the temp register as dead */
+	  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
+						REG_NOTES (insn));
+
+
+	}
+
+      /* Prevent the stack adjustment from being scheduled after any
+         instructions using the frame pointer.  */
+      emit_insn (gen_blockage ());
+    }
+
+  /* Load GOT */
+  if (flag_pic)
+    {
+      avr32_load_pic_register ();
+
+      /* gcc does not know that load or call instructions might use the pic
+         register, so it might schedule these instructions before the pic
+         register has been loaded.  To avoid this, emit a barrier for now.
+         TODO: find a better way to let gcc know which instructions might
+         use the pic register. */
+      emit_insn (gen_blockage ());
+    }
+  return;
+}
+
+void
+avr32_set_return_address (rtx source)
+{
+  rtx addr;
+  unsigned long saved_regs;
+
+  saved_regs = avr32_compute_save_reg_mask (TRUE);
+
+  if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
+    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
+  else
+    {
+      if (frame_pointer_needed)
+	addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
+      else
+	/* FIXME: Need to use scratch register if frame is large */
+	addr = plus_constant (stack_pointer_rtx, get_frame_size ());
+
+      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
+    }
+}
+
+
+
+/* Return the length of INSN.  LENGTH is the initial length computed by
+   attributes in the machine-description file.  */
+
+int
+avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
+			  int length ATTRIBUTE_UNUSED)
+{
+  return length;
+}
+
+void
+avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
+				 int iscond ATTRIBUTE_UNUSED,
+				 rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
+{
+
+  unsigned long saved_reg_mask, saved_fp_reg_mask;
+  int insert_ret = TRUE;
+  int reglist8 = 0;
+  int stack_adjustment = get_frame_size ();
+  unsigned int func_type = avr32_current_func_type ();
+  FILE *f = asm_out_file;
+
+  /* Naked functions do not have an epilogue */
+  if (IS_NAKED (func_type))
+    return;
+
+  saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+
+  saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
+
+  /* Reset frame pointer */
+  if (stack_adjustment > 0)
+    {
+      if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
+	{
+	  fprintf (f, "\tsub sp, %i # Reset Frame Pointer\n",
+		   -stack_adjustment);
+	}
+      else
+	{
+	  /* TODO! Is it safe to use r8 as scratch?? */
+	  fprintf (f, "\tmov r8, lo(%i) # Reset Frame Pointer\n",
+		   -stack_adjustment);
+	  fprintf (f, "\torh r8, hi(%i) # Reset Frame Pointer\n",
+		   -stack_adjustment);
+	  fprintf (f, "\tadd sp,r8  # Reset Frame Pointer\n");
+	}
+    }
+
+  if (saved_fp_reg_mask)
+    {
+      char reglist[64];		/* 64 bytes should be enough... */
+      avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
+      fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
+      if (saved_fp_reg_mask & ~0xff)
+	{
+	  saved_fp_reg_mask &= ~0xff;
+	  avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
+	  fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
+	}
+    }
+
+  if (saved_reg_mask)
+    {
+      /* Must pop used registers */
+
+      /* Should we use POPM or LDM? */
+      int usePOPM = TRUE;
+      if (((saved_reg_mask & (1 << 0)) ||
+	   (saved_reg_mask & (1 << 1)) ||
+	   (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
+	{
+	  /* At least one of R0-R3 should be popped */
+	  if (((saved_reg_mask & (1 << 0)) &&
+	       (saved_reg_mask & (1 << 1)) &&
+	       (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
+	    {
+	      /* All should be popped */
+	      reglist8 |= 0x01;
+	    }
+	  else
+	    {
+	      usePOPM = FALSE;
+	    }
+	}
+
+      if (((saved_reg_mask & (1 << 4)) ||
+	   (saved_reg_mask & (1 << 5)) ||
+	   (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
+	{
+	  /* At least one of R4-R7 should be popped */
+	  if (((saved_reg_mask & (1 << 4)) &&
+	       (saved_reg_mask & (1 << 5)) &&
+	       (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
+	    {
+	      if (usePOPM)
+		/* All should be popped */
+		reglist8 |= 0x02;
+	    }
+	  else
+	    {
+	      usePOPM = FALSE;
+	    }
+	}
+
+      if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
+	{
+	  /* At least one of R8-R9 should be popped */
+	  if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
+	    {
+	      if (usePOPM)
+		/* All should be popped */
+		reglist8 |= 0x04;
+	    }
+	  else
+	    {
+	      usePOPM = FALSE;
+	    }
+	}
+
+      if (saved_reg_mask & (1 << 10))
+	reglist8 |= 0x08;
+
+      if (saved_reg_mask & (1 << 11))
+	reglist8 |= 0x10;
+
+      if (saved_reg_mask & (1 << 12))
+	reglist8 |= 0x20;
+
+      if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
+	/* Pop LR */
+	reglist8 |= 0x40;
+
+      if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
+	/* Pop LR into PC. */
+	reglist8 |= 0x80;
+
+      if (usePOPM)
+	{
+	  char reglist[64];	/* 64 bytes should be enough... */
+	  avr32_make_reglist8 (reglist8, (char *) reglist);
+
+	  if (reglist8 & 0x80)
+	    /* This instruction is also a return */
+	    insert_ret = FALSE;
+
+	  if (r12_imm && !insert_ret)
+	    fprintf (f, "\tpopm    %s, r12=%li\n", reglist, INTVAL (r12_imm));
+	  else
+	    fprintf (f, "\tpopm    %s\n", reglist);
+
+	}
+      else
+	{
+	  char reglist[64];	/* 64 bytes should be enough... */
+	  avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
+	  if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
+	    /* This instruction is also a return */
+	    insert_ret = FALSE;
+
+	  if (r12_imm && !insert_ret)
+	    fprintf (f, "\tldm    sp++, %s, r12=%li\n", reglist,
+		     INTVAL (r12_imm));
+	  else
+	    fprintf (f, "\tldm    sp++, %s\n", reglist);
+
+	}
+
+    }
+
+  if (IS_INTERRUPT (func_type))
+    {
+      fprintf (f, "\trete\n");
+    }
+  else if (insert_ret)
+    {
+      if (r12_imm)
+	fprintf (f, "\tretal    %li\n", INTVAL (r12_imm));
+      else
+	fprintf (f, "\tretal    r12\n");
+    }
+}
+
+/* Function for converting a fp-register mask to a
+   reglistCPD8 register list string. */
+void
+avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
+{
+  int i;
+
+  /* Make sure reglist_string is empty */
+  reglist_string[0] = '\0';
+
+  for (i = 0; i < NUM_FP_REGS; i += 2)
+    {
+      if (reglist_mask & (1 << i))
+	{
+	  strlen (reglist_string) ?
+	    sprintf (reglist_string, "%s, %s-%s", reglist_string,
+		     reg_names[INTERNAL_FP_REGNUM (i)],
+		     reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
+	    sprintf (reglist_string, "%s-%s",
+		     reg_names[INTERNAL_FP_REGNUM (i)],
+		     reg_names[INTERNAL_FP_REGNUM (i + 1)]);
+	}
+    }
+}
+
+/* Function for converting a fp-register mask to a
+   reglistCP8 register list string. */
+void
+avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
+{
+  int i;
+
+  /* Make sure reglist_string is empty */
+  reglist_string[0] = '\0';
+
+  for (i = 0; i < NUM_FP_REGS; ++i)
+    {
+      if (reglist_mask & (1 << i))
+	{
+	  strlen (reglist_string) ?
+	    sprintf (reglist_string, "%s, %s", reglist_string,
+		     reg_names[INTERNAL_FP_REGNUM (i)]) :
+	    sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
+	}
+    }
+}
+
+void
+avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
+{
+  int i;
+
+  /* Make sure reglist16_string is empty */
+  reglist16_string[0] = '\0';
+
+  for (i = 0; i < 16; ++i)
+    {
+      if (reglist16_vect & (1 << i))
+	{
+	  strlen (reglist16_string) ?
+	    sprintf (reglist16_string, "%s, %s", reglist16_string,
+		     reg_names[INTERNAL_REGNUM (i)]) :
+	    sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
+	}
+    }
+}
+
+int
+avr32_convert_to_reglist16 (int reglist8_vect)
+{
+  int reglist16_vect = 0;
+  if (reglist8_vect & 0x1)
+    reglist16_vect |= 0xF;
+  if (reglist8_vect & 0x2)
+    reglist16_vect |= 0xF0;
+  if (reglist8_vect & 0x4)
+    reglist16_vect |= 0x300;
+  if (reglist8_vect & 0x8)
+    reglist16_vect |= 0x400;
+  if (reglist8_vect & 0x10)
+    reglist16_vect |= 0x800;
+  if (reglist8_vect & 0x20)
+    reglist16_vect |= 0x1000;
+  if (reglist8_vect & 0x40)
+    reglist16_vect |= 0x4000;
+  if (reglist8_vect & 0x80)
+    reglist16_vect |= 0x8000;
+
+  return reglist16_vect;
+}
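+
+/* Illustration (derived from the mapping above): a PUSHM-style mask of
+   0x41, i.e. r0-r3 plus lr, expands to the 16-bit mask 0x400F: bits 0-3
+   for r0-r3 and bit 14 for lr.  */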
+
+void
+avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
+{
+  /* Make sure reglist8_string is empty */
+  reglist8_string[0] = '\0';
+
+  if (reglist8_vect & 0x1)
+    sprintf (reglist8_string, "r0-r3");
+  if (reglist8_vect & 0x2)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
+					reglist8_string) :
+      sprintf (reglist8_string, "r4-r7");
+  if (reglist8_vect & 0x4)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
+					reglist8_string) :
+      sprintf (reglist8_string, "r8-r9");
+  if (reglist8_vect & 0x8)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
+					reglist8_string) :
+      sprintf (reglist8_string, "r10");
+  if (reglist8_vect & 0x10)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
+					reglist8_string) :
+      sprintf (reglist8_string, "r11");
+  if (reglist8_vect & 0x20)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
+					reglist8_string) :
+      sprintf (reglist8_string, "r12");
+  if (reglist8_vect & 0x40)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
+					reglist8_string) :
+      sprintf (reglist8_string, "lr");
+  if (reglist8_vect & 0x80)
+    strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
+					reglist8_string) :
+      sprintf (reglist8_string, "pc");
+}
+
+int
+avr32_eh_return_data_regno (int n)
+{
+  if (n >= 0 && n <= 3)
+    return 8 + n;
+  else
+    return INVALID_REGNUM;
+}
+
+/* Compute the distance from register FROM to register TO.
+   These can be the arg pointer, the frame pointer or
+   the stack pointer.
+   Typical stack layout looks like this:
+
+       old stack pointer -> |    |
+                             ----
+                            |    | \
+                            |    |   saved arguments for
+                            |    |   vararg functions
+       arg_pointer       -> |    | /
+                             ----
+                            |    | \
+                            |    |   call saved
+                            |    |   registers
+                            |    | /
+       frame ptr         ->  ----
+                            |    | \
+                            |    |   local
+                            |    |   variables
+       stack ptr         -> |    | /
+                             ----
+                            |    | \
+                            |    |   outgoing
+                            |    |   arguments
+                            |    | /
+                             ----
+
+  For a given function some or all of these stack components
+  may not be needed, giving rise to the possibility of
+  eliminating some of the registers.
+
+  The values returned by this function must reflect the behaviour
+  of avr32_expand_prologue() and avr32_compute_save_reg_mask().
+
+  The sign of the number returned reflects the direction of stack
+  growth, so the values are positive for all eliminations except
+  from the soft frame pointer to the hard frame pointer.  */
+
+
+int
+avr32_initial_elimination_offset (int from, int to)
+{
+  int i;
+  int call_saved_regs = 0;
+  unsigned long saved_reg_mask, saved_fp_reg_mask;
+  unsigned int local_vars = get_frame_size ();
+
+  saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
+  saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+
+  for (i = 0; i < 16; ++i)
+    {
+      if (saved_reg_mask & (1 << i))
+	call_saved_regs += 4;
+    }
+
+  for (i = 0; i < NUM_FP_REGS; ++i)
+    {
+      if (saved_fp_reg_mask & (1 << i))
+	call_saved_regs += 4;
+    }
+
+  switch (from)
+    {
+    case ARG_POINTER_REGNUM:
+      switch (to)
+	{
+	case STACK_POINTER_REGNUM:
+	  return call_saved_regs + local_vars;
+	case FRAME_POINTER_REGNUM:
+	  return call_saved_regs;
+	default:
+	  abort ();
+	}
+    case FRAME_POINTER_REGNUM:
+      switch (to)
+	{
+	case STACK_POINTER_REGNUM:
+	  return local_vars;
+	default:
+	  abort ();
+	}
+    default:
+      abort ();
+    }
+}
+
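+/* Worked example (illustrative only): for a function that saves r7 and lr
+   and has 16 bytes of local variables, call_saved_regs above is 8, so the
+   ARG_POINTER -> STACK_POINTER offset is 8 + 16 = 24, the ARG_POINTER ->
+   FRAME_POINTER offset is 8 and the FRAME_POINTER -> STACK_POINTER offset
+   is 16.  */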
+
+/*
+  Returns an rtx used when passing the next argument to a function.
+  avr32_init_cumulative_args() and avr32_function_arg_advance() set which
+  register to use.
+*/
+rtx
+avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
+		    tree type, int named)
+{
+  int index = -1;
+
+  HOST_WIDE_INT arg_size, arg_rsize;
+  if (type)
+    {
+      arg_size = int_size_in_bytes (type);
+    }
+  else
+    {
+      arg_size = GET_MODE_SIZE (mode);
+    }
+  arg_rsize = PUSH_ROUNDING (arg_size);
+
+  /*
+     The last time this macro is called, it is called with mode == VOIDmode,
+     and its result is passed to the call or call_value pattern as operands 2
+     and 3 respectively. */
+  if (mode == VOIDmode)
+    {
+      return gen_rtx_CONST_INT (SImode, 22);	/* ToDo: fixme. */
+    }
+
+  if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
+    {
+      return NULL_RTX;
+    }
+
+  if (arg_rsize == 8)
+    {
+      /* use r11:r10 or r9:r8. */
+      if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
+	index = 1;
+      else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
+	index = 3;
+      else
+	index = -1;
+    }
+  else if (arg_rsize == 4)
+    {				/* Use first available register */
+      index = 0;
+      while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
+	index++;
+      if (index > LAST_CUM_REG_INDEX)
+	index = -1;
+    }
+
+  SET_REG_INDEX (cum, index);
+
+  if (GET_REG_INDEX (cum) >= 0)
+    return gen_rtx_REG (mode,
+			avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
+
+  return NULL_RTX;
+}
+
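+/* Example (illustrative, not part of the original code): for a call such
+   as f (int a, long long b, int c), A is given index 0 (r12), B needs a
+   register pair and gets index 1 so that it is passed in r11:r10, and C
+   then takes the first unused index, which maps to r9.  */
+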
+/*
+  Set the register used for passing the first argument to a function.
+*/
+void
+avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
+			    rtx libname ATTRIBUTE_UNUSED,
+			    tree fndecl ATTRIBUTE_UNUSED)
+{
+  /* Set all registers as unused. */
+  SET_INDEXES_UNUSED (cum);
+
+  /* Reset uses_anonymous_args */
+  cum->uses_anonymous_args = 0;
+
+  /* Reset size of stack pushed arguments */
+  cum->stack_pushed_args_size = 0;
+
+  /* If the function returns a value in memory, r12 is used as a
+     Return Value Pointer. */
+
+  if (fntype != 0 && avr32_return_in_memory (TREE_TYPE (fntype), fntype))
+    {
+      SET_REG_INDEX (cum, 0);
+      SET_USED_INDEX (cum, GET_REG_INDEX (cum));
+    }
+}
+
+/*
+  Set register used for passing the next argument to a function. Only the
+  Scratch Registers are used.
+
+		number  name
+		   15   r15  PC
+		   14   r14  LR
+		   13   r13 _SP_________
+     FIRST_CUM_REG 12   r12 _||_
+		   10   r11  ||
+		   11   r10 _||_  Scratch Registers
+		    8   r9   ||
+  LAST_SCRATCH_REG  9   r8  _\/_________
+		    6   r7   /\
+		    7   r6   ||
+		    4   r5   ||
+		    5   r4   ||
+		    2   r3   ||
+		    3   r2   ||
+		    0   r1   ||
+		    1   r0  _||_________
+
+*/
+void
+avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
+			    tree type, int named ATTRIBUTE_UNUSED)
+{
+  HOST_WIDE_INT arg_size, arg_rsize;
+
+  if (type)
+    {
+      arg_size = int_size_in_bytes (type);
+    }
+  else
+    {
+      arg_size = GET_MODE_SIZE (mode);
+    }
+  arg_rsize = PUSH_ROUNDING (arg_size);
+
+  /* If the argument has to be passed on the stack, no register is used. */
+  if ((*targetm.calls.must_pass_in_stack) (mode, type))
+    {
+      cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
+      return;
+    }
+
+  /* Mark the used registers as "used". */
+  if (GET_REG_INDEX (cum) >= 0)
+    {
+      SET_USED_INDEX (cum, GET_REG_INDEX (cum));
+      if (arg_rsize == 8)
+	{
+	  SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
+	}
+    }
+  else
+    {
+      /* Had to use stack */
+      cum->stack_pushed_args_size += arg_rsize;
+    }
+}
+
+/*
+  Defines which direction to go to find the next register to use if the
+  argument is larger than one register, or for arguments shorter than an
+  int which are not promoted, such as the last part of structures whose
+  size is not a multiple of 4. */
+enum direction
+avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
+			    tree type)
+{
+  /* Pad upward for all aggregates except byte and halfword sized aggregates
+     which can be passed in registers. */
+  if (type
+      && AGGREGATE_TYPE_P (type)
+      && (int_size_in_bytes (type) != 1)
+      && !((int_size_in_bytes (type) == 2)
+	   && TYPE_ALIGN_UNIT (type) >= 2)
+      && (int_size_in_bytes (type) & 0x3))
+    {
+      return upward;
+    }
+
+  return downward;
+}
+
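+/* For instance (following the test above), a 3-byte or 6-byte aggregate is
+   padded upward, while a 2-byte aggregate with halfword alignment or any
+   aggregate whose size is a multiple of 4 is padded downward.  */
+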
+/*
+  Return an rtx used for the return value from a function call.
+*/
+rtx
+avr32_function_value (tree type, tree func)
+{
+  if (avr32_return_in_memory (type, func))
+    return NULL_RTX;
+
+  if (int_size_in_bytes (type) <= 4)
+    if (avr32_return_in_msb (type))
+      /* Aggregates of size less than a word which align the data in the
+         MSB must use SImode for r12. */
+      return gen_rtx_REG (SImode, RET_REGISTER);
+    else
+      return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
+  else if (int_size_in_bytes (type) <= 8)
+    return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
+
+  return NULL_RTX;
+}
+
+/*
+  Return an rtx used for the return value from a library function call.
+*/
+rtx
+avr32_libcall_value (enum machine_mode mode)
+{
+
+  if (GET_MODE_SIZE (mode) <= 4)
+    return gen_rtx_REG (mode, RET_REGISTER);
+  else if (GET_MODE_SIZE (mode) <= 8)
+    return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
+  else
+    return NULL_RTX;
+}
+
+/* Return TRUE if X references a SYMBOL_REF.  */
+int
+symbol_mentioned_p (rtx x)
+{
+  const char *fmt;
+  int i;
+
+  if (GET_CODE (x) == SYMBOL_REF)
+    return 1;
+
+  fmt = GET_RTX_FORMAT (GET_CODE (x));
+
+  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+    {
+      if (fmt[i] == 'E')
+	{
+	  int j;
+
+	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+	    if (symbol_mentioned_p (XVECEXP (x, i, j)))
+	      return 1;
+	}
+      else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
+	return 1;
+    }
+
+  return 0;
+}
+
+/* Return TRUE if X references a LABEL_REF.  */
+int
+label_mentioned_p (rtx x)
+{
+  const char *fmt;
+  int i;
+
+  if (GET_CODE (x) == LABEL_REF)
+    return 1;
+
+  fmt = GET_RTX_FORMAT (GET_CODE (x));
+  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+    {
+      if (fmt[i] == 'E')
+	{
+	  int j;
+
+	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+	    if (label_mentioned_p (XVECEXP (x, i, j)))
+	      return 1;
+	}
+      else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
+	return 1;
+    }
+
+  return 0;
+}
+
+
+int
+avr32_legitimate_pic_operand_p (rtx x)
+{
+
+  /* We can't have const, this must be broken down to a symbol. */
+  if (GET_CODE (x) == CONST)
+    return FALSE;
+
+  /* Can't access symbols or labels via the constant pool either */
+  if ((GET_CODE (x) == SYMBOL_REF
+       && CONSTANT_POOL_ADDRESS_P (x)
+       && (symbol_mentioned_p (get_pool_constant (x))
+	   || label_mentioned_p (get_pool_constant (x)))))
+    return FALSE;
+
+  return TRUE;
+}
+
+
+rtx
+legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
+			rtx reg)
+{
+
+  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
+    {
+      int subregs = 0;
+
+      if (reg == 0)
+	{
+	  if (no_new_pseudos)
+	    abort ();
+	  else
+	    reg = gen_reg_rtx (Pmode);
+
+	  subregs = 1;
+	}
+
+      emit_move_insn (reg, orig);
+
+      /* Only set current function as using pic offset table if flag_pic is
+         set. This is because this function is also used if
+         TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
+      if (flag_pic)
+	current_function_uses_pic_offset_table = 1;
+
+      /* Put a REG_EQUAL note on this insn, so that it can be optimized by
+         loop.  */
+      return reg;
+    }
+  else if (GET_CODE (orig) == CONST)
+    {
+      rtx base, offset;
+
+      if (flag_pic
+	  && GET_CODE (XEXP (orig, 0)) == PLUS
+	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+	return orig;
+
+      if (reg == 0)
+	{
+	  if (no_new_pseudos)
+	    abort ();
+	  else
+	    reg = gen_reg_rtx (Pmode);
+	}
+
+      if (GET_CODE (XEXP (orig, 0)) == PLUS)
+	{
+	  base =
+	    legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+	  offset =
+	    legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+				    base == reg ? 0 : reg);
+	}
+      else
+	abort ();
+
+      if (GET_CODE (offset) == CONST_INT)
+	{
+	  /* The base register doesn't really matter, we only want to test
+	     the index for the appropriate mode.  */
+	  if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
+	    {
+	      if (!no_new_pseudos)
+		offset = force_reg (Pmode, offset);
+	      else
+		abort ();
+	    }
+
+	  if (GET_CODE (offset) == CONST_INT)
+	    return plus_constant (base, INTVAL (offset));
+	}
+
+      return gen_rtx_PLUS (Pmode, base, offset);
+    }
+
+  return orig;
+}
+
+/* Generate code to load the PIC register.  */
+void
+avr32_load_pic_register (void)
+{
+  rtx l1, pic_tmp;
+  rtx global_offset_table;
+
+  if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
+    return;
+
+  if (!flag_pic)
+    abort ();
+
+  l1 = gen_label_rtx ();
+
+  global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+  pic_tmp =
+    gen_rtx_CONST (Pmode,
+		   gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
+				  global_offset_table));
+  emit_insn (gen_pic_load_addr
+	     (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
+  emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
+
+  /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
+     can cause life info to screw up.  */
+  emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
+}
+
+
+
+/* This hook should return true if values of type type are returned at the most
+   significant end of a register (in other words, if they are padded at the
+   least significant end). You can assume that type is returned in a register;
+   the caller is required to check this.  Note that the register provided by
+   FUNCTION_VALUE must be able to hold the complete return value. For example,
+   if a 1-, 2- or 3-byte structure is returned at the most significant end of a
+   4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
+bool
+avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
+{
+  /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
+     ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
+     false; else return true; */
+
+  return false;
+}
+
+
+/*
+  Returns one if a certain function value is going to be returned in memory
+  and zero if it is going to be returned in a register.
+
+  BLKmode and all other modes that are larger than 64 bits are returned in
+  memory.
+*/
+bool
+avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
+{
+  if (TYPE_MODE (type) == VOIDmode)
+    return false;
+
+  if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
+      || int_size_in_bytes (type) == -1)
+    {
+      return true;
+    }
+
+  /* If we have an aggregate then use the same mechanism as when checking if
+     it should be passed on the stack. */
+  if (type
+      && AGGREGATE_TYPE_P (type)
+      && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
+    return true;
+
+  return false;
+}
+
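+/* For example (following the checks above), a long long is returned in
+   registers, while a 12-byte structure, or any type of variable size, is
+   returned in memory.  */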
+
+/* Output the constant part of the trampoline.
+   lddpc    r0, pc[0x8:e] ; load static chain register
+   lddpc    pc, pc[0x8:e] ; jump to subroutine
+   .long    0		 ; Address of static chain,
+			 ; filled in by avr32_initialize_trampoline()
+   .long    0		 ; Address of subroutine,
+			 ; filled in by avr32_initialize_trampoline()
+*/
+void
+avr32_trampoline_template (FILE * file)
+{
+  fprintf (file, "\tlddpc    r0, pc[8]\n");
+  fprintf (file, "\tlddpc    pc, pc[8]\n");
+  /* make room for the address of the static chain. */
+  fprintf (file, "\t.long\t0\n");
+  /* make room for the address of the subroutine. */
+  fprintf (file, "\t.long\t0\n");
+}
+
+
+/*
+  Initialize the variable parts of a trampoline.
+*/
+void
+avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
+{
+  /* Store the address to the static chain. */
+  emit_move_insn (gen_rtx_MEM
+		  (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
+		  static_chain);
+
+  /* Store the address to the function. */
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
+		  fnaddr);
+
+  emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
+			gen_rtx_CONST_INT (SImode,
+					   AVR32_CACHE_INVALIDATE_ICACHE)));
+}
+
+/* Return nonzero if X is valid as an addressing register.  */
+int
+avr32_address_register_rtx_p (rtx x, int strict_p)
+{
+  int regno;
+
+  if (GET_CODE (x) != REG)
+    return 0;
+
+  regno = REGNO (x);
+
+  if (strict_p)
+    return REGNO_OK_FOR_BASE_P (regno);
+
+  return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
+}
+
+/* Return nonzero if INDEX is valid for an address index operand.  */
+int
+avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
+{
+  enum rtx_code code = GET_CODE (index);
+
+  if (mode == TImode)
+    return 0;
+
+  /* Standard coprocessor addressing modes.  */
+  if (code == CONST_INT)
+    {
+      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
+	/* Coprocessor mem insns have a smaller reach than ordinary mem insns */
+	return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
+      else
+	return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
+    }
+
+  if (avr32_address_register_rtx_p (index, strict_p))
+    return 1;
+
+  if (code == MULT)
+    {
+      rtx xiop0 = XEXP (index, 0);
+      rtx xiop1 = XEXP (index, 1);
+      return ((avr32_address_register_rtx_p (xiop0, strict_p)
+	       && power_of_two_operand (xiop1, SImode)
+	       && (INTVAL (xiop1) <= 8))
+	      || (avr32_address_register_rtx_p (xiop1, strict_p)
+		  && power_of_two_operand (xiop0, SImode)
+		  && (INTVAL (xiop0) <= 8)));
+    }
+  else if (code == ASHIFT)
+    {
+      rtx op = XEXP (index, 1);
+
+      return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
+	      && GET_CODE (op) == CONST_INT
+	      && INTVAL (op) > 0 && INTVAL (op) <= 3);
+    }
+
+  return 0;
+}
+
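+/* Summarizing the cases above: a legal index is either a constant
+   displacement (Ks16, or Ku14 for coprocessor float accesses), a base
+   register, or a register scaled by a power of two up to 8, e.g. an
+   address of the form r10[r3 << 2].  */
+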
+/*
+  Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
+  the RTX x is a legitimate memory address.
+
+  Returns FALSE if the address is not legitimate and TRUE if it is.
+*/
+
+/* Forward declaration*/
+int is_minipool_label (rtx label);
+
+int
+avr32_legitimate_address (enum machine_mode mode ATTRIBUTE_UNUSED,
+			  rtx x, int strict)
+{
+
+  switch (GET_CODE (x))
+    {
+    case REG:
+      return avr32_address_register_rtx_p (x, strict);
+    case CONST:
+      {
+	rtx label = avr32_find_symbol (x);
+	if (label
+	    && 
+            ( (CONSTANT_POOL_ADDRESS_P (label)
+               && !(flag_pic
+                    && (symbol_mentioned_p (get_pool_constant (label))
+                        || label_mentioned_p (get_pool_constant(label)))))
+              /* TODO! Can this ever happen??? */
+              || ((GET_CODE (label) == LABEL_REF)
+                  && GET_CODE (XEXP (label, 0)) == CODE_LABEL
+                  && is_minipool_label (XEXP (label, 0)))))
+          {
+            return TRUE;
+          }
+      }
+      break;
+    case LABEL_REF:
+      if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
+	  && is_minipool_label (XEXP (x, 0)))
+	{
+	  return TRUE;
+	}
+      break;
+    case SYMBOL_REF:
+      {
+	if (CONSTANT_POOL_ADDRESS_P (x)
+	    && !(flag_pic
+		 && (symbol_mentioned_p (get_pool_constant (x))
+		     || label_mentioned_p (get_pool_constant (x)))))
+	  return TRUE;
+	/*
+	   A symbol_ref is only legal if it is a function. If all of them were
+	   legal, a pseudo reg that is a constant would be replaced by a
+	   symbol_ref and produce illegal code. SYMBOL_REF_FLAG is set by
+	   ENCODE_SECTION_INFO. */
+	else if (SYMBOL_REF_RCALL_FUNCTION_P (x))
+	  return TRUE;
+	break;
+      }
+    case PRE_DEC:		/* (pre_dec (...)) */
+    case POST_INC:		/* (post_inc (...)) */
+      return avr32_address_register_rtx_p (XEXP (x, 0), strict);
+    case PLUS:			/* (plus (...) (...)) */
+      {
+	rtx xop0 = XEXP (x, 0);
+	rtx xop1 = XEXP (x, 1);
+
+	return ((avr32_address_register_rtx_p (xop0, strict)
+		 && avr32_legitimate_index_p (mode, xop1, strict))
+		|| (avr32_address_register_rtx_p (xop1, strict)
+		    && avr32_legitimate_index_p (mode, xop0, strict)));
+      }
+    default:
+      break;
+    }
+
+  return FALSE;
+}
+
+
+int
+avr32_const_double_immediate (rtx value)
+{
+  HOST_WIDE_INT hi, lo;
+
+  if (GET_CODE (value) != CONST_DOUBLE)
+    return FALSE;
+
+  if (GET_MODE (value) == DImode)
+    {
+      hi = CONST_DOUBLE_HIGH (value);
+      lo = CONST_DOUBLE_LOW (value);
+    }
+  else
+    {
+      HOST_WIDE_INT target_float[2];
+      hi = lo = 0;
+      real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
+		      GET_MODE (value));
+      lo = target_float[0];
+      hi = target_float[1];
+    }
+  if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
+      && ((GET_MODE (value) == SFmode)
+	  || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
+    {
+      return TRUE;
+    }
+
+  return FALSE;
+}
+
+
+int
+avr32_legitimate_constant_p (rtx x)
+{
+  switch (GET_CODE (x))
+    {
+    case CONST_INT:
+      return avr32_const_ok_for_constraint_p (INTVAL (x), 'K', "Ks21");
+    case CONST_DOUBLE:
+      if (GET_MODE (x) == SFmode
+	  || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
+	return avr32_const_double_immediate (x);
+      else
+	return 0;
+    case LABEL_REF:
+      return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
+    case SYMBOL_REF:
+      return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
+    case CONST:
+      /* We must handle this one in the movsi expansion in order for gcc not
+         to put it in the constant pool. */
+      return 0 /* flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS */ ;
+    case HIGH:
+    case CONST_VECTOR:
+      return 0;
+    default:
+      printf ("%s():\n", __FUNCTION__);
+      debug_rtx (x);
+      return 1;
+    }
+}
+
+
+/* Strip any special encoding from labels */
+const char *
+avr32_strip_name_encoding (const char *name)
+{
+  const char *stripped = name;
+
+  while (1)
+    {
+      switch (stripped[0])
+	{
+	case '#':
+	  stripped = strchr (name + 1, '#') + 1;
+	  break;
+	case '*':
+	  stripped = &stripped[1];
+	  break;
+	default:
+	  return stripped;
+	}
+    }
+}
+
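+/* For instance (based on the loop above), "*foo" is stripped to "foo" and
+   a name of the form "#something#bar" is stripped to "bar".  */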
+
+
+/* Do anything needed before RTL is emitted for each function.  */
+static struct machine_function *
+avr32_init_machine_status (void)
+{
+  struct machine_function *machine;
+  machine =
+    (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
+
+#if AVR32_FT_UNKNOWN != 0
+  machine->func_type = AVR32_FT_UNKNOWN;
+#endif
+
+  machine->minipool_label_head = 0;
+  machine->minipool_label_tail = 0;
+  return machine;
+}
+
+void
+avr32_init_expanders (void)
+{
+  /* Arrange to initialize and mark the machine per-function status.  */
+  init_machine_status = avr32_init_machine_status;
+}
+
+
+/* Return an RTX indicating where the return address to the
+   calling function can be found.  */
+
+rtx
+avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+  if (count != 0)
+    return NULL_RTX;
+
+  return get_hard_reg_initial_val (Pmode, LR_REGNUM);
+}
+
+
+void
+avr32_encode_section_info (tree decl, rtx rtl, int first)
+{
+
+  if (first && DECL_P (decl))
+    {
+      /* Set SYMBOL_REF_FLAG for local functions */
+      if (!TREE_PUBLIC (decl) && TREE_CODE (decl) == FUNCTION_DECL)
+	{
+	  if ((*targetm.binds_local_p) (decl))
+	    {
+	      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+	    }
+	}
+    }
+}
+
+
+void
+avr32_asm_output_ascii (FILE * stream, char *ptr, int len)
+{
+  int i, i_new = 0;
+  /* Each input byte expands to at most four output characters, plus the
+     terminating NUL written below.  */
+  char *new_ptr = xmalloc (4 * len + 1);
+  if (new_ptr == NULL)
+    internal_error ("Out of memory.");
+
+  for (i = 0; i < len; i++)
+    {
+      if (ptr[i] == '\n')
+	{
+	  new_ptr[i_new++] = '\\';
+	  new_ptr[i_new++] = '0';
+	  new_ptr[i_new++] = '1';
+	  new_ptr[i_new++] = '2';
+	}
+      else if (ptr[i] == '\"')
+	{
+	  new_ptr[i_new++] = '\\';
+	  new_ptr[i_new++] = '\"';
+	}
+      else if (ptr[i] == '\\')
+	{
+	  new_ptr[i_new++] = '\\';
+	  new_ptr[i_new++] = '\\';
+	}
+      else if (ptr[i] == '\0' && i + 1 < len)
+	{
+	  new_ptr[i_new++] = '\\';
+	  new_ptr[i_new++] = '0';
+	}
+      else
+	{
+	  new_ptr[i_new++] = ptr[i];
+	}
+    }
+
+  /* Terminate new_ptr. */
+  new_ptr[i_new] = '\0';
+  fprintf (stream, "\t.ascii\t\"%s\"\n", new_ptr);
+  free (new_ptr);
+}
+
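+/* Example (following the escaping above): the two-byte string "a\n" is
+   emitted as
+       .ascii "a\012"
+   so that newlines, quotes, backslashes and embedded NULs survive the trip
+   through the assembler.  */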
+
+void
+avr32_asm_output_label (FILE * stream, const char *name)
+{
+  name = avr32_strip_name_encoding (name);
+
+  /* Print the label. */
+  assemble_name (stream, name);
+  fprintf (stream, ":\n");
+}
+
+
+
+void
+avr32_asm_weaken_label (FILE * stream, const char *name)
+{
+  fprintf (stream, "\t.weak ");
+  assemble_name (stream, name);
+  fprintf (stream, "\n");
+}
+
+/*
+  Checks if a labelref is equal to a reserved word in the assembler. If it is,
+  insert a '_' before the label name.
+*/
+void
+avr32_asm_output_labelref (FILE * stream, const char *name)
+{
+  int verbatim = FALSE;
+  const char *stripped = name;
+  int strip_finished = FALSE;
+
+  while (!strip_finished)
+    {
+      switch (stripped[0])
+	{
+	case '#':
+	  stripped = strchr (name + 1, '#') + 1;
+	  break;
+	case '*':
+	  stripped = &stripped[1];
+	  verbatim = TRUE;
+	  break;
+	default:
+	  strip_finished = TRUE;
+	  break;
+	}
+    }
+
+  if (verbatim)
+    fputs (stripped, stream);
+  else
+    asm_fprintf (stream, "%U%s", stripped);
+}
+
+
+
+/*
+   Check if the comparison in compare_exp is redundant
+   for the condition given in next_cond given that the
+   needed flags are already set by an earlier instruction.
+   Uses cc_prev_status to check this.
+
+   Returns NULL_RTX if the compare is not redundant
+   or the new condition to use in the conditional
+   instruction if the compare is redundant.
+*/
+static rtx
+is_compare_redundant (rtx compare_exp, rtx next_cond)
+{
+  int z_flag_valid = FALSE;
+  int n_flag_valid = FALSE;
+  rtx new_cond;
+
+  if (GET_CODE (compare_exp) != COMPARE)
+    return NULL_RTX;
+
+
+  if (GET_MODE (compare_exp) != SImode)
+    return NULL_RTX;
+
+  if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
+    {
+      /* cc0 already contains the correct comparison -> delete cmp insn */
+      return next_cond;
+    }
+
+  switch (cc_prev_status.mdep.flags)
+    {
+    case CC_SET_VNCZ:
+    case CC_SET_NCZ:
+      n_flag_valid = TRUE;
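+      /* Fall through: the Z flag is valid as well.  */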
+    case CC_SET_CZ:
+    case CC_SET_Z:
+      z_flag_valid = TRUE;
+    }
+
+  if (cc_prev_status.mdep.value
+      && REG_P (XEXP (compare_exp, 0))
+      && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
+      && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
+      && next_cond != NULL_RTX)
+    {
+      if (INTVAL (XEXP (compare_exp, 1)) == 0
+	  && z_flag_valid
+	  && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
+	/* We can skip the comparison; the Z flag already reflects ops[0] */
+	return next_cond;
+      else if (n_flag_valid
+	       && ((INTVAL (XEXP (compare_exp, 1)) == 0
+		    && (GET_CODE (next_cond) == GE
+			|| GET_CODE (next_cond) == LT))
+		   || (INTVAL (XEXP (compare_exp, 1)) == -1
+		       && (GET_CODE (next_cond) == GT
+			   || GET_CODE (next_cond) == LE))))
+	{
+	  /* We can skip the comparison; the N flag already reflects ops[0],
+	     which means that we can use the mi/pl conditions to check if
+	     ops[0] is GE or LT 0. */
+	  if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
+	    new_cond =
+	      gen_rtx_UNSPEC (CCmode, gen_rtvec (2, cc0_rtx, const0_rtx),
+			      UNSPEC_COND_PL);
+	  else
+	    new_cond =
+	      gen_rtx_UNSPEC (CCmode, gen_rtvec (2, cc0_rtx, const0_rtx),
+			      UNSPEC_COND_MI);
+	  return new_cond;
+	}
+    }
+  return NULL_RTX;
+}
+
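+/* Example (illustrative): a "cp.w rX, 0" followed by a breq or brne can be
+   dropped by the function above when the previous instruction already set
+   the Z flag from rX, and a compare against 0 or -1 followed by a signed
+   branch can be rewritten to use the pl/mi conditions when the N flag is
+   valid.  */
+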
+/* Updates cc_status.  */
+void
+avr32_notice_update_cc (rtx exp, rtx insn)
+{
+  switch (get_attr_cc (insn))
+    {
+    case CC_CALL_SET:
+      CC_STATUS_INIT;
+      FPCC_STATUS_INIT;
+      /* Check if the function call returns a value in r12 */
+      if (REG_P (recog_data.operand[0])
+	  && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
+	{
+	  cc_status.flags = 0;
+	  cc_status.mdep.value =
+	    gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
+	  cc_status.mdep.flags = CC_SET_VNCZ;
+
+	}
+      break;
+    case CC_COMPARE:
+      /* Check whether the compare will be optimized away; if so, nothing
+         should be done */
+      if (is_compare_redundant (SET_SRC (exp), get_next_insn_cond (insn))
+	  == NULL_RTX)
+	{
+
+	  /* Reset the nonstandard flag */
+	  CC_STATUS_INIT;
+	  cc_status.flags = 0;
+	  cc_status.mdep.value = SET_SRC (exp);
+	  cc_status.mdep.flags = CC_SET_VNCZ;
+	}
+      break;
+    case CC_FPCOMPARE:
+      /* Check whether the floating-point compare will be optimized away;
+         if so, nothing should be done */
+      if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp)))
+	{
+	  /* cc0 already contains the correct comparison -> delete cmp insn */
+	  /* Reset the nonstandard flag */
+	  cc_status.mdep.fpvalue = SET_SRC (exp);
+	  cc_status.mdep.fpflags = CC_SET_CZ;
+	}
+      break;
+    case CC_FROM_FPCC:
+      /* Flags are updated with flags from the floating-point coprocessor.
+         Set the CC_NOT_SIGNED flag since the flags are set so that unsigned
+         condition codes can be used directly. */
+      CC_STATUS_INIT;
+      cc_status.flags = CC_NOT_SIGNED;
+      cc_status.mdep.value = cc_status.mdep.fpvalue;
+      cc_status.mdep.flags = cc_status.mdep.fpflags;
+      break;
+    case CC_BLD:
+      /* Bit load is kind of like an inverted testsi, because the Z flag is
+         inverted */
+      CC_STATUS_INIT;
+      cc_status.flags = CC_INVERTED;
+      cc_status.mdep.value = SET_SRC (exp);
+      cc_status.mdep.flags = CC_SET_Z;
+      break;
+    case CC_NONE:
+      /* Insn does not affect CC at all. Check if the instruction updates
+         some of the registers currently reflected in cc0 */
+
+      if ((GET_CODE (exp) == SET)
+	  && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
+	  && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
+	      || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
+	      || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
+	{
+	  CC_STATUS_INIT;
+	}
+
+      /* If this is a parallel we must step through each of the parallel
+         expressions */
+      if (GET_CODE (exp) == PARALLEL)
+	{
+	  int i;
+	  for (i = 0; i < XVECLEN (exp, 0); ++i)
+	    {
+	      rtx vec_exp = XVECEXP (exp, 0, i);
+	      if ((GET_CODE (vec_exp) == SET)
+		  && (cc_status.value1 || cc_status.value2
+		      || cc_status.mdep.value)
+		  && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
+		      || reg_mentioned_p (SET_DEST (vec_exp),
+					  cc_status.value2)
+		      || reg_mentioned_p (SET_DEST (vec_exp),
+					  cc_status.mdep.value)))
+		{
+		  CC_STATUS_INIT;
+		}
+	    }
+	}
+
+      /* Check if we have memory operations with post_inc or pre_dec on the
+         register currently reflected in cc0 */
+      if (GET_CODE (exp) == SET
+	  && GET_CODE (SET_SRC (exp)) == MEM
+	  && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
+	      || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
+	  &&
+	  (reg_mentioned_p
+	   (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
+	   || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
+			       cc_status.value2)
+	   || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
+			       cc_status.mdep.value)))
+	CC_STATUS_INIT;
+
+      if (GET_CODE (exp) == SET
+	  && GET_CODE (SET_DEST (exp)) == MEM
+	  && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
+	      || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
+	  &&
+	  (reg_mentioned_p
+	   (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
+	   || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
+			       cc_status.value2)
+	   || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
+			       cc_status.mdep.value)))
+	CC_STATUS_INIT;
+      break;
+
+    case CC_SET_VNCZ:
+      CC_STATUS_INIT;
+      cc_status.mdep.value = recog_data.operand[0];
+      cc_status.mdep.flags = CC_SET_VNCZ;
+      break;
+
+    case CC_SET_NCZ:
+      CC_STATUS_INIT;
+      cc_status.mdep.value = recog_data.operand[0];
+      cc_status.mdep.flags = CC_SET_NCZ;
+      break;
+
+    case CC_SET_CZ:
+      CC_STATUS_INIT;
+      cc_status.mdep.value = recog_data.operand[0];
+      cc_status.mdep.flags = CC_SET_CZ;
+      break;
+
+    case CC_SET_Z:
+      CC_STATUS_INIT;
+      cc_status.mdep.value = recog_data.operand[0];
+      cc_status.mdep.flags = CC_SET_Z;
+      break;
+
+    case CC_CLOBBER:
+      CC_STATUS_INIT;
+      break;
+
+    default:
+      CC_STATUS_INIT;
+    }
+}
+
+
+/*
+  Outputs to the stdio stream STREAM the assembler syntax for an instruction
+  operand X.  X is an RTL expression.
+*/
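+/* The operand codes handled below, as far as they can be inferred from
+   this function: 'i' prints the inverted condition, 'r' and 's' print
+   8-bit and 16-bit register lists, 'C' and 'D' print coprocessor register
+   lists, 'd' prints a constant in decimal, 'h' selects the top or bottom
+   halfword, 'm' selects the most significant half of a double-word
+   operand, and 'p' prints the bit position of a constant's lowest set bit
+   and suppresses the "[0]" suffix on plain register addresses.  */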
+void
+avr32_print_operand (FILE * stream, rtx x, int code)
+{
+  int error = 0;
+
+  switch (GET_CODE (x))
+    {
+    case UNSPEC:
+      switch (XINT (x, 1))
+	{
+	case UNSPEC_COND_PL:
+	  if (code == 'i')
+	    fputs ("mi", stream);
+	  else
+	    fputs ("pl", stream);
+	  break;
+	case UNSPEC_COND_MI:
+	  if (code == 'i')
+	    fputs ("pl", stream);
+	  else
+	    fputs ("mi", stream);
+	  break;
+	default:
+	  error = 1;
+	}
+      break;
+    case EQ:
+      if (code == 'i')
+	fputs ("ne", stream);
+      else
+	fputs ("eq", stream);
+      break;
+    case NE:
+      if (code == 'i')
+	fputs ("eq", stream);
+      else
+	fputs ("ne", stream);
+      break;
+    case GT:
+      if (code == 'i')
+	fputs ("le", stream);
+      else
+	fputs ("gt", stream);
+      break;
+    case GTU:
+      if (code == 'i')
+	fputs ("ls", stream);
+      else
+	fputs ("hi", stream);
+      break;
+    case LT:
+      if (code == 'i')
+	fputs ("ge", stream);
+      else
+	fputs ("lt", stream);
+      break;
+    case LTU:
+      if (code == 'i')
+	fputs ("hs", stream);
+      else
+	fputs ("lo", stream);
+      break;
+    case GE:
+      if (code == 'i')
+	fputs ("lt", stream);
+      else
+	fputs ("ge", stream);
+      break;
+    case GEU:
+      if (code == 'i')
+	fputs ("lo", stream);
+      else
+	fputs ("hs", stream);
+      break;
+    case LE:
+      if (code == 'i')
+	fputs ("gt", stream);
+      else
+	fputs ("le", stream);
+      break;
+    case LEU:
+      if (code == 'i')
+	fputs ("hi", stream);
+      else
+	fputs ("ls", stream);
+      break;
+    case CONST_INT:
+      {
+	int value = INTVAL (x);
+
+	if (code == 'i')
+	  {
+	    value++;
+	  }
+
+	if (code == 'p')
+	  {
+	    /* Set to bit position of first bit set in immediate */
+	    int i, bitpos = 32;
+	    for (i = 0; i < 32; i++)
+	      if (value & (1 << i))
+		{
+		  bitpos = i;
+		  break;
+		}
+	    value = bitpos;
+	  }
+
+	if (code == 'r')
+	  {
+	    /* Reglist 8 */
+	    char op[50];
+	    op[0] = '\0';
+
+	    if (value & 0x01)
+	      sprintf (op, "r0-r3");
+	    if (value & 0x02)
+	      strlen (op) ? sprintf (op, "%s, r4-r7", op) : sprintf (op,
+								     "r4-r7");
+	    if (value & 0x04)
+	      strlen (op) ? sprintf (op, "%s, r8-r9", op) : sprintf (op,
+								     "r8-r9");
+	    if (value & 0x08)
+	      strlen (op) ? sprintf (op, "%s, r10", op) : sprintf (op, "r10");
+	    if (value & 0x10)
+	      strlen (op) ? sprintf (op, "%s, r11", op) : sprintf (op, "r11");
+	    if (value & 0x20)
+	      strlen (op) ? sprintf (op, "%s, r12", op) : sprintf (op, "r12");
+	    if (value & 0x40)
+	      strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr");
+	    if (value & 0x80)
+	      strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc");
+
+	    fputs (op, stream);
+	  }
+	else if (code == 's')
+	  {
+	    /* Reglist 16 */
+	    char reglist16_string[100];
+	    int i;
+	    reglist16_string[0] = '\0';
+
+	    for (i = 0; i < 16; ++i)
+	      {
+		if (value & (1 << i))
+		  {
+		    strlen (reglist16_string) ? sprintf (reglist16_string,
+							 "%s, %s",
+							 reglist16_string,
+							 reg_names
+							 [INTERNAL_REGNUM
+							  (i)]) :
+		      sprintf (reglist16_string, "%s",
+			       reg_names[INTERNAL_REGNUM (i)]);
+		  }
+	      }
+	    fputs (reglist16_string, stream);
+	  }
+	else if (code == 'C')
+	  {
+	    /* RegListCP8 */
+	    char reglist_string[100];
+	    avr32_make_fp_reglist_w (value, (char *) reglist_string);
+	    fputs (reglist_string, stream);
+	  }
+	else if (code == 'D')
+	  {
+	    /* RegListCPD8 */
+	    char reglist_string[100];
+	    avr32_make_fp_reglist_d (value, (char *) reglist_string);
+	    fputs (reglist_string, stream);
+	  }
+	else if (code == 'd')
+	  {
+	    /* Print in decimal format */
+	    fprintf (stream, "%d", value);
+	  }
+	else if (code == 'h')
+	  {
+	    /* Print halfword part of word */
+	    fputs (value ? "b" : "t", stream);
+	  }
+	else
+	  {
+	    /* Normal constant */
+	    fprintf (stream, "%d", value);
+	  }
+	break;
+      }
+    case CONST_DOUBLE:
+      {
+	HOST_WIDE_INT hi, lo;
+	if (GET_MODE (x) == DImode)
+	  {
+	    hi = CONST_DOUBLE_HIGH (x);
+	    lo = CONST_DOUBLE_LOW (x);
+	  }
+	else
+	  {
+	    HOST_WIDE_INT target_float[2];
+	    hi = lo = 0;
+	    real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
+			    GET_MODE (x));
+	    /* For doubles the most significant part starts at index 0. */
+	    if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+	      {
+		hi = target_float[0];
+		lo = target_float[1];
+	      }
+	    else
+	      {
+		lo = target_float[0];
+	      }
+	  }
+
+	if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
+	    && ((GET_MODE (x) == SFmode)
+		|| avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
+	  {
+	    if (code == 'm')
+	      fprintf (stream, "%ld", hi);
+	    else
+	      fprintf (stream, "%ld", lo);
+	  }
+	else
+	  {
+	    fprintf (stream, "value too large");
+	  }
+	break;
+      }
+    case CONST:
+      output_addr_const (stream, XEXP (XEXP (x, 0), 0));
+      fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
+      break;
+    case REG:
+      /* Swap register name if the register is DImode or DFmode. */
+      if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
+	{
+	  /* Double register must have an even numbered address */
+	  gcc_assert (!(REGNO (x) % 2));
+	  if (code == 'm')
+	    fputs (reg_names[true_regnum (x)], stream);
+	  else
+	    fputs (reg_names[true_regnum (x) + 1], stream);
+	}
+      else if (GET_MODE (x) == TImode)
+	{
+	  switch (code)
+	    {
+	    case 'T':
+	      fputs (reg_names[true_regnum (x)], stream);
+	      break;
+	    case 'U':
+	      fputs (reg_names[true_regnum (x) + 1], stream);
+	      break;
+	    case 'L':
+	      fputs (reg_names[true_regnum (x) + 2], stream);
+	      break;
+	    case 'B':
+	      fputs (reg_names[true_regnum (x) + 3], stream);
+	      break;
+	    default:
+	      fprintf (stream, "%s, %s, %s, %s",
+		       reg_names[true_regnum (x) + 3],
+		       reg_names[true_regnum (x) + 2],
+		       reg_names[true_regnum (x) + 1],
+		       reg_names[true_regnum (x)]);
+	      break;
+	    }
+	}
+      else
+	{
+	  fputs (reg_names[true_regnum (x)], stream);
+	}
+      break;
+    case CODE_LABEL:
+    case LABEL_REF:
+    case SYMBOL_REF:
+      output_addr_const (stream, x);
+      break;
+    case MEM:
+      switch (GET_CODE (XEXP (x, 0)))
+	{
+	case LABEL_REF:
+	case SYMBOL_REF:
+	  output_addr_const (stream, XEXP (x, 0));
+	  break;
+	case MEM:
+	  switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
+	    {
+	    case SYMBOL_REF:
+	      output_addr_const (stream, XEXP (XEXP (x, 0), 0));
+	      break;
+	    default:
+	      error = 1;
+	      break;
+	    }
+	  break;
+	case REG:
+	  avr32_print_operand (stream, XEXP (x, 0), 0);
+	  if (code != 'p')
+	    fputs ("[0]", stream);
+	  break;
+	case PRE_DEC:
+	  fputs ("--", stream);
+	  avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
+	  break;
+	case POST_INC:
+	  avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
+	  fputs ("++", stream);
+	  break;
+	case PLUS:
+	  {
+	    rtx op0 = XEXP (XEXP (x, 0), 0);
+	    rtx op1 = XEXP (XEXP (x, 0), 1);
+	    rtx base = NULL_RTX, offset = NULL_RTX;
+
+	    if (avr32_address_register_rtx_p (op0, 1))
+	      {
+		base = op0;
+		offset = op1;
+	      }
+	    else if (avr32_address_register_rtx_p (op1, 1))
+	      {
+		/* Operands are switched. */
+		base = op1;
+		offset = op0;
+	      }
+
+	    gcc_assert (base && offset
+			&& avr32_address_register_rtx_p (base, 1)
+			&& avr32_legitimate_index_p (GET_MODE (x), offset,
+						     1));
+
+	    avr32_print_operand (stream, base, 0);
+	    fputs ("[", stream);
+	    avr32_print_operand (stream, offset, 0);
+	    fputs ("]", stream);
+	    break;
+	  }
+	case CONST:
+	  output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
+	  fprintf (stream, " + %ld",
+		   INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
+	  break;
+	default:
+	  error = 1;
+	}
+      break;
+    case MULT:
+      {
+	int value = INTVAL (XEXP (x, 1));
+
+	/* Convert immediate in multiplication into a shift immediate */
+	switch (value)
+	  {
+	  case 2:
+	    value = 1;
+	    break;
+	  case 4:
+	    value = 2;
+	    break;
+	  case 8:
+	    value = 3;
+	    break;
+	  default:
+	    value = 0;
+	  }
+	fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
+		 value);
+	break;
+      }
+    case ASHIFT:
+      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+	fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
+		 (int) INTVAL (XEXP (x, 1)));
+      else if (REG_P (XEXP (x, 1)))
+	fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
+		 reg_names[true_regnum (XEXP (x, 1))]);
+      else
+	{
+	  error = 1;
+	}
+      break;
+    case LSHIFTRT:
+      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+	fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
+		 (int) INTVAL (XEXP (x, 1)));
+      else if (REG_P (XEXP (x, 1)))
+	fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
+		 reg_names[true_regnum (XEXP (x, 1))]);
+      else
+	{
+	  error = 1;
+	}
+      fprintf (stream, ">>");
+      break;
+    case PARALLEL:
+      {
+	/* Load store multiple */
+	int i;
+	int count = XVECLEN (x, 0);
+	int reglist16 = 0;
+	char reglist16_string[100];
+
+	for (i = 0; i < count; ++i)
+	  {
+	    rtx vec_elm = XVECEXP (x, 0, i);
+	    if (GET_CODE (vec_elm) != SET)
+	      {
+		debug_rtx (vec_elm);
+		internal_error ("Unknown element in parallel expression!");
+	      }
+	    if (GET_CODE (XEXP (vec_elm, 0)) == REG)
+	      {
+		/* Load multiple */
+		reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
+	      }
+	    else
+	      {
+		/* Store multiple */
+		reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
+	      }
+	  }
+
+	avr32_make_reglist16 (reglist16, reglist16_string);
+	fputs (reglist16_string, stream);
+
+	break;
+      }
+
+    default:
+      error = 1;
+    }
+
+  if (error)
+    {
+      debug_rtx (x);
+      internal_error ("Illegal expression for avr32_print_operand");
+    }
+}
+
+rtx
+avr32_get_note_reg_equiv (rtx insn)
+{
+  rtx note;
+
+  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+
+  if (note != NULL_RTX)
+    return XEXP (note, 0);
+  else
+    return NULL_RTX;
+}
+
+/*
+  Outputs to the stdio stream STREAM the assembler syntax for an instruction
+  operand that is a memory reference whose address is X.  X is an RTL
+  expression.
+
+  ToDo: fixme.
+*/
+void
+avr32_print_operand_address (FILE * stream, rtx x)
+{
+  fprintf (stream, "(%d) /* address */", REGNO (x));
+}
+
+/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned.  */
+bool
+avr32_got_mentioned_p (rtx addr)
+{
+  if (GET_CODE (addr) == MEM)
+    addr = XEXP (addr, 0);
+  while (GET_CODE (addr) == CONST)
+    addr = XEXP (addr, 0);
+  if (GET_CODE (addr) == SYMBOL_REF)
+    {
+      return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
+    }
+  if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
+    {
+      bool l1, l2;
+
+      l1 = avr32_got_mentioned_p (XEXP (addr, 0));
+      l2 = avr32_got_mentioned_p (XEXP (addr, 1));
+      return l1 || l2;
+    }
+  return false;
+}
+
+
+/* Find the symbol in an address expression.  */
+
+rtx
+avr32_find_symbol (rtx addr)
+{
+  if (GET_CODE (addr) == MEM)
+    addr = XEXP (addr, 0);
+
+  while (GET_CODE (addr) == CONST)
+    addr = XEXP (addr, 0);
+
+  if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+    return addr;
+  if (GET_CODE (addr) == PLUS)
+    {
+      rtx l1, l2;
+
+      l1 = avr32_find_symbol (XEXP (addr, 0));
+      l2 = avr32_find_symbol (XEXP (addr, 1));
+      if (l1 != NULL_RTX && l2 == NULL_RTX)
+	return l1;
+      else if (l1 == NULL_RTX && l2 != NULL_RTX)
+	return l2;
+    }
+
+  return NULL_RTX;
+}
+
+
+/* Routines for manipulation of the constant pool.  */
+
+/* AVR32 instructions cannot load a large constant directly into a
+   register; they have to come from a pc relative load.  The constant
+   must therefore be placed in the addressable range of the pc
+   relative load.  Depending on the precise pc relative load
+   instruction the range is somewhere between 256 bytes and 4k.  This
+   means that we often have to dump a constant inside a function, and
+   generate code to branch around it.
+
+   It is important to minimize this, since the branches will slow
+   things down and make the code larger.
+
+   Normally we can hide the table after an existing unconditional
+   branch so that there is no interruption of the flow, but in the
+   worst case the code looks like this:
+
+	lddpc	rn, L1
+	...
+	rjmp	L2
+	align
+	L1:	.long value
+	L2:
+	...
+
+	lddpc	rn, L3
+	...
+	rjmp	L4
+	align
+	L3:	.long value
+	L4:
+	...
+
+   We fix this by performing a scan after scheduling, which notices
+   which instructions need to have their operands fetched from the
+   constant table and builds the table.
+
+   The algorithm starts by building a table of all the constants that
+   need fixing up and all the natural barriers in the function (places
+   where a constant table can be dropped without breaking the flow).
+   For each fixup we note how far the pc-relative replacement will be
+   able to reach and the offset of the instruction into the function.
+
+   Having built the table we then group the fixes together to form
+   tables that are as large as possible (subject to addressing
+   constraints) and emit each table of constants after the last
+   barrier that is within range of all the instructions in the group.
+   If a group does not contain a barrier, then we forcibly create one
+   by inserting a jump instruction into the flow.  Once the table has
+   been inserted, the insns are then modified to reference the
+   relevant entry in the pool.
+
+   Possible enhancements to the algorithm (not implemented) are:
+
+   1) For some processors and object formats, there may be benefit in
+   aligning the pools to the start of cache lines; this alignment
+   would need to be taken into account when calculating addressability
+   of a pool.  */
+
+/* These typedefs are located at the start of this file, so that
+   they can be used in the prototypes there.  This comment is to
+   remind readers of that fact so that the following structures
+   can be understood more easily.
+
+     typedef struct minipool_node    Mnode;
+     typedef struct minipool_fixup   Mfix;  */
+
+struct minipool_node
+{
+  /* Doubly linked chain of entries.  */
+  Mnode *next;
+  Mnode *prev;
+  /* The maximum offset into the code that this entry can be placed.  While
+     pushing fixes for forward references, all entries are sorted in order of
+     increasing max_address.  */
+  HOST_WIDE_INT max_address;
+  /* Similarly for an entry inserted for a backwards ref.  */
+  HOST_WIDE_INT min_address;
+  /* The number of fixes referencing this entry.  This can become zero if we
+     "unpush" an entry.  In this case we ignore the entry when we come to
+     emit the code.  */
+  int refcount;
+  /* The offset from the start of the minipool.  */
+  HOST_WIDE_INT offset;
+  /* The value in the table.  */
+  rtx value;
+  /* The mode of value.  */
+  enum machine_mode mode;
+  /* The size of the value.  */
+  int fix_size;
+};
+
+struct minipool_fixup
+{
+  Mfix *next;
+  rtx insn;
+  HOST_WIDE_INT address;
+  rtx *loc;
+  enum machine_mode mode;
+  int fix_size;
+  rtx value;
+  Mnode *minipool;
+  HOST_WIDE_INT forwards;
+  HOST_WIDE_INT backwards;
+};
+
+
+/* Fixes less than a word need padding out to a word boundary.  */
+#define MINIPOOL_FIX_SIZE(mode, value)                          \
+  (IS_FORCE_MINIPOOL(value) ? 0 :                               \
+   (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
+
+#define IS_FORCE_MINIPOOL(x)                    \
+  (GET_CODE(x) == UNSPEC &&                     \
+   XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
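+
+/* Rough examples of how MINIPOOL_FIX_SIZE evaluates (illustration only):
+   QImode and HImode constants are padded out to 4 bytes, SImode constants
+   take 4 bytes, DImode constants take 8 bytes, and a force-minipool UNSPEC
+   entry contributes 0 bytes since it is never actually emitted.  */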
+
+static Mnode *minipool_vector_head;
+static Mnode *minipool_vector_tail;
+
+/* The linked list of all minipool fixes required for this function.  */
+Mfix *minipool_fix_head;
+Mfix *minipool_fix_tail;
+/* The fix entry for the current minipool, once it has been placed.  */
+Mfix *minipool_barrier;
+
+/* Determines if INSN is the start of a jump table.  Returns the end
+   of the TABLE or NULL_RTX.  */
+static rtx
+is_jump_table (rtx insn)
+{
+  rtx table;
+
+  if (GET_CODE (insn) == JUMP_INSN
+      && JUMP_LABEL (insn) != NULL
+      && ((table = next_real_insn (JUMP_LABEL (insn)))
+	  == next_real_insn (insn))
+      && table != NULL
+      && GET_CODE (table) == JUMP_INSN
+      && (GET_CODE (PATTERN (table)) == ADDR_VEC
+	  || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
+    return table;
+
+  return NULL_RTX;
+}
+
+static HOST_WIDE_INT
+get_jump_table_size (rtx insn)
+{
+  /* ADDR_VECs only take room if read-only data goes into the text section.  */
+  if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
+      || 1
+#endif
+    )
+    {
+      rtx body = PATTERN (insn);
+      int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
+
+      return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
+    }
+
+  return 0;
+}
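+
+/* For example, assuming jump tables are placed in the text section: an
+   ADDR_DIFF_VEC in HImode with 8 elements occupies
+   GET_MODE_SIZE (HImode) * 8 = 16 bytes, while the same table in SImode
+   would occupy 32 bytes.  */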
+
+/* Move a minipool fix MP from its current location to before MAX_MP.
+   If MAX_MP is NULL, then MP doesn't need moving, but the addressing
+   constraints may need updating.  */
+static Mnode *
+move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
+			       HOST_WIDE_INT max_address)
+{
+  /* This should never be true and the code below assumes these are
+     different.  */
+  if (mp == max_mp)
+    abort ();
+
+  if (max_mp == NULL)
+    {
+      if (max_address < mp->max_address)
+	mp->max_address = max_address;
+    }
+  else
+    {
+      if (max_address > max_mp->max_address - mp->fix_size)
+	mp->max_address = max_mp->max_address - mp->fix_size;
+      else
+	mp->max_address = max_address;
+
+      /* Unlink MP from its current position.  Since max_mp is non-null,
+         mp->prev must be non-null.  */
+      mp->prev->next = mp->next;
+      if (mp->next != NULL)
+	mp->next->prev = mp->prev;
+      else
+	minipool_vector_tail = mp->prev;
+
+      /* Re-insert it before MAX_MP.  */
+      mp->next = max_mp;
+      mp->prev = max_mp->prev;
+      max_mp->prev = mp;
+
+      if (mp->prev != NULL)
+	mp->prev->next = mp;
+      else
+	minipool_vector_head = mp;
+    }
+
+  /* Save the new entry.  */
+  max_mp = mp;
+
+  /* Scan over the preceding entries and adjust their addresses as
+     required.  */
+  while (mp->prev != NULL
+	 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
+    {
+      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
+      mp = mp->prev;
+    }
+
+  return max_mp;
+}
+
+/* Add a constant to the minipool for a forward reference.  Returns the
+   node added or NULL if the constant will not fit in this pool.  */
+static Mnode *
+add_minipool_forward_ref (Mfix * fix)
+{
+  /* If set, max_mp is the first pool_entry that has a lower constraint than
+     the one we are trying to add.  */
+  Mnode *max_mp = NULL;
+  HOST_WIDE_INT max_address = fix->address + fix->forwards;
+  Mnode *mp;
+
+  /* If this fix's address is greater than the address of the first entry,
+     then we can't put the fix in this pool.  We subtract the size of the
+     current fix to ensure that if the table is fully packed we still have
+     enough room to insert this value by shuffling the other fixes forwards.  */
+  if (minipool_vector_head &&
+      fix->address >= minipool_vector_head->max_address - fix->fix_size)
+    return NULL;
+
+  /* Scan the pool to see if a constant with the same value has already been
+     added.  While we are doing this, also note the location where we must
+     insert the constant if it doesn't already exist.  */
+  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+    {
+      if (GET_CODE (fix->value) == GET_CODE (mp->value)
+	  && fix->mode == mp->mode
+	  && (GET_CODE (fix->value) != CODE_LABEL
+	      || (CODE_LABEL_NUMBER (fix->value)
+		  == CODE_LABEL_NUMBER (mp->value)))
+	  && rtx_equal_p (fix->value, mp->value))
+	{
+	  /* More than one fix references this entry.  */
+	  mp->refcount++;
+	  return move_minipool_fix_forward_ref (mp, max_mp, max_address);
+	}
+
+      /* Note the insertion point if necessary.  */
+      if (max_mp == NULL && mp->max_address > max_address)
+	max_mp = mp;
+
+    }
+
+  /* The value is not currently in the minipool, so we need to create a new
+     entry for it.  If MAX_MP is NULL, the entry will be put on the end of
+     the list since the placement is less constrained than any existing
+     entry.  Otherwise, we insert the new fix before MAX_MP and, if
+     necessary, adjust the constraints on the other entries.  */
+  mp = xmalloc (sizeof (*mp));
+  mp->fix_size = fix->fix_size;
+  mp->mode = fix->mode;
+  mp->value = fix->value;
+  mp->refcount = 1;
+  /* Not yet required for a backwards ref.  */
+  mp->min_address = -65536;
+
+  if (max_mp == NULL)
+    {
+      mp->max_address = max_address;
+      mp->next = NULL;
+      mp->prev = minipool_vector_tail;
+
+      if (mp->prev == NULL)
+	{
+	  minipool_vector_head = mp;
+	  minipool_vector_label = gen_label_rtx ();
+	}
+      else
+	mp->prev->next = mp;
+
+      minipool_vector_tail = mp;
+    }
+  else
+    {
+      if (max_address > max_mp->max_address - mp->fix_size)
+	mp->max_address = max_mp->max_address - mp->fix_size;
+      else
+	mp->max_address = max_address;
+
+      mp->next = max_mp;
+      mp->prev = max_mp->prev;
+      max_mp->prev = mp;
+      if (mp->prev != NULL)
+	mp->prev->next = mp;
+      else
+	minipool_vector_head = mp;
+    }
+
+  /* Save the new entry.  */
+  max_mp = mp;
+
+  /* Scan over the preceding entries and adjust their addresses as
+     required.  */
+  while (mp->prev != NULL
+	 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
+    {
+      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
+      mp = mp->prev;
+    }
+
+  return max_mp;
+}
+
+static Mnode *
+move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
+				HOST_WIDE_INT min_address)
+{
+  HOST_WIDE_INT offset;
+
+  /* This should never be true, and the code below assumes these are
+     different.  */
+  if (mp == min_mp)
+    abort ();
+
+  if (min_mp == NULL)
+    {
+      if (min_address > mp->min_address)
+	mp->min_address = min_address;
+    }
+  else
+    {
+      /* We will adjust this below if it is too loose.  */
+      mp->min_address = min_address;
+
+      /* Unlink MP from its current position.  Since min_mp is non-null,
+         mp->next must be non-null.  */
+      mp->next->prev = mp->prev;
+      if (mp->prev != NULL)
+	mp->prev->next = mp->next;
+      else
+	minipool_vector_head = mp->next;
+
+      /* Reinsert it after MIN_MP.  */
+      mp->prev = min_mp;
+      mp->next = min_mp->next;
+      min_mp->next = mp;
+      if (mp->next != NULL)
+	mp->next->prev = mp;
+      else
+	minipool_vector_tail = mp;
+    }
+
+  min_mp = mp;
+
+  offset = 0;
+  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+    {
+      mp->offset = offset;
+      if (mp->refcount > 0)
+	offset += mp->fix_size;
+
+      if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
+	mp->next->min_address = mp->min_address + mp->fix_size;
+    }
+
+  return min_mp;
+}
+
+/* Add a constant to the minipool for a backward reference.  Returns the
+   node added or NULL if the constant will not fit in this pool.
+
+   Note that the code for insertion for a backwards reference can be
+   somewhat confusing because the calculated offsets for each fix do
+   not take into account the size of the pool (which is still under
+   construction).  */
+static Mnode *
+add_minipool_backward_ref (Mfix * fix)
+{
+  /* If set, min_mp is the last pool_entry that has a lower constraint than
+     the one we are trying to add.  */
+  Mnode *min_mp = NULL;
+  /* This can be negative, since it is only a constraint.  */
+  HOST_WIDE_INT min_address = fix->address - fix->backwards;
+  Mnode *mp;
+
+  /* If we can't reach the current pool from this insn, or if we can't insert
+     this entry at the end of the pool without pushing other fixes out of
+     range, then we don't try.  This ensures that we can't fail later on.  */
+  if (min_address >= minipool_barrier->address
+      || (minipool_vector_tail->min_address + fix->fix_size
+	  >= minipool_barrier->address))
+    return NULL;
+
+  /* Scan the pool to see if a constant with the same value has already been
+     added.  While we are doing this, also note the location where we must
+     insert the constant if it doesn't already exist.  */
+  for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
+    {
+      if (GET_CODE (fix->value) == GET_CODE (mp->value)
+	  && fix->mode == mp->mode
+	  && (GET_CODE (fix->value) != CODE_LABEL
+	      || (CODE_LABEL_NUMBER (fix->value)
+		  == CODE_LABEL_NUMBER (mp->value)))
+	  && rtx_equal_p (fix->value, mp->value)
+	  /* Check that there is enough slack to move this entry to the end
+	     of the table (this is conservative).  */
+	  && (mp->max_address
+	      > (minipool_barrier->address
+		 + minipool_vector_tail->offset
+		 + minipool_vector_tail->fix_size)))
+	{
+	  mp->refcount++;
+	  return move_minipool_fix_backward_ref (mp, min_mp, min_address);
+	}
+
+      if (min_mp != NULL)
+	mp->min_address += fix->fix_size;
+      else
+	{
+	  /* Note the insertion point if necessary.  */
+	  if (mp->min_address < min_address)
+	    {
+	      min_mp = mp;
+	    }
+	  else if (mp->max_address
+		   < minipool_barrier->address + mp->offset + fix->fix_size)
+	    {
+	      /* Inserting before this entry would push the fix beyond its
+	         maximum address (which can happen if we have re-located a
+	         forwards fix); force the new fix to come after it.  */
+	      min_mp = mp;
+	      min_address = mp->min_address + fix->fix_size;
+	    }
+	}
+    }
+
+  /* We need to create a new entry.  */
+  mp = xmalloc (sizeof (*mp));
+  mp->fix_size = fix->fix_size;
+  mp->mode = fix->mode;
+  mp->value = fix->value;
+  mp->refcount = 1;
+  mp->max_address = minipool_barrier->address + 65536;
+
+  mp->min_address = min_address;
+
+  if (min_mp == NULL)
+    {
+      mp->prev = NULL;
+      mp->next = minipool_vector_head;
+
+      if (mp->next == NULL)
+	{
+	  minipool_vector_tail = mp;
+	  minipool_vector_label = gen_label_rtx ();
+	}
+      else
+	mp->next->prev = mp;
+
+      minipool_vector_head = mp;
+    }
+  else
+    {
+      mp->next = min_mp->next;
+      mp->prev = min_mp;
+      min_mp->next = mp;
+
+      if (mp->next != NULL)
+	mp->next->prev = mp;
+      else
+	minipool_vector_tail = mp;
+    }
+
+  /* Save the new entry.  */
+  min_mp = mp;
+
+  if (mp->prev)
+    mp = mp->prev;
+  else
+    mp->offset = 0;
+
+  /* Scan over the following entries and adjust their offsets.  */
+  while (mp->next != NULL)
+    {
+      if (mp->next->min_address < mp->min_address + mp->fix_size)
+	mp->next->min_address = mp->min_address + mp->fix_size;
+
+      if (mp->refcount)
+	mp->next->offset = mp->offset + mp->fix_size;
+      else
+	mp->next->offset = mp->offset;
+
+      mp = mp->next;
+    }
+
+  return min_mp;
+}
+
+static void
+assign_minipool_offsets (Mfix * barrier)
+{
+  HOST_WIDE_INT offset = 0;
+  Mnode *mp;
+
+  minipool_barrier = barrier;
+
+  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+    {
+      mp->offset = offset;
+
+      if (mp->refcount > 0
+	  /* If the value is (const_int 0) then this is a fake entry so don't
+	     add an offset for it since it will not be output. */
+	  && !(GET_CODE (mp->value) == CONST_INT && INTVAL (mp->value) == 0))
+	offset += mp->fix_size;
+    }
+}
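+
+/* As a worked example of the offset assignment above: a pool holding a
+   live 4-byte entry, a live 8-byte entry and a dummy (const_int 0) entry,
+   in that order, gets the offsets 0, 4 and 12, the dummy entry adding
+   nothing to the pool size.  */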
+
+/* Print a symbolic form of X to the debug file, F.  */
+static void
+avr32_print_value (FILE * f, rtx x)
+{
+  switch (GET_CODE (x))
+    {
+    case CONST_INT:
+      fprintf (f, "0x%x", (int) INTVAL (x));
+      return;
+
+    case CONST_DOUBLE:
+      fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
+      return;
+
+    case CONST_VECTOR:
+      {
+	int i;
+
+	fprintf (f, "<");
+	for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
+	  {
+	    fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
+	    if (i < (CONST_VECTOR_NUNITS (x) - 1))
+	      fputc (',', f);
+	  }
+	fprintf (f, ">");
+      }
+      return;
+
+    case CONST_STRING:
+      fprintf (f, "\"%s\"", XSTR (x, 0));
+      return;
+
+    case SYMBOL_REF:
+      fprintf (f, "`%s'", XSTR (x, 0));
+      return;
+
+    case LABEL_REF:
+      fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
+      return;
+
+    case CONST:
+      avr32_print_value (f, XEXP (x, 0));
+      return;
+
+    case PLUS:
+      avr32_print_value (f, XEXP (x, 0));
+      fprintf (f, "+");
+      avr32_print_value (f, XEXP (x, 1));
+      return;
+
+    case PC:
+      fprintf (f, "pc");
+      return;
+
+    default:
+      fprintf (f, "????");
+      return;
+    }
+}
+
+int
+is_minipool_label (rtx label)
+{
+  minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
+
+  if (GET_CODE (label) != CODE_LABEL)
+    return FALSE;
+
+  while (cur_mp_label)
+    {
+      if (CODE_LABEL_NUMBER (label)
+	  == CODE_LABEL_NUMBER (cur_mp_label->label))
+	return TRUE;
+      cur_mp_label = cur_mp_label->next;
+    }
+  return FALSE;
+}
+
+static void
+new_minipool_label (rtx label)
+{
+  if (!cfun->machine->minipool_label_head)
+    {
+      cfun->machine->minipool_label_head =
+	ggc_alloc (sizeof (minipool_labels));
+      cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
+      cfun->machine->minipool_label_head->label = label;
+      cfun->machine->minipool_label_head->next = 0;
+      cfun->machine->minipool_label_head->prev = 0;
+    }
+  else
+    {
+      cfun->machine->minipool_label_tail->next =
+	ggc_alloc (sizeof (minipool_labels));
+      cfun->machine->minipool_label_tail->next->label = label;
+      cfun->machine->minipool_label_tail->next->next = 0;
+      cfun->machine->minipool_label_tail->next->prev =
+	cfun->machine->minipool_label_tail;
+      cfun->machine->minipool_label_tail =
+	cfun->machine->minipool_label_tail->next;
+    }
+}
+
+/* Output the literal table */
+static void
+dump_minipool (rtx scan)
+{
+  Mnode *mp;
+  Mnode *nmp;
+
+  if (dump_file)
+    fprintf (dump_file,
+	     ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
+	     INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
+
+  scan = emit_insn_after (gen_consttable_start (), scan);
+  scan = emit_insn_after (gen_align_4 (), scan);
+  scan = emit_label_after (minipool_vector_label, scan);
+  new_minipool_label (minipool_vector_label);
+
+  for (mp = minipool_vector_head; mp != NULL; mp = nmp)
+    {
+      if (mp->refcount > 0)
+	{
+	  if (dump_file)
+	    {
+	      fprintf (dump_file,
+		       ";;  Offset %u, min %ld, max %ld ",
+		       (unsigned) mp->offset, (unsigned long) mp->min_address,
+		       (unsigned long) mp->max_address);
+	      avr32_print_value (dump_file, mp->value);
+	      fputc ('\n', dump_file);
+	    }
+
+	  switch (mp->fix_size)
+	    {
+#ifdef HAVE_consttable_4
+	    case 4:
+	      scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
+	      break;
+
+#endif
+#ifdef HAVE_consttable_8
+	    case 8:
+	      scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
+	      break;
+
+#endif
+	    case 0:
+	      /* This can happen for force-minipool entries which are just
+	         there to force the minipool to be generated. */
+	      break;
+	    default:
+	      abort ();
+	      break;
+	    }
+	}
+
+      nmp = mp->next;
+      free (mp);
+    }
+
+  minipool_vector_head = minipool_vector_tail = NULL;
+  scan = emit_insn_after (gen_consttable_end (), scan);
+  scan = emit_barrier_after (scan);
+}
+
+/* Return the cost of forcibly inserting a barrier after INSN.  */
+static int
+avr32_barrier_cost (rtx insn)
+{
+  /* Basing the location of the pool on the loop depth is preferable, but at
+     the moment, the basic block information seems to be corrupt by this
+     stage of the compilation.  */
+  int base_cost = 50;
+  rtx next = next_nonnote_insn (insn);
+
+  if (next != NULL && GET_CODE (next) == CODE_LABEL)
+    base_cost -= 20;
+
+  switch (GET_CODE (insn))
+    {
+    case CODE_LABEL:
+      /* It will always be better to place the table before the label, rather
+         than after it.  */
+      return 50;
+
+    case INSN:
+    case CALL_INSN:
+      return base_cost;
+
+    case JUMP_INSN:
+      return base_cost - 10;
+
+    default:
+      return base_cost + 10;
+    }
+}
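+
+/* Example costs derived from the cases above: a JUMP_INSN immediately
+   followed by a CODE_LABEL costs 50 - 20 - 10 = 20 and is therefore the
+   preferred place to drop a pool; a plain INSN or CALL_INSN costs 50 (30
+   when followed by a label); a CODE_LABEL itself always costs 50.  */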
+
+/* Find the best place in the insn stream in the range
+   (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
+   Create the barrier by inserting a jump and add a new fix entry for
+   it.  */
+static Mfix *
+create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
+{
+  HOST_WIDE_INT count = 0;
+  rtx barrier;
+  rtx from = fix->insn;
+  rtx selected = from;
+  int selected_cost;
+  HOST_WIDE_INT selected_address;
+  Mfix *new_fix;
+  HOST_WIDE_INT max_count = max_address - fix->address;
+  rtx label = gen_label_rtx ();
+
+  selected_cost = avr32_barrier_cost (from);
+  selected_address = fix->address;
+
+  while (from && count < max_count)
+    {
+      rtx tmp;
+      int new_cost;
+
+      /* This code shouldn't have been called if there was a natural barrier
+         within range.  */
+      if (GET_CODE (from) == BARRIER)
+	abort ();
+
+      /* Count the length of this insn.  */
+      count += get_attr_length (from);
+
+      /* If there is a jump table, add its length.  */
+      tmp = is_jump_table (from);
+      if (tmp != NULL)
+	{
+	  count += get_jump_table_size (tmp);
+
+	  /* Jump tables aren't in a basic block, so base the cost on the
+	     dispatch insn.  If we select this location, we will still put
+	     the pool after the table.  */
+	  new_cost = avr32_barrier_cost (from);
+
+	  if (count < max_count && new_cost <= selected_cost)
+	    {
+	      selected = tmp;
+	      selected_cost = new_cost;
+	      selected_address = fix->address + count;
+	    }
+
+	  /* Continue after the dispatch table.  */
+	  from = NEXT_INSN (tmp);
+	  continue;
+	}
+
+      new_cost = avr32_barrier_cost (from);
+
+      if (count < max_count && new_cost <= selected_cost)
+	{
+	  selected = from;
+	  selected_cost = new_cost;
+	  selected_address = fix->address + count;
+	}
+
+      from = NEXT_INSN (from);
+    }
+
+  /* Create a new JUMP_INSN that branches around a barrier.  */
+  from = emit_jump_insn_after (gen_jump (label), selected);
+  JUMP_LABEL (from) = label;
+  barrier = emit_barrier_after (from);
+  emit_label_after (label, barrier);
+
+  /* Create a minipool barrier entry for the new barrier.  */
+  new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
+  new_fix->insn = barrier;
+  new_fix->address = selected_address;
+  new_fix->next = fix->next;
+  fix->next = new_fix;
+
+  return new_fix;
+}
+
+/* Record that there is a natural barrier in the insn stream at
+   ADDRESS.  */
+static void
+push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
+{
+  Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
+
+  fix->insn = insn;
+  fix->address = address;
+
+  fix->next = NULL;
+  if (minipool_fix_head != NULL)
+    minipool_fix_tail->next = fix;
+  else
+    minipool_fix_head = fix;
+
+  minipool_fix_tail = fix;
+}
+
+/* Record INSN, which will need fixing up to load a value from the
+   minipool.  ADDRESS is the offset of the insn since the start of the
+   function; LOC is a pointer to the part of the insn which requires
+   fixing; VALUE is the constant that must be loaded, which is of type
+   MODE.  */
+static void
+push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
+		   enum machine_mode mode, rtx value)
+{
+  Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
+  rtx body = PATTERN (insn);
+
+  fix->insn = insn;
+  fix->address = address;
+  fix->loc = loc;
+  fix->mode = mode;
+  fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
+  fix->value = value;
+
+  if (GET_CODE (body) == PARALLEL)
+    {
+      /* Mcall : Ks16 << 2 */
+      fix->forwards = ((1 << 15) - 1) << 2;
+      fix->backwards = (1 << 15) << 2;
+    }
+  else if (GET_CODE (body) == SET
+	   && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
+    {
+      /* Word Load */
+      if (TARGET_HARD_FLOAT
+	  && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
+	{
+	  /* Ldc0.w : Ku12 << 2 */
+	  fix->forwards = ((1 << 12) - 1) << 2;
+	  fix->backwards = 0;
+	}
+      else
+	{
+	  if (optimize_size)
+	    {
+	      /* Lddpc : Ku7 << 2 */
+	      fix->forwards = ((1 << 7) - 1) << 2;
+	      fix->backwards = 0;
+	    }
+	  else
+	    {
+	      /* Ld.w : Ks16 */
+	      fix->forwards = ((1 << 15) - 4);
+	      fix->backwards = (1 << 15);
+	    }
+	}
+    }
+  else if (GET_CODE (body) == SET
+	   && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
+    {
+      /* Double word load */
+      if (TARGET_HARD_FLOAT
+	  && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
+	{
+	  /* Ldc0.d : Ku12 << 2 */
+	  fix->forwards = ((1 << 12) - 1) << 2;
+	  fix->backwards = 0;
+	}
+      else
+	{
+	  /* Ld.d : Ks16 */
+	  fix->forwards = ((1 << 15) - 4);
+	  fix->backwards = (1 << 15);
+	}
+    }
+  else if (GET_CODE (body) == UNSPEC_VOLATILE
+	   && XINT (body, 1) == VUNSPEC_MVRC)
+    {
+      /* Coprocessor load */
+      /* Ldc : Ku8 << 2 */
+      fix->forwards = ((1 << 8) - 1) << 2;
+      fix->backwards = 0;
+    }
+  else
+    {
+      /* Assume the worst case, which is an lddpc insn. */
+      fix->forwards = ((1 << 7) - 1) << 2;
+      fix->backwards = 0;
+    }
+
+  fix->minipool = NULL;
+
+  /* If an insn doesn't have a range defined for it, then it isn't expecting
+     to be reworked by this code.  Better to abort now than to generate duff
+     assembly code.  */
+  if (fix->forwards == 0 && fix->backwards == 0)
+    abort ();
+
+  if (dump_file)
+    {
+      fprintf (dump_file,
+	       ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
+	       GET_MODE_NAME (mode),
+	       INSN_UID (insn), (unsigned long) address,
+	       -1 * (long) fix->backwards, (long) fix->forwards);
+      avr32_print_value (dump_file, fix->value);
+      fprintf (dump_file, "\n");
+    }
+
+  /* Add it to the chain of fixes.  */
+  fix->next = NULL;
+
+  if (minipool_fix_head != NULL)
+    minipool_fix_tail->next = fix;
+  else
+    minipool_fix_head = fix;
+
+  minipool_fix_tail = fix;
+}
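+
+/* For reference, the ranges computed above work out to (in bytes):
+   mcall (Ks16 << 2) reaches 131068 forwards and 131072 backwards,
+   ld.w (Ks16) reaches 32764 forwards and 32768 backwards,
+   lddpc (Ku7 << 2) reaches 508 forwards only,
+   ldc0.w/ldc0.d (Ku12 << 2) reach 16380 forwards only, and
+   ldc (Ku8 << 2) reaches 1020 forwards only.  */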
+
+/* Scan INSN and note any of its operands that need fixing.
+   If DO_PUSHES is false we do not actually push any of the fixups
+   needed.  The function returns TRUE if any fixups were needed/pushed.
+   This is used by avr32_memory_load_p() which needs to know about loads
+   of constants that will be converted into minipool loads.  */
+static bool
+note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
+{
+  bool result = false;
+  int opno;
+
+  extract_insn (insn);
+
+  if (!constrain_operands (1))
+    fatal_insn_not_found (insn);
+
+  if (recog_data.n_alternatives == 0)
+    return false;
+
+  /* Fill in recog_op_alt with information about the constraints of this
+     insn.  */
+  preprocess_constraints ();
+
+  for (opno = 0; opno < recog_data.n_operands; opno++)
+    {
+      rtx op;
+
+      /* Things we need to fix can only occur in inputs.  */
+      if (recog_data.operand_type[opno] != OP_IN)
+	continue;
+
+      op = recog_data.operand[opno];
+
+      if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
+	{
+	  if (do_pushes)
+	    {
+	      rtx cop = avoid_constant_pool_reference (op);
+
+	      /* Casting the address of something to a mode narrower than a
+	         word can cause avoid_constant_pool_reference() to return the
+	         pool reference itself.  That's no good to us here.  Let's
+	         just hope that we can use the constant pool value directly.
+	         */
+	      if (op == cop)
+		cop = get_pool_constant (XEXP (op, 0));
+
+	      push_minipool_fix (insn, address,
+				 recog_data.operand_loc[opno],
+				 recog_data.operand_mode[opno], cop);
+	    }
+
+	  result = true;
+	}
+      else if (TARGET_HAS_ASM_ADDR_PSEUDOS
+	       && avr32_address_operand (op, GET_MODE (op)))
+	{
+	  /* Handle pseudo instructions using a direct address. These pseudo
+	     instructions might need entries in the constant pool and we must
+	     therefore create a constant pool for them, in case the
+	     assembler/linker needs to insert entries. */
+	  if (do_pushes)
+	    {
+	      /* Push a dummy constant pool entry so that the .cpool
+	         directive is inserted at the appropriate place in the
+	         code even if there are no real constant pool entries. This
+	         is used by the assembler and linker to know where to put
+	         generated constant pool entries. */
+	      push_minipool_fix (insn, address,
+				 recog_data.operand_loc[opno],
+				 recog_data.operand_mode[opno],
+				 gen_rtx_UNSPEC (VOIDmode,
+						 gen_rtvec (1, const0_rtx),
+						 UNSPEC_FORCE_MINIPOOL));
+	      result = true;
+	    }
+	}
+    }
+  return result;
+}
+
+
+static int
+avr32_insn_is_cast (rtx insn)
+{
+
+  if (NONJUMP_INSN_P (insn)
+      && GET_CODE (PATTERN (insn)) == SET
+      && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
+	  || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
+      && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
+      && REG_P (SET_DEST (PATTERN (insn))))
+    return true;
+  return false;
+}
+
+/* FIXME: The level of nesting in this function is way too deep. It needs to be
+   torn apart.  */
+static void
+avr32_reorg_optimization (void)
+{
+  rtx first = get_insns ();
+  rtx insn;
+
+  if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
+    {
+
+      /* Scan through all insns looking for cast operations. */
+      if (dump_file)
+	{
+	  fprintf (dump_file, ";; Deleting redundant cast operations:\n");
+	}
+      for (insn = first; insn; insn = NEXT_INSN (insn))
+	{
+	  rtx reg, src_reg, scan;
+	  enum machine_mode mode;
+	  int unused_cast;
+	  rtx label_ref;
+
+	  if (avr32_insn_is_cast (insn)
+	      && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
+		  || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
+	    {
+	      mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
+	      reg = SET_DEST (PATTERN (insn));
+	      src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
+	    }
+	  else
+	    {
+	      continue;
+	    }
+
+	  unused_cast = false;
+	  label_ref = NULL_RTX;
+	  for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
+	    {
+	      /* Check if we have reached the destination of a simple
+	         conditional jump which we have already scanned past. If so,
+	         we can safely continue scanning. */
+	      if (LABEL_P (scan) && label_ref != NULL_RTX)
+		{
+		  if (CODE_LABEL_NUMBER (scan) ==
+		      CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
+		    label_ref = NULL_RTX;
+		  else
+		    break;
+		}
+
+	      if (!INSN_P (scan))
+		continue;
+
+	      /* For conditional jumps we can manage to keep on scanning if
+	         we meet the destination label later on before any new jump
+	         insns occur. */
+	      if (GET_CODE (scan) == JUMP_INSN)
+		{
+		  if (any_condjump_p (scan) && label_ref == NULL_RTX)
+		    label_ref = condjump_label (scan);
+		  else
+		    break;
+		}
+
+	      if (!reg_mentioned_p (reg, PATTERN (scan)))
+		continue;
+
+	      /* Check if the cast register is used in this insn */
+	      if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
+		  && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
+		      GET_MODE (reg)))
+		{
+		  /* If not used in the source to the set or in a memory
+		     expression in the destination then the register is used
+		     as a destination and is really dead. */
+		  if (single_set (scan)
+		      && GET_CODE (PATTERN (scan)) == SET
+		      && REG_P (SET_DEST (PATTERN (scan)))
+		      && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
+		      && label_ref == NULL_RTX)
+		    {
+		      unused_cast = true;
+		    }
+		  break;
+		}
+
+	      /* Check if register is dead or set in this insn */
+	      if (dead_or_set_p (scan, reg))
+		{
+		  unused_cast = true;
+		  break;
+		}
+	    }
+
+	  /* Check if we have unresolved conditional jumps */
+	  if (label_ref != NULL_RTX)
+	    continue;
+
+	  if (unused_cast)
+	    {
+	      if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
+		{
+		  /* One operand cast, safe to delete */
+		  if (dump_file)
+		    {
+		      fprintf (dump_file,
+			       ";;  INSN %i removed, casted register %i value not used.\n",
+			       INSN_UID (insn), REGNO (reg));
+		    }
+		  SET_INSN_DELETED (insn);
+		  /* Force the instruction to be recognized again */
+		  INSN_CODE (insn) = -1;
+		}
+	      else
+		{
+		  /* Two operand cast, which could really be substituted with
+		     a move if the source register is dead after the cast
+		     insn; the insn which sets the source register could then
+		     directly set the destination register of the cast, as
+		     long as no insns in between use the register. */
+		  rtx link = NULL_RTX;
+		  rtx set;
+		  rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
+		  unused_cast = false;
+
+		  if (!find_reg_note (insn, REG_DEAD, src_reg))
+		    continue;
+
+		  /* Search for the insn which sets the source register */
+		  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+		    {
+		      if (REG_NOTE_KIND (link) != 0)
+			continue;
+		      set = single_set (XEXP (link, 0));
+		      if (set && rtx_equal_p (src_reg, SET_DEST (set)))
+			{
+			  link = XEXP (link, 0);
+			  break;
+			}
+		    }
+
+		  /* We found no link, or the link is a call insn whose
+		     destination register we cannot change */
+		  if (link == NULL_RTX || CALL_P (link))
+		    continue;
+
+		  /* Scan through all insns between link and insn */
+		  for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
+		    {
+		      /* Don't try to trace forward past a CODE_LABEL if we
+		         haven't seen INSN yet.  Ordinarily, we will only
+		         find the setting insn in LOG_LINKS if it is in the
+		         same basic block.  However, cross-jumping can insert
+		         code labels in between the load and the call, and
+		         can result in situations where a single call insn
+		         may have two targets depending on where we came
+		         from.  */
+
+		      if (GET_CODE (scan) == CODE_LABEL)
+			break;
+
+		      if (!INSN_P (scan))
+			continue;
+
+		      /* Don't try to trace forward past a JUMP.  To optimize
+		         safely, we would have to check that all the
+		         instructions at the jump destination did not use REG.
+		       */
+
+		      if (GET_CODE (scan) == JUMP_INSN)
+			{
+			  break;
+			}
+
+		      if (!reg_mentioned_p (src_reg, PATTERN (scan)))
+			continue;
+
+		      /* We have reached the cast insn */
+		      if (scan == insn)
+			{
+			  /* We can remove cast and replace the destination
+			     register of the link insn with the destination
+			     of the cast */
+			  if (dump_file)
+			    {
+			      fprintf (dump_file,
+				       ";;  INSN %i removed, casted value unused. "
+				       "Destination of removed cast operation: register %i,  folded into INSN %i.\n",
+				       INSN_UID (insn), REGNO (reg),
+				       INSN_UID (link));
+			    }
+			  /* Update link insn */
+			  SET_DEST (PATTERN (link)) =
+			    gen_rtx_REG (mode, REGNO (reg));
+			  /* Force the instruction to be recognized again */
+			  INSN_CODE (link) = -1;
+
+			  /* Delete insn */
+			  SET_INSN_DELETED (insn);
+			  /* Force the instruction to be recognized again */
+			  INSN_CODE (insn) = -1;
+			  break;
+			}
+		    }
+		}
+	    }
+	}
+    }
+
+  if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
+    {
+
+      /* Scan through all insns looking for shifted add operations */
+      if (dump_file)
+	{
+	  fprintf (dump_file,
+		   ";; Deleting redundant shifted add operations:\n");
+	}
+      for (insn = first; insn; insn = NEXT_INSN (insn))
+	{
+	  rtx reg, mem_expr, scan, op0, op1;
+	  int add_only_used_as_pointer;
+
+	  if (INSN_P (insn)
+	      && GET_CODE (PATTERN (insn)) == SET
+	      && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
+	      && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
+		  || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
+	      && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
+	      CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
+	      && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
+	      && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
+	    {
+	      reg = SET_DEST (PATTERN (insn));
+	      mem_expr = SET_SRC (PATTERN (insn));
+	      op0 = XEXP (XEXP (mem_expr, 0), 0);
+	      op1 = XEXP (mem_expr, 1);
+	    }
+	  else
+	    {
+	      continue;
+	    }
+
+	  /* Scan forward to check whether the result of the shifted add
+	     operation is only used as an address in memory operations and
+	     that the operands of the shifted add are not clobbered. */
+	  add_only_used_as_pointer = false;
+	  for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
+	    {
+	      if (!INSN_P (scan))
+		continue;
+
+	      /* Don't try to trace forward past a JUMP or CALL.  To optimize
+	         safely, we would have to check that all the instructions at
+	         the jump destination did not use REG.  */
+
+	      if (GET_CODE (scan) == JUMP_INSN)
+		{
+		  break;
+		}
+
+	      /* If used in a call insn then we cannot optimize it away */
+	      if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
+		break;
+
+	      /* If any of the operands of the shifted add are clobbered we
+	         cannot optimize the shifted add away */
+	      if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
+		  || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
+		break;
+
+	      if (!reg_mentioned_p (reg, PATTERN (scan)))
+		continue;
+
+	      /* If used anywhere other than as a pointer or as the
+	         destination register we fail */
+	      if (!(single_set (scan)
+		    && GET_CODE (PATTERN (scan)) == SET
+		    && ((MEM_P (SET_DEST (PATTERN (scan)))
+			 && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
+			 && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
+			 REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
+					  &&
+					  REG_P (XEXP
+						 (SET_SRC (PATTERN (scan)),
+						  0))
+					  &&
+					  REGNO (XEXP
+						 (SET_SRC (PATTERN (scan)),
+						  0)) == REGNO (reg))))
+		  && !(GET_CODE (PATTERN (scan)) == SET
+		       && REG_P (SET_DEST (PATTERN (scan)))
+		       && !regno_use_in (REGNO (reg),
+					 SET_SRC (PATTERN (scan)))))
+		break;
+
+	      /* Check if register is dead or set in this insn */
+	      if (dead_or_set_p (scan, reg))
+		{
+		  add_only_used_as_pointer = true;
+		  break;
+		}
+	    }
+
+	  if (add_only_used_as_pointer)
+	    {
+	      /* Let's delete the add insn and replace all memory references
+	         which use the pointer with the full expression. */
+	      if (dump_file)
+		{
+		  fprintf (dump_file,
+			   ";; Deleting INSN %i since address expression can be folded into all "
+			   "memory references using this expression\n",
+			   INSN_UID (insn));
+		}
+	      SET_INSN_DELETED (insn);
+	      /* Force the instruction to be recognized again */
+	      INSN_CODE (insn) = -1;
+
+	      for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
+		{
+		  if (!INSN_P (scan))
+		    continue;
+
+		  if (!reg_mentioned_p (reg, PATTERN (scan)))
+		    continue;
+
+		  /* If used anywhere other than as a pointer or as the
+		     destination register we fail */
+		  if ((single_set (scan)
+		       && GET_CODE (PATTERN (scan)) == SET
+		       && ((MEM_P (SET_DEST (PATTERN (scan)))
+			    && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
+			    && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
+			    REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
+					     &&
+					     REG_P (XEXP
+						    (SET_SRC (PATTERN (scan)),
+						     0))
+					     &&
+					     REGNO (XEXP
+						    (SET_SRC (PATTERN (scan)),
+						     0)) == REGNO (reg)))))
+		    {
+		      if (dump_file)
+			{
+			  fprintf (dump_file,
+				   ";; Register %i replaced by indexed address in INSN %i\n",
+				   REGNO (reg), INSN_UID (scan));
+			}
+		      if (MEM_P (SET_DEST (PATTERN (scan))))
+			XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
+		      else
+			XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
+		    }
+
+		  /* Check if register is dead or set in this insn */
+		  if (dead_or_set_p (scan, reg))
+		    {
+		      break;
+		    }
+
+		}
+	    }
+	}
+    }
+}
+
+/* Exported to toplev.c.
+
+   Do a final pass over the function, just before delayed branch
+   scheduling.  */
+
+static void
+avr32_reorg (void)
+{
+  rtx insn;
+  HOST_WIDE_INT address = 0;
+  Mfix *fix;
+
+  minipool_fix_head = minipool_fix_tail = NULL;
+
+  /* The first insn must always be a note, or the code below won't scan it
+     properly.  */
+  insn = get_insns ();
+  if (GET_CODE (insn) != NOTE)
+    abort ();
+
+  /* Scan all the insns and record the operands that will need fixing.  */
+  for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
+    {
+      if (GET_CODE (insn) == BARRIER)
+	push_minipool_barrier (insn, address);
+      else if (INSN_P (insn))
+	{
+	  rtx table;
+
+	  note_invalid_constants (insn, address, true);
+	  address += get_attr_length (insn);
+
+	  /* If the insn is a vector jump, add the size of the table and skip
+	     the table.  */
+	  if ((table = is_jump_table (insn)) != NULL)
+	    {
+	      address += get_jump_table_size (table);
+	      insn = table;
+	    }
+	}
+    }
+
+  fix = minipool_fix_head;
+
+  /* Now scan the fixups and perform the required changes.  */
+  while (fix)
+    {
+      Mfix *ftmp;
+      Mfix *fdel;
+      Mfix *last_added_fix;
+      Mfix *last_barrier = NULL;
+      Mfix *this_fix;
+
+      /* Skip any further barriers before the next fix.  */
+      while (fix && GET_CODE (fix->insn) == BARRIER)
+	fix = fix->next;
+
+      /* No more fixes.  */
+      if (fix == NULL)
+	break;
+
+      last_added_fix = NULL;
+
+      for (ftmp = fix; ftmp; ftmp = ftmp->next)
+	{
+	  if (GET_CODE (ftmp->insn) == BARRIER)
+	    {
+	      if (ftmp->address >= minipool_vector_head->max_address)
+		break;
+
+	      last_barrier = ftmp;
+	    }
+	  else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
+	    break;
+
+	  last_added_fix = ftmp;	/* Keep track of the last fix added.  */
+	}
+
+      /* If we found a barrier, drop back to that; any fixes that we could
+         have reached but come after the barrier will now go in the next
+         mini-pool.  */
+      if (last_barrier != NULL)
+	{
+	  /* Reduce the refcount for those fixes that won't go into this pool
+	     after all.  */
+	  for (fdel = last_barrier->next;
+	       fdel && fdel != ftmp; fdel = fdel->next)
+	    {
+	      fdel->minipool->refcount--;
+	      fdel->minipool = NULL;
+	    }
+
+	  ftmp = last_barrier;
+	}
+      else
+	{
+	  /* ftmp is the first fix that we can't fit into this pool and there
+	     are no natural barriers that we could use.  Insert a new barrier
+	     in the code somewhere between the previous fix and this one, and
+	     arrange to jump around it.  */
+	  HOST_WIDE_INT max_address;
+
+	  /* The last item on the list of fixes must be a barrier, so we can
+	     never run off the end of the list of fixes without last_barrier
+	     being set.  */
+	  if (ftmp == NULL)
+	    abort ();
+
+	  max_address = minipool_vector_head->max_address;
+	  /* Check that there isn't another fix that is in range that we
+	     couldn't fit into this pool because the pool was already too
+	     large: we need to put the pool before such an instruction.  */
+	  if (ftmp->address < max_address)
+	    max_address = ftmp->address;
+
+	  last_barrier = create_fix_barrier (last_added_fix, max_address);
+	}
+
+      assign_minipool_offsets (last_barrier);
+
+      while (ftmp)
+	{
+	  if (GET_CODE (ftmp->insn) != BARRIER
+	      && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
+		  == NULL))
+	    break;
+
+	  ftmp = ftmp->next;
+	}
+
+      /* Scan over the fixes we have identified for this pool, fixing them up
+         and adding the constants to the pool itself.  */
+      for (this_fix = fix; this_fix && ftmp != this_fix;
+	   this_fix = this_fix->next)
+	if (GET_CODE (this_fix->insn) != BARRIER
+	    /* Do nothing for entries present just to force the insertion of
+	       a minipool. */
+	    && !IS_FORCE_MINIPOOL (this_fix->value))
+	  {
+	    rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
+							 minipool_vector_label),
+				      this_fix->minipool->offset);
+	    *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
+	  }
+
+      dump_minipool (last_barrier->insn);
+      fix = ftmp;
+    }
+
+  /* Free the minipool memory.  */
+  obstack_free (&minipool_obstack, minipool_startobj);
+
+  avr32_reorg_optimization ();
+}
+
+
+/* Hook for doing some final scanning of instructions.  Does nothing
+   yet.  */
+void
+avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
+			  rtx * opvec ATTRIBUTE_UNUSED,
+			  int noperands ATTRIBUTE_UNUSED)
+{
+  return;
+}
+
+
+
+int
+avr32_expand_movcc (enum machine_mode mode, rtx operands[])
+{
+  rtx operator;
+  rtx compare_op0 = avr32_compare_op0;
+  rtx compare_op1 = avr32_compare_op1;
+
+  /* Only allow certain compare operations */
+  if (GET_MODE (compare_op0) != DImode
+      && GET_MODE (compare_op0) != SImode
+      && GET_MODE (compare_op0) != HImode && GET_MODE (compare_op0) != QImode)
+    return FALSE;
+
+  if (GET_CODE (compare_op0) == MEM)
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
+    }
+
+  if (GET_CODE (compare_op1) == MEM)
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	compare_op1 = force_reg (GET_MODE (compare_op1), compare_op1);
+    }
+
+  /* For DI, HI and QI mode force comparison operands to registers */
+  if (GET_MODE (compare_op0) == DImode
+      || GET_MODE (compare_op0) == HImode || GET_MODE (compare_op0) == QImode)
+    {
+      if (GET_CODE (compare_op0) != REG)
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
+	}
+
+      if (GET_CODE (compare_op1) != REG)
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op1 = force_reg (GET_MODE (compare_op0), compare_op1);
+	}
+    }
+
+  /* Force any SImode immediate compare operands larger than the Ks21
+     constraint to a register */
+  if (GET_MODE (compare_op0) == SImode)
+    {
+      if ((GET_CODE (compare_op0) == CONST_INT
+	   && !avr32_const_ok_for_constraint_p (INTVAL (compare_op0), 'K',
+						"Ks21")))
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op0 = force_reg (SImode, compare_op0);
+	}
+
+      if ((GET_CODE (compare_op1) == CONST_INT
+	   && !avr32_const_ok_for_constraint_p (INTVAL (compare_op1), 'K',
+						"Ks21")))
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op1 = force_reg (SImode, compare_op1);
+	}
+    }
+
+  /* If we have immediates larger than can be allowed in conditional mov
+     instructions, force them to registers */
+  if (GET_CODE (operands[2]) == CONST_INT
+      && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08"))
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	operands[2] = force_reg (mode, operands[2]);
+    }
+
+  if (GET_CODE (operands[3]) == CONST_INT
+      && !avr32_const_ok_for_constraint_p (INTVAL (operands[3]), 'K', "Ks08"))
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	operands[3] = force_reg (mode, operands[3]);
+    }
+
+  /* Emit the actual instruction */
+  operator = gen_rtx_EQ (VOIDmode, const0_rtx, const0_rtx);
+  PUT_CODE (operator, GET_CODE (operands[1]));
+  switch (mode)
+    {
+    case SImode:
+      switch (GET_MODE (compare_op0))
+	{
+	case SImode:
+	  emit_insn (gen_movsicc_cmpsi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case DImode:
+	  emit_insn (gen_movsicc_cmpdi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case HImode:
+	  emit_insn (gen_movsicc_cmphi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case QImode:
+	  emit_insn (gen_movsicc_cmpqi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	default:
+	  return FALSE;
+	}
+      break;
+    case HImode:
+      switch (GET_MODE (compare_op0))
+	{
+	case SImode:
+	  emit_insn (gen_movhicc_cmpsi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case DImode:
+	  emit_insn (gen_movhicc_cmpdi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case HImode:
+	  emit_insn (gen_movhicc_cmphi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case QImode:
+	  emit_insn (gen_movhicc_cmpqi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	default:
+	  return FALSE;
+	}
+      break;
+    case QImode:
+      switch (GET_MODE (compare_op0))
+	{
+	case SImode:
+	  emit_insn (gen_movqicc_cmpsi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case DImode:
+	  emit_insn (gen_movqicc_cmpdi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case HImode:
+	  emit_insn (gen_movqicc_cmphi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case QImode:
+	  emit_insn (gen_movqicc_cmpqi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	default:
+	  return FALSE;
+	}
+      break;
+    default:
+      return FALSE;
+    }
+
+  return TRUE;
+}
+
+
+int
+avr32_expand_addcc (enum machine_mode mode, rtx operands[])
+{
+  rtx operator;
+  rtx compare_op0 = avr32_compare_op0;
+  rtx compare_op1 = avr32_compare_op1;
+
+  /* Check if we have an add/sub with a Ks08 immediate */
+  if (!(GET_CODE (operands[3]) == CONST_INT
+	&& avr32_const_ok_for_constraint_p (-INTVAL (operands[3]), 'K',
+					    "Ks08")))
+    return FALSE;
+  else
+    /* Flip sign */
+    operands[3] = GEN_INT (-INTVAL (operands[3]));
+
+  /* Only allow certain compare operations */
+  if (GET_MODE (compare_op0) != DImode
+      && GET_MODE (compare_op0) != SImode
+      && GET_MODE (compare_op0) != HImode && GET_MODE (compare_op0) != QImode)
+    return FALSE;
+
+  if (GET_CODE (compare_op0) == MEM)
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
+    }
+
+  if (GET_CODE (compare_op1) == MEM)
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	compare_op1 = force_reg (GET_MODE (compare_op1), compare_op1);
+    }
+
+  /* For DI, HI and QI mode force comparison operands to registers */
+  if (GET_MODE (compare_op0) == DImode
+      || GET_MODE (compare_op0) == HImode || GET_MODE (compare_op0) == QImode)
+    {
+      if (GET_CODE (compare_op0) != REG)
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
+	}
+
+      if (GET_CODE (compare_op1) != REG)
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op1 = force_reg (GET_MODE (compare_op0), compare_op1);
+	}
+    }
+
+  /* Force any SImode immediate compare operands larger than the Ks21
+     constraint to a register */
+  if (GET_MODE (compare_op0) == SImode)
+    {
+      if ((GET_CODE (compare_op0) == CONST_INT
+	   && !avr32_const_ok_for_constraint_p (INTVAL (compare_op0), 'K',
+						"Ks21")))
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op0 = force_reg (SImode, compare_op0);
+	}
+
+      if ((GET_CODE (compare_op1) == CONST_INT
+	   && !avr32_const_ok_for_constraint_p (INTVAL (compare_op1), 'K',
+						"Ks21")))
+	{
+	  if (no_new_pseudos)
+	    return FALSE;
+	  else
+	    compare_op1 = force_reg (SImode, compare_op1);
+	}
+    }
+
+  /* If we have immediates larger than can be allowed in conditional mov
+     instructions, force them to registers */
+  if (GET_CODE (operands[2]) == CONST_INT
+      && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08"))
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	operands[2] = force_reg (mode, operands[2]);
+    }
+
+  if (GET_CODE (operands[3]) == CONST_INT
+      && !avr32_const_ok_for_constraint_p (INTVAL (operands[3]), 'K', "Ks08"))
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	operands[3] = force_reg (mode, operands[3]);
+    }
+
+  if (GET_CODE (operands[0]) != REG)
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
+    }
+
+  if (GET_CODE (operands[2]) != REG)
+    {
+      if (no_new_pseudos)
+	return FALSE;
+      else
+	operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
+    }
+
+  /* Check if operands[0] and operands[2] are different */
+  if (REGNO (operands[0]) != REGNO (operands[2]))
+    {
+      emit_move_insn (operands[0], operands[2]);
+      operands[2] = operands[0];
+    }
+
+  /* Emit the actual instruction */
+  operator = gen_rtx_EQ (VOIDmode, const0_rtx, const0_rtx);
+  PUT_CODE (operator, GET_CODE (operands[1]));
+  switch (mode)
+    {
+    case SImode:
+      switch (GET_MODE (compare_op0))
+	{
+	case SImode:
+	  emit_insn (gen_addsicc_cmpsi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case DImode:
+	  emit_insn (gen_addsicc_cmpdi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case HImode:
+	  emit_insn (gen_addsicc_cmphi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case QImode:
+	  emit_insn (gen_addsicc_cmpqi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	default:
+	  return FALSE;
+	}
+      break;
+    case HImode:
+      switch (GET_MODE (compare_op0))
+	{
+	case SImode:
+	  emit_insn (gen_addhicc_cmpsi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case DImode:
+	  emit_insn (gen_addhicc_cmpdi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case HImode:
+	  emit_insn (gen_addhicc_cmphi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case QImode:
+	  emit_insn (gen_addhicc_cmpqi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	default:
+	  return FALSE;
+	}
+      break;
+    case QImode:
+      switch (GET_MODE (compare_op0))
+	{
+	case SImode:
+	  emit_insn (gen_addqicc_cmpsi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case DImode:
+	  emit_insn (gen_addqicc_cmpdi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case HImode:
+	  emit_insn (gen_addqicc_cmphi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	case QImode:
+	  emit_insn (gen_addqicc_cmpqi
+		     (operands[0], operator, operands[2], operands[3],
+		      compare_op0, compare_op1));
+	  break;
+	default:
+	  return FALSE;
+	}
+      break;
+    default:
+      return FALSE;
+    }
+
+  return TRUE;
+}
+
+/* Function for changing the condition of the next instruction;
+   should be used when emitting compare instructions and
+   the condition of the next instruction needs to change.  */
+int
+set_next_insn_cond (rtx cur_insn, rtx new_cond)
+{
+  rtx next_insn = next_nonnote_insn (cur_insn);
+  if ((next_insn != NULL_RTX)
+      && (INSN_P (next_insn))
+      && (GET_CODE (PATTERN (next_insn)) == SET)
+      && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
+    {
+      /* Branch instructions */
+      XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
+      /* Force the instruction to be recognized again */
+      INSN_CODE (next_insn) = -1;
+      return TRUE;
+    }
+  else if ((next_insn != NULL_RTX)
+	   && (INSN_P (next_insn))
+	   && (GET_CODE (PATTERN (next_insn)) == SET)
+	   && comparison_operator (SET_SRC (PATTERN (next_insn)),
+				   GET_MODE (SET_SRC (PATTERN (next_insn)))))
+    {
+      /* scc with no compare */
+      SET_SRC (PATTERN (next_insn)) = new_cond;
+      /* Force the instruction to be recognized again */
+      INSN_CODE (next_insn) = -1;
+      return TRUE;
+    }
+
+  return FALSE;
+}
+
+/* Function for obtaining the condition of the next instruction
+   after cur_insn.  */
+rtx
+get_next_insn_cond (rtx cur_insn)
+{
+  rtx next_insn = next_nonnote_insn (cur_insn);
+  rtx cond = NULL_RTX;
+  if ((next_insn != NULL_RTX)
+      && (INSN_P (next_insn))
+      && (GET_CODE (PATTERN (next_insn)) == SET)
+      && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
+    {
+      /* Branch instructions */
+      cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
+    }
+  else if ((next_insn != NULL_RTX)
+	   && (INSN_P (next_insn))
+	   && (GET_CODE (PATTERN (next_insn)) == SET)
+	   && comparison_operator (SET_SRC (PATTERN (next_insn)),
+				   GET_MODE (SET_SRC (PATTERN (next_insn)))))
+    {
+      /* scc with no compare */
+      cond = SET_SRC (PATTERN (next_insn));
+    }
+
+  return cond;
+}
+
+int
+avr32_expand_scc (enum rtx_code cond, rtx * operands)
+{
+
+  rtx comparation;
+  /* Only allow certain compare operations */
+  if (GET_MODE (avr32_compare_op0) != DImode
+      && GET_MODE (avr32_compare_op0) != SImode
+      && GET_MODE (avr32_compare_op0) != HImode
+      && GET_MODE (avr32_compare_op0) != QImode)
+    return FALSE;
+
+  /* Delete compare instruction as it is merged into this instruction */
+  remove_insn (get_last_insn_anywhere ());
+
+  if (!REG_P (avr32_compare_op0))
+    avr32_compare_op0 =
+      force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op0);
+
+  if (GET_MODE (avr32_compare_op0) != SImode && !REG_P (avr32_compare_op1))
+    {
+      avr32_compare_op1 =
+	force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op1);
+    }
+  else if (GET_MODE (avr32_compare_op0) == SImode
+	   && !REG_P (avr32_compare_op1)
+	   && (GET_CODE (avr32_compare_op1) != CONST_INT
+	       || (GET_CODE (avr32_compare_op1) == CONST_INT
+		   &&
+		   !avr32_const_ok_for_constraint_p (INTVAL
+						     (avr32_compare_op1), 'K',
+						     "Ks21"))))
+    avr32_compare_op1 =
+      force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op1);
+
+
+  comparison =
+    gen_rtx_EQ (SImode,
+		gen_rtx_COMPARE (GET_MODE (avr32_compare_op0),
+				 avr32_compare_op0, avr32_compare_op1),
+		const0_rtx);
+  /* Set correct condition */
+  PUT_CODE (comparison, cond);
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], comparison));
+  return TRUE;
+}
+
+rtx
+avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
+{
+
+  rtx new_cond = NULL_RTX;
+  rtx ops[2];
+  rtx compare_pattern;
+  ops[0] = op0;
+  ops[1] = op1;
+
+  compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
+
+  new_cond = is_compare_redundant (compare_pattern, cond);
+
+  if (new_cond != NULL_RTX)
+    return new_cond;
+
+  /* Insert compare */
+  switch (mode)
+    {
+    case QImode:
+      output_asm_insn ("cp.b\t%0, %1", ops);
+      break;
+    case HImode:
+      output_asm_insn ("cp.h\t%0, %1", ops);
+      break;
+    case SImode:
+      output_asm_insn ("cp.w\t%0, %1", ops);
+      break;
+    case DImode:
+      if (rtx_equal_p (op1, const0_rtx))
+	output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
+      else
+	output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
+      break;
+    default:
+      internal_error ("Unknown comparison mode");
+      break;
+    }
+
+  return cond;
+}
+
+int
+avr32_load_multiple_operation (rtx op,
+			       enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+  int count = XVECLEN (op, 0);
+  unsigned int dest_regno;
+  rtx src_addr;
+  rtx elt;
+  int i = 1, base = 0;
+
+  if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+    return 0;
+
+  /* Check to see if this might be a write-back.  */
+  if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+    {
+      i++;
+      base = 1;
+
+      /* Now check it more carefully.  */
+      if (GET_CODE (SET_DEST (elt)) != REG
+	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+	  || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+	  || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
+	return 0;
+    }
+
+  /* Perform a quick check so we don't blow up below.  */
+  if (count <= 1
+      || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+      || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
+      || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
+    return 0;
+
+  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
+  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
+
+  for (; i < count; i++)
+    {
+      elt = XVECEXP (op, 0, i);
+
+      if (GET_CODE (elt) != SET
+	  || GET_CODE (SET_DEST (elt)) != REG
+	  || GET_MODE (SET_DEST (elt)) != SImode
+	  || GET_CODE (SET_SRC (elt)) != UNSPEC)
+	return 0;
+    }
+
+  return 1;
+}
+
+int
+avr32_store_multiple_operation (rtx op,
+				enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+  int count = XVECLEN (op, 0);
+  int src_regno;
+  rtx dest_addr;
+  rtx elt;
+  int i = 1;
+
+  if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+    return 0;
+
+  /* Perform a quick check so we don't blow up below.  */
+  if (count <= i
+      || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+      || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
+      || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
+    return 0;
+
+  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
+  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
+
+  for (; i < count; i++)
+    {
+      elt = XVECEXP (op, 0, i);
+
+      if (GET_CODE (elt) != SET
+	  || GET_CODE (SET_DEST (elt)) != MEM
+	  || GET_MODE (SET_DEST (elt)) != SImode
+	  || GET_CODE (SET_SRC (elt)) != UNSPEC)
+	return 0;
+    }
+
+  return 1;
+}
+
+int
+avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
+{
+  /* Check if they use the same accumulator */
+  if (rtx_equal_p
+      (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
+    {
+      return TRUE;
+    }
+
+  return FALSE;
+}
+
+int
+avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
+{
+  /*
+     Check if the mul instruction produces the accumulator for the mac
+     instruction. */
+  if (rtx_equal_p
+      (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
+    {
+      return TRUE;
+    }
+  return FALSE;
+}
+
+int
+avr32_store_bypass (rtx insn_out, rtx insn_in)
+{
+  /* The bypass is only valid if the output result is used as a source
+     operand in the store instruction, NOT if it is used as a pointer or
+     base address. */
+  if (rtx_equal_p
+      (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
+    {
+      return TRUE;
+    }
+
+  return FALSE;
+}
+
+int
+avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
+{
+  /* Check if the register holding the result from the mul instruction is
+     used as a result register in the input instruction. */
+  if (rtx_equal_p
+      (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
+    {
+      return TRUE;
+    }
+
+  return FALSE;
+}
+
+int
+avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
+{
+  /* Check if the first loaded word in insn_out is used in insn_in. */
+  rtx dst_reg;
+  rtx second_loaded_reg;
+
+  /* If this is a double alu operation then the bypass is not valid */
+  if ((get_attr_type (insn_in) == TYPE_ALU
+       || get_attr_type (insn_in) == TYPE_ALU2)
+      && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
+    return FALSE;
+
+  /* Get the destination register in the load */
+  if (!REG_P (SET_DEST (PATTERN (insn_out))))
+    return FALSE;
+
+  dst_reg = SET_DEST (PATTERN (insn_out));
+  second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
+
+  if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
+    return TRUE;
+
+  return FALSE;
+}
+
+
+int
+avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
+{
+  /*
+     Check if the first two loaded words in insn_out are used in insn_in. */
+  rtx dst_reg;
+  rtx third_loaded_reg, fourth_loaded_reg;
+
+  /* Get the destination register in the load */
+  if (!REG_P (SET_DEST (PATTERN (insn_out))))
+    return FALSE;
+
+  dst_reg = SET_DEST (PATTERN (insn_out));
+  third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
+  fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
+
+  if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
+      && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
+    {
+      return TRUE;
+    }
+
+  return FALSE;
+}
+
+int
+avr32_sched_use_dfa_pipeline_interface (void)
+{
+  /* No need to schedule on the avr32_uc architecture. */
+  return (avr32_arch->arch_type != ARCH_TYPE_AVR32_UC);
+}
+
+void
+avr32_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
+			  rtx x ATTRIBUTE_UNUSED,
+			  unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+  /* Let ASM_OUTPUT_POOL_PROLOGUE take care of this */
+}
+
+/* Set up library functions to comply to AVR32 ABI  */
+
+static void
+avr32_init_libfuncs (void)
+{
+  /* Convert gcc run-time function names to AVR32 ABI names */
+
+  /* Double-precision floating-point arithmetic. */
+  set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
+  set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
+  set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
+  set_optab_libfunc (neg_optab, DFmode, NULL);
+  set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
+
+  /* Double-precision comparisons.  */
+  set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
+  set_optab_libfunc (ne_optab, DFmode, NULL);
+  set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
+  set_optab_libfunc (le_optab, DFmode, NULL);
+  set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
+  set_optab_libfunc (gt_optab, DFmode, NULL);
+
+  /* Single-precision floating-point arithmetic. */
+  set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
+  set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
+  set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
+  set_optab_libfunc (neg_optab, SFmode, NULL);
+  set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
+
+  /* Single-precision comparisons.  */
+  set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
+  set_optab_libfunc (ne_optab, SFmode, NULL);
+  set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
+  set_optab_libfunc (le_optab, SFmode, NULL);
+  set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
+  set_optab_libfunc (gt_optab, SFmode, NULL);
+
+  /* Floating-point to integer conversions. */
+  set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
+  set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
+  set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
+  set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
+  set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
+  set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
+  set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
+  set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
+
+  /* Conversions between floating types.  */
+  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
+  set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
+
+  /* Integer to floating-point conversions.  Table 8.  */
+  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
+  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
+  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
+  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
+  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
+  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
+  /* TODO: Add these to gcc library functions */
+
+  set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
+  set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
+
+  /* Long long.  Table 9.  */
+  set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
+  set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
+  set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
+  set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
+  set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
+  set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
+  set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
+  set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
+}
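+/* Editor's note (illustrative, not part of the original patch): with the
+   optab entries above, soft-float and 64-bit operations are lowered to
+   the __avr32_* ABI routines instead of the default libgcc names, e.g.
+
+     double c = a * b;       calls  __avr32_f64_mul  (default: __muldf3)
+     long long q = x / y;    calls  __avr32_sdiv64   (default: __divdi3)
+*/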
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/avr32-elf.h gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32-elf.h
--- gcc-4.0.2/gcc/config/avr32/avr32-elf.h	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32-elf.h	2006-11-17 10:38:10.000000000 +0100
@@ -0,0 +1,82 @@
+/*
+   Elf specific definitions.
+   Copyright 2003-2006 Atmel Corporation.
+
+   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+
+   This file is part of GCC.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+/*****************************************************************************
+ * Controlling the Compilation Driver, 'gcc'
+ *****************************************************************************/
+
+/* Run-time Target Specification.  */
+#undef  TARGET_VERSION
+#define TARGET_VERSION  fputs (" (AVR32 GNU with ELF)", stderr);
+
+/*
+Another C string constant used much like LINK_SPEC.  The
+difference between the two is that STARTFILE_SPEC is used at
+the very beginning of the command given to the linker.
+
+If this macro is not defined, a default is provided that loads the
+standard C startup file from the usual place.  See gcc.c.
+*/
+#undef  STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=*:-mavr32elf_%*} %{mcpu=*:-mavr32elf_%*}"
+
+
+/*
+Another C string constant used much like LINK_SPEC.  The
+difference between the two is that ENDFILE_SPEC is used at
+the very end of the command given to the linker.
+
+Do not define this macro if it does not need to do anything.
+*/
+#undef  ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+
+/* Target CPU builtins.  */
+#define TARGET_CPU_CPP_BUILTINS()				\
+  do								\
+    {								\
+      builtin_define ("__avr32__");				\
+      builtin_define ("__AVR32__");				\
+      builtin_define ("__AVR32_ELF__");	       	        	\
+      builtin_define (avr32_part->macro);			\
+      builtin_define (avr32_arch->macro);			\
+      if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)		\
+	builtin_define ("__AVR32_AVR32A__");			\
+      else							\
+	builtin_define ("__AVR32_AVR32B__");			\
+      if (TARGET_UNALIGNED_WORD)				\
+	builtin_define ("__AVR32_HAS_UNALIGNED_WORD__");	\
+      if (TARGET_SIMD)						\
+	builtin_define ("__AVR32_HAS_SIMD__");			\
+      if (TARGET_DSP)						\
+	builtin_define ("__AVR32_HAS_DSP__");			\
+      if (TARGET_RMW)						\
+	builtin_define ("__AVR32_HAS_RMW__");			\
+      if (TARGET_BRANCH_PRED)					\
+	builtin_define ("__AVR32_HAS_BRANCH_PRED__");		\
+    }								\
+  while (0)
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/avr32.h gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32.h
--- gcc-4.0.2/gcc/config/avr32/avr32.h	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32.h	2006-11-10 15:14:06.000000000 +0100
@@ -0,0 +1,3374 @@
+/*
+   Definitions of target machine for AVR32.
+   Copyright 2003-2006 Atmel Corporation.
+
+   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+   Initial porting by Anders �dland.
+
+   This file is part of GCC.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#ifndef GCC_AVR32_H
+#define GCC_AVR32_H
+
+
+#ifndef OBJECT_FORMAT_ELF
+#error avr32.h included before elfos.h
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC  "-D__ELF__"
+#endif
+
+
+extern struct rtx_def *avr32_compare_op0;
+extern struct rtx_def *avr32_compare_op1;
+
+
+extern struct rtx_def *avr32_acc_cache;
+
+/* cache instruction op5 codes */
+#define AVR32_CACHE_INVALIDATE_ICACHE 1
+
+/* These bits describe the different types of function supported
+   by the AVR32 backend.  They are exclusive, i.e. a function cannot be both a
+   normal function and an interworked function, for example.  Knowing the
+   type of a function is important for determining its prologue and
+   epilogue sequences.
+   Note value 7 is currently unassigned.  Also note that the interrupt
+   function types all have bit 2 set, so that they can be tested for easily.
+   Note that 0 is deliberately chosen for AVR32_FT_UNKNOWN so that when the
+   machine_function structure is initialized (to zero) func_type will
+   default to unknown.  This will force the first use of avr32_current_func_type
+   to call avr32_compute_func_type.  */
+#define AVR32_FT_UNKNOWN	 0	/* Type has not yet been determined.
+					   */
+#define AVR32_FT_NORMAL		 1	/* Your normal, straightforward
+					   function.  */
+#define AVR32_FT_ACALL	         2	/* An acall function.  */
+#define AVR32_FT_EXCEPTION_HANDLER 3	/* A C++ exception handler.  */
+#define AVR32_FT_ISR_FULL	 4	/* A fully shadowed interrupt mode.  */
+#define AVR32_FT_ISR_HALF	 5	/* A half shadowed interrupt mode.  */
+#define AVR32_FT_ISR_NONE	 6	/* No shadow registers.  */
+
+#define AVR32_FT_TYPE_MASK	((1 << 3) - 1)
+
+/* In addition, functions can have several type modifiers,
+   outlined by these bit masks:  */
+#define AVR32_FT_INTERRUPT	(1 << 2)	/* Note overlap with FT_ISR
+						   and above.  */
+#define AVR32_FT_NAKED		(1 << 3)	/* No prologue or epilogue.  */
+#define AVR32_FT_VOLATILE	(1 << 4)	/* Does not return.  */
+#define AVR32_FT_NESTED		(1 << 5)	/* Embedded inside another
+						   func. */
+
+/* Some macros to test these flags.  */
+#define AVR32_FUNC_TYPE(t)	(t & AVR32_FT_TYPE_MASK)
+#define IS_INTERRUPT(t)		(t & AVR32_FT_INTERRUPT)
+#define IS_VOLATILE(t)     	(t & AVR32_FT_VOLATILE)
+#define IS_NAKED(t)        	(t & AVR32_FT_NAKED)
+#define IS_NESTED(t)       	(t & AVR32_FT_NESTED)
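+/* Editor's note (illustrative example): for func_type == AVR32_FT_ISR_FULL
+   (value 4), AVR32_FUNC_TYPE() yields 4 and IS_INTERRUPT() is nonzero,
+   since all ISR types (values 4-6) have bit 2 set, while AVR32_FT_NORMAL
+   (value 1) gives IS_INTERRUPT() == 0.  */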
+
+
+typedef struct minipool_labels
+GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
+{
+  rtx label;
+  struct minipool_labels *prev;
+  struct minipool_labels *next;
+} minipool_labels;
+
+/* A C structure for machine-specific, per-function data.
+   This is added to the cfun structure.  */
+
+typedef struct machine_function
+GTY (())
+{
+  /* Records the type of the current function.  */
+  unsigned long func_type;
+  /* List of minipool labels, used for checking whether a code label is
+     valid in a memory expression. */
+  minipool_labels *minipool_label_head;
+  minipool_labels *minipool_label_tail;
+} machine_function;
+
+/* Initialize data used by insn expanders.  This is called from init_emit,
+   once for every function before code is generated.  */
+#define INIT_EXPANDERS avr32_init_expanders ()
+
+/******************************************************************************
+ * SPECS
+ *****************************************************************************/
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=*:-march=%*} %{mpart=*:-mpart=%*}"
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "march=ap" }
+#endif
+
+
+/******************************************************************************
+ * Run-time Target Specification
+ *****************************************************************************/
+extern int target_flags;
+
+/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
+enum part_type
+{
+  PART_TYPE_AVR32_NONE,
+  PART_TYPE_AVR32_AP7000,
+  PART_TYPE_AVR32_AP7010,
+  PART_TYPE_AVR32_AP7020,
+  PART_TYPE_AVR32_UC3A0256,
+  PART_TYPE_AVR32_UC3A0512,
+  PART_TYPE_AVR32_UC3A1128,
+  PART_TYPE_AVR32_UC3A1256,
+  PART_TYPE_AVR32_UC3A1512
+};
+
+/* Microarchitectures. */
+enum microarchitecture_type
+{
+  UARCH_TYPE_AVR32A,
+  UARCH_TYPE_AVR32B
+};
+
+/* Architecture types, which specify the pipeline.
+   Keep this in sync with avr32_arch_types in avr32.c */
+enum architecture_type
+{
+  ARCH_TYPE_AVR32_AP,
+  ARCH_TYPE_AVR32_UC
+};
+
+/* Flag specifying if the cpu has support for DSP instructions.*/
+#define FLAG_AVR32_HAS_DSP (1 << 0)
+/* Flag specifying if the cpu has support for Read-Modify-Write
+   instructions.*/
+#define FLAG_AVR32_HAS_RMW (1 << 1)
+/* Flag specifying if the cpu has support for SIMD instructions. */
+#define FLAG_AVR32_HAS_SIMD (1 << 2)
+/* Flag specifying if the cpu has support for unaligned memory word access. */
+#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
+/* Flag specifying if the cpu has support for branch prediction. */
+#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
+
+/* Structure for holding information about different avr32 CPUs/parts */
+struct part_type_s
+{
+  const char *const name;
+  enum part_type part_type;
+  enum architecture_type arch_type;
+  /* Must lie outside user's namespace.  NULL == no macro.  */
+  const char *const macro;
+};
+
+/* Structure for holding information about different avr32 pipeline
+ architectures. */
+struct arch_type_s
+{
+  const char *const name;
+  enum architecture_type arch_type;
+  enum microarchitecture_type uarch_type;
+  const unsigned long feature_flags;
+  /* Must lie outside user's namespace.  NULL == no macro.  */
+  const char *const macro;
+};
+
+extern const struct part_type_s *avr32_part;
+extern const struct arch_type_s *avr32_arch;
+
+#define USE_RODATA_SECTION (1 << 0)
+#define AVR32_FLAG_HARD_FLOAT (1 << 1)
+#define AVR32_FLAG_FORCE_DOUBLE_ALIGN (1 << 2)
+#define AVR32_FLAG_RELAX (1 << 4)
+#define AVR32_FLAG_NO_INIT_GOT (1 << 5)
+#define AVR32_FLAG_NO_REORG_OPT (1 << 6)
+#define AVR32_FLAG_NO_ASM_ADDR_PSEUDOS (1 << 7)
+#define AVR32_FLAG_NO_PIC (1 << 8)
+
+#define TARGET_HARD_FLOAT            (target_flags & AVR32_FLAG_HARD_FLOAT)
+#define TARGET_SOFT_FLOAT            (!TARGET_HARD_FLOAT)
+#define TARGET_FORCE_DOUBLE_ALIGN    (target_flags & AVR32_FLAG_FORCE_DOUBLE_ALIGN)
+#define TARGET_RELAX                 (target_flags & AVR32_FLAG_RELAX)
+#define TARGET_NO_INIT_GOT           (target_flags & AVR32_FLAG_NO_INIT_GOT)
+#define TARGET_MD_REORG_OPTIMIZATION (!(target_flags & AVR32_FLAG_NO_REORG_OPT))
+#define TARGET_HAS_ASM_ADDR_PSEUDOS  (!(target_flags & AVR32_FLAG_NO_ASM_ADDR_PSEUDOS))
+
+#define TARGET_SIMD  (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
+#define TARGET_DSP  (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
+#define TARGET_RMW  (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
+#define TARGET_UNALIGNED_WORD  (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
+#define TARGET_BRANCH_PRED  (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
+
+
+#define TARGET_SWITCHES { \
+  { "use-rodata-section", USE_RODATA_SECTION,                                 \
+  N_("Do not put readonly-data in .text section, but in .rodata.") },         \
+  { "hard-float", AVR32_FLAG_HARD_FLOAT,                                      \
+  N_("Use floating point coprocessor instructions.") },                       \
+  { "soft-float", -AVR32_FLAG_HARD_FLOAT,                                     \
+  N_("Use software floating-point library for floating-point operations.") }, \
+  { "force-double-align", AVR32_FLAG_FORCE_DOUBLE_ALIGN,                      \
+  N_("Force double-word alignment for double-word memory accesses.") },       \
+  { "no-init-got", AVR32_FLAG_NO_INIT_GOT,				\
+  N_("Do not initialize GOT register before using it when compiling PIC code.") }, \
+  { "relax", AVR32_FLAG_RELAX,                               \
+  N_("Let invoked assembler and linker do relaxing (Enabled by default when optimization level is >1).") }, \
+  { "no-relax", -AVR32_FLAG_RELAX,					\
+  N_("Don't let invoked assembler and linker do relaxing.") },	\
+  { "no-reorg-opt", AVR32_FLAG_NO_REORG_OPT,                               \
+  N_("Do not perform machine dependent optimizations in reorg stage.") }, \
+  { "asm-addr-pseudos", -AVR32_FLAG_NO_ASM_ADDR_PSEUDOS,                               \
+  N_("Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default)") }, \
+  { "no-asm-addr-pseudos", AVR32_FLAG_NO_ASM_ADDR_PSEUDOS,                               \
+  N_("Do not use assembler pseudo-instructions lda.w and call for handling direct addresses.") }, \
+  { "no-pic", AVR32_FLAG_NO_PIC,					\
+  N_("Do not emit position-independent code (will break dynamic linking.)") }, \
+  { "", 0, NULL }         \
+}
+
+
+extern const char *avr32_part_name;
+extern const char *avr32_arch_name;
+
+#define TARGET_OPTIONS {                                                \
+    { "part=", &avr32_part_name, N_("Specify the AVR32 part name"), 0},    \
+    { "cpu=", &avr32_part_name, N_("Specify the AVR32 part name (deprecated)"), 0}, \
+    { "arch=", &avr32_arch_name, N_("Specify the AVR32 architecture name"), 0} }
+
+#define CAN_DEBUG_WITHOUT_FP
+
+/******************************************************************************
+ * Storage Layout
+ *****************************************************************************/
+
+/*
+Define this macro to have the value 1 if the most significant bit in a
+byte has the lowest number; otherwise define it to have the value zero.
+This means that bit-field instructions count from the most significant
+bit.  If the machine has no bit-field instructions, then this must still
+be defined, but it doesn't matter which value it is defined to.  This
+macro need not be a constant.
+
+This macro does not affect the way structure fields are packed into
+bytes or words; that is controlled by BYTES_BIG_ENDIAN.
+*/
+#define BITS_BIG_ENDIAN 0
+
+/*
+Define this macro to have the value 1 if the most significant byte in a
+word has the lowest number. This macro need not be a constant.
+*/
+/*
+  Data is stored in a big-endian way.
+*/
+#define BYTES_BIG_ENDIAN 1
+
+/*
+Define this macro to have the value 1 if, in a multiword object, the
+most significant word has the lowest number.  This applies to both
+memory locations and registers; GCC fundamentally assumes that the
+order of words in memory is the same as the order in registers.  This
+macro need not be a constant.
+*/
+/*
+  Data is stored in a big-endian way.
+*/
+#define WORDS_BIG_ENDIAN 1
+
+/*
+Define this macro if WORDS_BIG_ENDIAN is not constant.  This must be a
+constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
+used only when compiling libgcc2.c.  Typically the value will be set
+based on preprocessor defines.
+*/
+#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+
+/*
+Define this macro to have the value 1 if DFmode, XFmode or
+TFmode floating point numbers are stored in memory with the word
+containing the sign bit at the lowest address; otherwise define it to
+have the value 0.  This macro need not be a constant.
+
+You need not define this macro if the ordering is the same as for
+multi-word integers.
+*/
+/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
+
+/*
+Define this macro to be the number of bits in an addressable storage
+unit (byte); normally 8.
+*/
+#define BITS_PER_UNIT 8
+
+/*
+Number of bits in a word; normally 32.
+*/
+#define BITS_PER_WORD 32
+
+/*
+Maximum number of bits in a word.  If this is undefined, the default is
+BITS_PER_WORD.  Otherwise, it is the constant value that is the
+largest value that BITS_PER_WORD can have at run-time.
+*/
+/* MAX_BITS_PER_WORD not defined*/
+
+/*
+Number of storage units in a word; normally 4.
+*/
+#define UNITS_PER_WORD 4
+
+/*
+Minimum number of units in a word.  If this is undefined, the default is
+UNITS_PER_WORD.  Otherwise, it is the constant value that is the
+smallest value that UNITS_PER_WORD can have at run-time.
+*/
+/* MIN_UNITS_PER_WORD not defined */
+
+/*
+Width of a pointer, in bits.  You must specify a value no wider than the
+width of Pmode.  If it is not equal to the width of Pmode,
+you must define POINTERS_EXTEND_UNSIGNED.
+*/
+#define POINTER_SIZE 32
+
+/*
+A C expression whose value is greater than zero if pointers that need to be
+extended from being POINTER_SIZE bits wide to Pmode are to
+be zero-extended and zero if they are to be sign-extended.  If the value
+is less than zero then there must be a "ptr_extend" instruction that
+extends a pointer from POINTER_SIZE to Pmode.
+
+You need not define this macro if the POINTER_SIZE is equal
+to the width of Pmode.
+*/
+/* #define POINTERS_EXTEND_UNSIGNED */
+
+/*
+A macro to update M and UNSIGNEDP when an object whose type
+is TYPE and which has the specified mode and signedness is to be
+stored in a register.  This macro is only called when TYPE is a
+scalar type.
+
+On most RISC machines, which only have operations that operate on a full
+register, define this macro to set M to word_mode if
+M is an integer mode narrower than BITS_PER_WORD.  In most
+cases, only integer modes should be widened because wider-precision
+floating-point operations are usually more expensive than their narrower
+counterparts.
+
+For most machines, the macro definition does not change UNSIGNEDP.
+However, some machines have instructions that preferentially handle
+either signed or unsigned quantities of certain modes.  For example, on
+the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
+sign-extend the result to 64 bits.  On such machines, set
+UNSIGNEDP according to which kind of extension is more efficient.
+
+Do not define this macro if it would never modify M.
+*/
+#define PROMOTE_MODE(M, UNSIGNEDP, TYPE)	\
+  do						\
+    {						\
+      if (GET_MODE_CLASS (M) == MODE_INT	\
+	  && GET_MODE_SIZE (M) < 4)		\
+	{					\
+	  (M) = SImode;				\
+	}					\
+    }						\
+  while (0)
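+/* Editor's note (illustrative): under this definition a QImode or HImode
+   scalar (1 or 2 bytes) is widened to SImode when held in a register,
+   while SImode and DImode values are left unchanged; UNSIGNEDP is never
+   modified.  */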
+
+/* Define if operations between registers always perform the operation
+   on the full register even if a narrower mode is specified.  */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+   will either zero-extend or sign-extend.  The value of this macro should
+   be the code that says which one of the two operations is implicitly
+   done, UNKNOWN if not known.  */
+#define LOAD_EXTEND_OP(MODE)				\
+   (((MODE) == QImode) ? ZERO_EXTEND			\
+   : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
+
+
+/*
+Define this macro if the promotion described by PROMOTE_MODE
+should only be performed for outgoing function arguments or
+function return values, as specified by PROMOTE_FUNCTION_ARGS
+and PROMOTE_FUNCTION_RETURN, respectively.
+*/
+/* #define PROMOTE_FOR_CALL_ONLY */
+
+/*
+Normal alignment required for function parameters on the stack, in
+bits.  All stack parameters receive at least this much alignment
+regardless of data type.  On most machines, this is the same as the
+size of an integer.
+*/
+#define PARM_BOUNDARY 32
+
+/*
+Define this macro to the minimum alignment enforced by hardware for the
+stack pointer on this machine.  The definition is a C expression for the
+desired alignment (measured in bits).  This value is used as a default
+if PREFERRED_STACK_BOUNDARY is not defined.  On most machines,
+this should be the same as PARM_BOUNDARY.
+*/
+#define STACK_BOUNDARY 32
+
+/*
+Define this macro if you wish to preserve a certain alignment for the
+stack pointer, greater than what the hardware enforces.  The definition
+is a C expression for the desired alignment (measured in bits).  This
+macro must evaluate to a value equal to or larger than
+STACK_BOUNDARY.
+*/
+#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
+
+/*
+Alignment required for a function entry point, in bits.
+*/
+#define FUNCTION_BOUNDARY 16
+
+/*
+Biggest alignment that any data type can require on this machine, in bits.
+*/
+#define BIGGEST_ALIGNMENT  (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
+
+/*
+If defined, the smallest alignment, in bits, that can be given to an
+object that can be referenced in one operation, without disturbing any
+nearby object.  Normally, this is BITS_PER_UNIT, but may be larger
+on machines that don't have byte or half-word store operations.
+*/
+#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
+
+
+/*
+An integer expression for the size in bits of the largest integer machine mode that
+should actually be used. All integer machine modes of this size or smaller can be
+used for structures and unions with the appropriate sizes. If this macro is undefined,
+GET_MODE_BITSIZE (DImode) is assumed.*/
+#define MAX_FIXED_MODE_SIZE  GET_MODE_BITSIZE (DImode)
+
+
+/*
+If defined, a C expression to compute the alignment given to a constant
+that is being placed in memory.  CONSTANT is the constant and
+BASIC_ALIGN is the alignment that the object would ordinarily
+have.  The value of this macro is used instead of that alignment to
+align the object.
+
+If this macro is not defined, then BASIC_ALIGN is used.
+
+The typical use of this macro is to increase alignment for string
+constants to be word aligned so that strcpy calls that copy
+constants can be done inline.
+*/
+#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
+ ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
+
+/* Try to align strings to a word boundary. */
+#define DATA_ALIGNMENT(TYPE, ALIGN)                                     \
+  ({(TREE_CODE (TYPE) == ARRAY_TYPE                                     \
+     && TYPE_MODE (TREE_TYPE (TYPE)) == QImode                          \
+     && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
+
+/* Try to align strings in local storage to a word boundary. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN)                                    \
+  ({(TREE_CODE (TYPE) == ARRAY_TYPE                                     \
+     && TYPE_MODE (TREE_TYPE (TYPE)) == QImode                          \
+     && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
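+/* Editor's note (illustrative, not from the original patch): with the two
+   macros above, a narrow character array such as
+
+     static char msg[] = "hello";
+
+   is given 32-bit rather than 8-bit alignment, so word-wise copies of
+   string constants can be emitted inline.  */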
+
+/*
+Define this macro to be the value 1 if instructions will fail to work
+if given data not on the nominal alignment.  If instructions will merely
+go slower in that case, define this macro as 0.
+*/
+#define STRICT_ALIGNMENT 1
+
+/*
+Define this if you wish to imitate the way many other C compilers handle
+alignment of bit-fields and the structures that contain them.
+
+The behavior is that the type written for a bit-field (int,
+short, or other integer type) imposes an alignment for the
+entire structure, as if the structure really did contain an ordinary
+field of that type.  In addition, the bit-field is placed within the
+structure so that it would fit within such a field, not crossing a
+boundary for it.
+
+Thus, on most machines, a bit-field whose type is written as int
+would not cross a four-byte boundary, and would force four-byte
+alignment for the whole structure.  (The alignment used may not be four
+bytes; it is controlled by the other alignment parameters.)
+
+If the macro is defined, its definition should be a C expression;
+a nonzero value for the expression enables this behavior.
+
+Note that if this macro is not defined, or its value is zero, some
+bit-fields may cross more than one alignment boundary.  The compiler can
+support such references if there are insv, extv, and
+extzv insns that can directly reference memory.
+
+The other known way of making bit-fields work is to define
+STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
+Then every structure can be accessed with fullwords.
+
+Unless the machine has bit-field instructions or you define
+STRUCTURE_SIZE_BOUNDARY that way, you must define
+PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
+
+If your aim is to make GCC use the same conventions for laying out
+bit-fields as are used by another compiler, here is how to investigate
+what the other compiler does.  Compile and run this program:
+
+struct foo1
+{
+  char x;
+  char :0;
+  char y;
+};
+
+struct foo2
+{
+  char x;
+  int :0;
+  char y;
+};
+
+main ()
+{
+  printf ("Size of foo1 is %d\n",
+          sizeof (struct foo1));
+  printf ("Size of foo2 is %d\n",
+          sizeof (struct foo2));
+  exit (0);
+}
+
+If this prints 2 and 5, then the compiler's behavior is what you would
+get from PCC_BITFIELD_TYPE_MATTERS.
+*/
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+
+/******************************************************************************
+ * Layout of Source Language Data Types
+ *****************************************************************************/
+
+/*
+A C expression for the size in bits of the type int on the
+target machine.  If you don't define this, the default is one word.
+*/
+#define INT_TYPE_SIZE 32
+
+/*
+A C expression for the size in bits of the type short on the
+target machine.  If you don't define this, the default is half a word. (If
+this would be less than one storage unit, it is rounded up to one unit.)
+*/
+#define SHORT_TYPE_SIZE 16
+
+/*
+A C expression for the size in bits of the type long on the
+target machine.  If you don't define this, the default is one word.
+*/
+#define LONG_TYPE_SIZE 32
+
+
+/*
+A C expression for the size in bits of the type long long on the
+target machine.  If you don't define this, the default is two
+words.  If you want to support GNU Ada on your machine, the value of this
+macro must be at least 64.
+*/
+#define LONG_LONG_TYPE_SIZE 64
+
+/*
+A C expression for the size in bits of the type char on the
+target machine.  If you don't define this, the default is
+BITS_PER_UNIT.
+*/
+#define CHAR_TYPE_SIZE 8
+
+
+/*
+A C expression for the size in bits of the C++ type bool and
+C99 type _Bool on the target machine.  If you don't define
+this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
+*/
+#define BOOL_TYPE_SIZE 8
+
+
+/*
+An expression whose value is 1 or 0, according to whether the type
+char should be signed or unsigned by default.  The user can
+always override this default with the options -fsigned-char
+and -funsigned-char.
+*/
+/* We are using unsigned char */
+#define DEFAULT_SIGNED_CHAR 0
+
+
+/*
+A C expression for a string describing the name of the data type to use
+for size values.  The typedef name size_t is defined using the
+contents of the string.
+
+The string can contain more than one keyword.  If so, separate them with
+spaces, and write first any length keyword, then unsigned if
+appropriate, and finally int.  The string must exactly match one
+of the data type names defined in the function
+init_decl_processing in the file c-decl.c.  You may not
+omit int or change the order - that would cause the compiler to
+crash on startup.
+
+If you don't define this macro, the default is "long unsigned int".
+*/
+#define SIZE_TYPE "long unsigned int"
+
+/*
+A C expression for a string describing the name of the data type to use
+for the result of subtracting two pointers.  The typedef name
+ptrdiff_t is defined using the contents of the string.  See
+SIZE_TYPE above for more information.
+
+If you don't define this macro, the default is "long int".
+*/
+#define PTRDIFF_TYPE "long int"
+
+
+/*
+A C expression for the size in bits of the data type for wide
+characters.  This is used in cpp, which cannot make use of
+WCHAR_TYPE.
+*/
+#define WCHAR_TYPE_SIZE 32
+
+
+/*
+A C expression for a string describing the name of the data type to
+use for wide characters passed to printf and returned from
+getwc.  The typedef name wint_t is defined using the
+contents of the string.  See SIZE_TYPE above for more
+information.
+
+If you don't define this macro, the default is "unsigned int".
+*/
+#define WINT_TYPE "unsigned int"
+
+/*
+A C expression for a string describing the name of the data type that
+can represent any value of any standard or extended signed integer type.
+The typedef name intmax_t is defined using the contents of the
+string.  See SIZE_TYPE above for more information.
+
+If you don't define this macro, the default is the first of
+"int", "long int", or "long long int" that has as
+much precision as long long int.
+*/
+#define INTMAX_TYPE "long long int"
+
+/*
+A C expression for a string describing the name of the data type that
+can represent any value of any standard or extended unsigned integer
+type.  The typedef name uintmax_t is defined using the contents
+of the string.  See SIZE_TYPE above for more information.
+
+If you don't define this macro, the default is the first of
+"unsigned int", "long unsigned int", or "long long unsigned int"
+that has as much precision as long long unsigned int.
+*/
+#define UINTMAX_TYPE "long long unsigned int"
+
+
+/******************************************************************************
+ * Register Usage
+ *****************************************************************************/
+
+/* Convert from gcc internal register number to register number
+   used in assembly code */
+#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
+#define ASM_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
+
+/* Convert between register number used in assembly to gcc
+   internal register number  */
+#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
+#define INTERNAL_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
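+/* Editor's note (illustrative, not part of the original patch): with
+   LAST_REGNUM == 15 the numbering is simply mirrored, so internal regno 0
+   is assembly register r15 (PC), internal regno 1 is r14 (LR), internal
+   regno 2 is r13 (SP) and internal regno 15 is r0, matching the ordering
+   used in FIXED_REGISTERS and CALL_USED_REGISTERS below.  */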
+
+/** Basic Characteristics of Registers **/
+
+/*
+Number of hardware registers known to the compiler.  They receive
+numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
+pseudo register's number really is assigned the number
+FIRST_PSEUDO_REGISTER.
+*/
+#define FIRST_PSEUDO_REGISTER (LAST_FP_REGNUM + 1)
+
+#define FIRST_REGNUM 0
+#define LAST_REGNUM 15
+#define NUM_FP_REGS 16
+#define FIRST_FP_REGNUM 16
+#define LAST_FP_REGNUM (16+NUM_FP_REGS-1)
+
+/*
+An initializer that says which registers are used for fixed purposes
+all throughout the compiled code and are therefore not available for
+general allocation.  These would include the stack pointer, the frame
+pointer (except on machines where that can be used as a general
+register when no frame pointer is needed), the program counter on
+machines where that is considered one of the addressable registers,
+and any other numbered register with a standard use.
+
+This information is expressed as a sequence of numbers, separated by
+commas and surrounded by braces.  The nth number is 1 if
+register n is fixed, 0 otherwise.
+
+The table initialized from this macro, and the table initialized by
+the following one, may be overridden at run time either automatically,
+by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
+the user with the command options -ffixed-[reg],
+-fcall-used-[reg] and -fcall-saved-[reg].
+*/
+
+/* The internal GCC register numbers are reversed
+   compared to the real register numbers, since
+   GCC expects data types stored over multiple
+   registers in the register file to be big endian
+   if the memory layout is big endian. This is not
+   the case for AVR32, so we fake a big-endian
+   register file. */
+
+#define FIXED_REGISTERS {	\
+  1, /* Program Counter */	\
+  0, /* Link Register */	\
+  1, /* Stack Pointer */	\
+  0, /* r12 */			\
+  0, /* r11 */			\
+  0, /* r10 */			\
+  0, /* r9 */			\
+  0, /* r8 */			\
+  0, /* r7 */			\
+  0, /* r6 */			\
+  0, /* r5 */			\
+  0, /* r4 */			\
+  0, /* r3 */			\
+  0, /* r2 */			\
+  0, /* r1 */			\
+  0, /* r0 */			\
+  0, /* f15 */			\
+  0, /* f14 */			\
+  0, /* f13 */			\
+  0, /* f12 */			\
+  0, /* f11 */			\
+  0, /* f10 */			\
+  0, /* f9 */			\
+  0, /* f8 */			\
+  0, /* f7 */			\
+  0, /* f6 */			\
+  0, /* f5 */			\
+  0, /* f4 */			\
+  0, /* f3 */			\
+  0, /* f2*/			\
+  0, /* f1 */			\
+  0  /* f0 */			\
+}
+
+/*
+Like FIXED_REGISTERS but has 1 for each register that is
+clobbered (in general) by function calls as well as for fixed
+registers.  This macro therefore identifies the registers that are not
+available for general allocation of values that must live across
+function calls.
+
+If a register has 0 in CALL_USED_REGISTERS, the compiler
+automatically saves it on function entry and restores it on function
+exit, if the register is used within the function.
+*/
+#define CALL_USED_REGISTERS {	\
+  1, /* Program Counter */	\
+  0, /* Link Register */	\
+  1, /* Stack Pointer */	\
+  1, /* r12 */			\
+  1, /* r11 */			\
+  1, /* r10 */			\
+  1, /* r9 */			\
+  1, /* r8 */			\
+  0, /* r7 */			\
+  0, /* r6 */			\
+  0, /* r5 */			\
+  0, /* r4 */			\
+  0, /* r3 */			\
+  0, /* r2 */			\
+  0, /* r1 */			\
+  0, /* r0 */			\
+  1, /* f15 */			\
+  1, /* f14 */			\
+  1, /* f13 */			\
+  1, /* f12 */			\
+  1, /* f11 */			\
+  1, /* f10 */			\
+  1, /* f9 */			\
+  1, /* f8 */			\
+  0, /* f7 */			\
+  0, /* f6 */			\
+  0, /* f5 */			\
+  0, /* f4 */			\
+  0, /* f3 */			\
+  0, /* f2*/			\
+  0, /* f1*/			\
+  0, /* f0 */			\
+}
+
+/* Interrupt functions can only use registers that have already been
+   saved by the prologue, even if they would normally be
+   call-clobbered.  */
+#define HARD_REGNO_RENAME_OK(SRC, DST)					\
+	(! IS_INTERRUPT (cfun->machine->func_type) ||			\
+		regs_ever_live[DST])
+
+
+/*
+Zero or more C statements that may conditionally modify five variables
+fixed_regs, call_used_regs, global_regs,
+reg_names, and reg_class_contents, to take into account
+any dependence of these register sets on target flags.  The first three
+of these are of type char [] (interpreted as Boolean vectors).
+global_regs is a const char *[], and
+reg_class_contents is a HARD_REG_SET.  Before the macro is
+called, fixed_regs, call_used_regs,
+reg_class_contents, and reg_names have been initialized
+from FIXED_REGISTERS, CALL_USED_REGISTERS,
+REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
+global_regs has been cleared, and any -ffixed-[reg],
+-fcall-used-[reg] and -fcall-saved-[reg]
+command options have been applied.
+
+You need not define this macro if it has no work to do.
+
+If the usage of an entire class of registers depends on the target
+flags, you may indicate this to GCC by using this macro to modify
+fixed_regs and call_used_regs to 1 for each of the
+registers in the classes which should not be used by GCC.  Also define
+the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
+is called with a letter for a class that shouldn't be used.
+
+ (However, if this class is not included in GENERAL_REGS and all
+of the insn patterns whose constraints permit this class are
+controlled by target switches, then GCC will automatically avoid using
+these registers when the target switches are opposed to them.)
+*/
+#define CONDITIONAL_REGISTER_USAGE                              \
+  do								\
+    {								\
+      int regno;						\
+								\
+      if (TARGET_SOFT_FLOAT)			                \
+	{							\
+	  for (regno = FIRST_FP_REGNUM;				\
+	       regno <= LAST_FP_REGNUM; ++regno)	        \
+	    fixed_regs[regno] = call_used_regs[regno] = 1;	\
+	}							\
+      if (flag_pic)						\
+	{							\
+	  fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;		\
+	  call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;		\
+	}							\
+    }								\
+  while (0)
+
+
+/*
+If the program counter has a register number, define this as that
+register number.  Otherwise, do not define it.
+*/
+
+#define LAST_AVR32_REGNUM 16
+
+
+/** Order of Allocation of Registers **/
+
+/*
+If defined, an initializer for a vector of integers, containing the
+numbers of hard registers in the order in which GCC should prefer
+to use them (from most preferred to least).
+
+If this macro is not defined, registers are used lowest numbered first
+(all else being equal).
+
+One use of this macro is on machines where the highest numbered
+registers must always be saved and the save-multiple-registers
+instruction supports only sequences of consecutive registers.  On such
+machines, define REG_ALLOC_ORDER to be an initializer that lists
+the highest numbered allocable register first.
+*/
+#define REG_ALLOC_ORDER 	\
+{				\
+  INTERNAL_REGNUM(8),		\
+  INTERNAL_REGNUM(9),		\
+  INTERNAL_REGNUM(10),		\
+  INTERNAL_REGNUM(11),		\
+  INTERNAL_REGNUM(12),		\
+  LR_REGNUM,			\
+  INTERNAL_REGNUM(7),		\
+  INTERNAL_REGNUM(6),		\
+  INTERNAL_REGNUM(5),		\
+  INTERNAL_REGNUM(4),		\
+  INTERNAL_REGNUM(3),		\
+  INTERNAL_REGNUM(2),		\
+  INTERNAL_REGNUM(1),		\
+  INTERNAL_REGNUM(0),		\
+  INTERNAL_FP_REGNUM(15),	\
+  INTERNAL_FP_REGNUM(14),	\
+  INTERNAL_FP_REGNUM(13),	\
+  INTERNAL_FP_REGNUM(12),	\
+  INTERNAL_FP_REGNUM(11),	\
+  INTERNAL_FP_REGNUM(10),	\
+  INTERNAL_FP_REGNUM(9),	\
+  INTERNAL_FP_REGNUM(8),	\
+  INTERNAL_FP_REGNUM(7),	\
+  INTERNAL_FP_REGNUM(6),	\
+  INTERNAL_FP_REGNUM(5),	\
+  INTERNAL_FP_REGNUM(4),	\
+  INTERNAL_FP_REGNUM(3),	\
+  INTERNAL_FP_REGNUM(2),	\
+  INTERNAL_FP_REGNUM(1),	\
+  INTERNAL_FP_REGNUM(0),	\
+  SP_REGNUM,           		\
+  PC_REGNUM			\
+}
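+/* Editor's note (illustrative): the order above makes the allocator prefer
+   r8-r12 and LR first, then r7 down through r0, then the FP registers f15
+   down through f0, leaving SP and PC last.  */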
+
+
+/** How Values Fit in Registers **/
+
+/*
+A C expression for the number of consecutive hard registers, starting
+at register number REGNO, required to hold a value of mode
+MODE.
+
+On a machine where all registers are exactly one word, a suitable
+definition of this macro is
+
+#define HARD_REGNO_NREGS(REGNO, MODE)            \
+   ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1)  \
+    / UNITS_PER_WORD)
+*/
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+  ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD))
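+/* Editor's note (illustrative): with UNITS_PER_WORD == 4 this yields one
+   register for QImode/HImode/SImode/SFmode values and two consecutive
+   registers for DImode/DFmode values.  */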
+
+/*
+A C expression that is nonzero if it is permissible to store a value
+of mode MODE in hard register number REGNO (or in several
+registers starting with that one).  For a machine where all registers
+are equivalent, a suitable definition is
+
+  #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+You need not include code to check for the numbers of fixed registers,
+because the allocation mechanism considers them to be always occupied.
+
+On some machines, double-precision values must be kept in even/odd
+register pairs.  You can implement that by defining this macro to reject
+odd register numbers for such modes.
+
+The minimum requirement for a mode to be OK in a register is that the
+mov[mode] instruction pattern support moves between the
+register and other hard register in the same class and that moving a
+value into the register and back out not alter it.
+
+Since the same instruction used to move word_mode will work for
+all narrower integer modes, it is not necessary on any machine for
+HARD_REGNO_MODE_OK to distinguish between these modes, provided
+you define patterns movhi, etc., to take advantage of this.  This
+is useful because of the interaction between HARD_REGNO_MODE_OK
+and MODES_TIEABLE_P; it is very desirable for all integer modes
+to be tieable.
+
+Many machines have special registers for floating point arithmetic.
+Often people assume that floating point machine modes are allowed only
+in floating point registers.  This is not true.  Any registers that
+can hold integers can safely hold a floating point machine
+mode, whether or not floating arithmetic can be done on it in those
+registers.  Integer move instructions can be used to move the values.
+
+On some machines, though, the converse is true: fixed-point machine
+modes may not go in floating registers.  This is true if the floating
+registers normalize any value stored in them, because storing a
+non-floating value there would garble it.  In this case,
+HARD_REGNO_MODE_OK should reject fixed-point machine modes in
+floating registers.  But if the floating registers do not automatically
+normalize, if you can store any bit pattern in one and retrieve it
+unchanged without a trap, then any machine mode may go in a floating
+register, so you can define this macro to say so.
+
+The primary significance of special floating registers is rather that
+they are the registers acceptable in floating point arithmetic
+instructions.  However, this is of no concern to
+HARD_REGNO_MODE_OK.  You handle it by writing the proper
+constraints for those instructions.
+
+On some machines, the floating registers are especially slow to access,
+so that it is better to store a value in a stack frame than in such a
+register if floating point arithmetic is not being done.  As long as the
+floating registers are not in class GENERAL_REGS, they will not
+be used unless some pattern's constraint asks for one.
+*/
+#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
+
+/*
+A C expression that is nonzero if a value of mode
+MODE1 is accessible in mode MODE2 without copying.
+
+If HARD_REGNO_MODE_OK(R, MODE1) and
+HARD_REGNO_MODE_OK(R, MODE2) are always the same for
+any R, then MODES_TIEABLE_P(MODE1, MODE2)
+should be nonzero.  If they differ for any R, you should define
+this macro to return zero unless some other mechanism ensures the
+accessibility of the value in a narrower mode.
+
+You should define this macro to return nonzero in as many cases as
+possible since doing so will allow GCC to perform better register
+allocation.
+*/
+#define MODES_TIEABLE_P(MODE1, MODE2)  \
+  (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+
+
+/******************************************************************************
+ * Register Classes
+ *****************************************************************************/
+
+/*
+An enumeral type that must be defined with all the register class names
+as enumeral values.  NO_REGS must be first.  ALL_REGS
+must be the last register class, followed by one more enumeral value,
+LIM_REG_CLASSES, which is not a register class but rather
+tells how many classes there are.
+
+Each register class has a number, which is the value of casting
+the class name to type int.  The number serves as an index
+in many of the tables described below.
+*/
+enum reg_class
+{
+  NO_REGS,
+  GENERAL_REGS,
+  FP_REGS,
+  ALL_REGS,
+  LIM_REG_CLASSES
+};
+
+/*
+The number of distinct register classes, defined as follows:
+  #define N_REG_CLASSES (int) LIM_REG_CLASSES
+*/
+#define N_REG_CLASSES (int)LIM_REG_CLASSES
+
+/*
+An initializer containing the names of the register classes as C string
+constants.  These names are used in writing some of the debugging dumps.
+*/
+#define REG_CLASS_NAMES		\
+{				\
+  "NO_REGS",			\
+  "GENERAL_REGS",		\
+  "FLOATING_POINT_REGS",	\
+  "ALL_REGS"			\
+}
+
+/*
+An initializer containing the contents of the register classes, as integers
+which are bit masks.  The nth integer specifies the contents of class
+n.  The way the integer mask is interpreted is that
+register r is in the class if mask & (1 << r) is 1.
+
+When the machine has more than 32 registers, an integer does not suffice.
+Then the integers are replaced by sub-initializers, braced groupings containing
+several integers.  Each sub-initializer must be suitable as an initializer
+for the type HARD_REG_SET which is defined in hard-reg-set.h.
+In this situation, the first integer in each sub-initializer corresponds to
+registers 0 through 31, the second integer to registers 32 through 63, and
+so on.
+*/
+#define REG_CLASS_CONTENTS {		\
+  {0x00000000}, /* NO_REGS */		\
+  {0x0000FFFF}, /* GENERAL_REGS */	\
+  {0xFFFF0000}, /* FP_REGS */		\
+  {0x7FFFFFFF}, /* ALL_REGS */		\
+}
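+/* Editor's note (illustrative): hard registers 0-15 (the integer file,
+   mask 0x0000FFFF) form GENERAL_REGS and registers 16-31 (the FP file,
+   mask 0xFFFF0000) form FP_REGS; note that ALL_REGS as written
+   (0x7FFFFFFF) does not include register 31.  */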
+
+
+/*
+A C expression whose value is a register class containing hard register
+REGNO.  In general there is more than one such class; choose a class
+which is minimal, meaning that no smaller class also contains the
+register.
+*/
+#define REGNO_REG_CLASS(REGNO) ((REGNO < 16) ? GENERAL_REGS : FP_REGS)
+
+/*
+A macro whose definition is the name of the class to which a valid
+base register must belong.  A base register is one used in an address
+which is the register value plus a displacement.
+*/
+#define BASE_REG_CLASS GENERAL_REGS
+
+/*
+This is a variation of the BASE_REG_CLASS macro which allows
+the selection of a base register in a mode dependent manner.  If
+mode is VOIDmode then it should return the same value as
+BASE_REG_CLASS.
+*/
+#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
+
+/*
+A macro whose definition is the name of the class to which a valid
+index register must belong.  An index register is one used in an
+address where its value is either multiplied by a scale factor or
+added to another register (as well as added to a displacement).
+*/
+#define INDEX_REG_CLASS BASE_REG_CLASS
+
+/*
+A C expression which defines the machine-dependent operand constraint
+letters for register classes.  If CHAR is such a letter, the
+value should be the register class corresponding to it.  Otherwise,
+the value should be NO_REGS.  The register letter r,
+corresponding to class GENERAL_REGS, will not be passed
+to this macro; you do not need to handle it.
+*/
+#define REG_CLASS_FROM_LETTER(CHAR) ((CHAR) == 'f' ? FP_REGS : NO_REGS)
+
+
+/* These assume that REGNO is a hard or pseudo reg number.
+   They give nonzero only if REGNO is a hard reg of the suitable class
+   or a pseudo reg currently allocated to a suitable hard reg.
+   Since they use reg_renumber, they are safe only once reg_renumber
+   has been allocated, which happens in local-alloc.c.  */
+#define TEST_REGNO(R, TEST, VALUE) \
+  ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
+
+/*
+A C expression which is nonzero if register number NUM is suitable for use
+as a base register in operand addresses.  It may be either a suitable hard
+register or a pseudo register that has been allocated such a hard register.
+*/
+#define REGNO_OK_FOR_BASE_P(NUM)  TEST_REGNO(NUM, <=, LAST_REGNUM)
+
+/*
+A C expression which is nonzero if register number NUM is
+suitable for use as an index register in operand addresses.  It may be
+either a suitable hard register or a pseudo register that has been
+allocated such a hard register.
+
+The difference between an index register and a base register is that
+the index register may be scaled.  If an address involves the sum of
+two registers, neither one of them scaled, then either one may be
+labeled the ``base'' and the other the ``index''; but whichever
+labeling is used must fit the machine's constraints of which registers
+may serve in each capacity.  The compiler will try both labelings,
+looking for one that is valid, and will reload one or both registers
+only if neither labeling works.
+*/
+#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
+
+/*
+A C expression that places additional restrictions on the register class
+to use when it is necessary to copy value X into a register in class
+CLASS.  The value is a register class; perhaps CLASS, or perhaps
+another, smaller class.  On many machines, the following definition is
+safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
+
+Sometimes returning a more restrictive class makes better code.  For
+example, on the 68000, when X is an integer constant that is in range
+for a 'moveq' instruction, the value of this macro is always
+DATA_REGS as long as CLASS includes the data registers.
+Requiring a data register guarantees that a 'moveq' will be used.
+
+If X is a const_double, by returning NO_REGS
+you can force X into a memory constant.  This is useful on
+certain machines where immediate floating values cannot be loaded into
+certain kinds of registers.
+*/
+#define PREFERRED_RELOAD_CLASS(X, CLASS)  CLASS
+
+
+
+/*
+A C expression for the maximum number of consecutive registers
+of class CLASS needed to hold a value of mode MODE.
+
+This is closely related to the macro HARD_REGNO_NREGS.  In fact,
+the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
+should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
+for all REGNO values in the class CLASS.
+
+This macro helps control the handling of multiple-word values
+in the reload pass.
+*/
+#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
+  (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
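+
+/* Worked example (illustrative only): assuming UNITS_PER_WORD is 4 on
+   AVR32, CLASS_MAX_NREGS(CLASS, SImode) is (4 + 3) / 4 = 1 register and
+   CLASS_MAX_NREGS(CLASS, DImode) is (8 + 3) / 4 = 2 registers. */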
+
+
+/*
+  Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
+  in order to support constraints with more than one letter.
+  Only two letters are then used for constant constraints,
+  the letter 'K' and the letter 'I'. The constraint starting with
+  these letters must consist of four characters. The character following
+  'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
+  if the constant is zero or sign extended. The last two characters specify
+  the length in bits of the constant. The base constraint letter 'I' means
+  that this is a negated constant, meaning that actually -VAL should be
+  checked to lie within the valid range instead of VAL, which is used when
+  'K' is the base constraint letter.
+
+*/
+
+#define CONSTRAINT_LEN(C, STR)				\
+  ( ((C) == 'K' || (C) == 'I') ?  4 :			\
+    ((C) == 'R') ?  5 :					\
+    ((C) == 'N' || (C) == 'O' ||			\
+     (C) == 'P' || (C) == 'L' || (C) == 'J') ? -1 :	\
+    DEFAULT_CONSTRAINT_LEN((C), (STR)) )
+
+#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR)	\
+  avr32_const_ok_for_constraint_p(VALUE, C, STR)
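+
+/* Examples of the constraint syntax described above (illustrative only):
+   "Ks16" accepts a signed 16-bit constant and "Ku08" an unsigned 8-bit
+   constant, while "Is16" accepts a constant whose negation fits in a
+   signed 16-bit range.  The two-digit widths shown here are only
+   examples; the actual checking is done by
+   avr32_const_ok_for_constraint_p(). */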
+
+/*
+A C expression that defines the machine-dependent operand constraint
+letters that specify particular ranges of const_double values ('G' or 'H').
+
+If C is one of those letters, the expression should check that
+VALUE, an RTX of code const_double, is in the appropriate
+range and return 1 if so, 0 otherwise.  If C is not one of those
+letters, the value should be 0 regardless of VALUE.
+
+const_double is used for all floating-point constants and for
+DImode fixed-point constants.  A given letter can accept either
+or both kinds of values.  It can use GET_MODE to distinguish
+between these kinds.
+*/
+#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
+  ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
+
+/*
+A C expression that defines the optional machine-dependent constraint
+letters that can be used to segregate specific types of operands, usually
+memory references, for the target machine.  Any letter that is not
+elsewhere defined and not matched by REG_CLASS_FROM_LETTER
+may be used.  Normally this macro will not be defined.
+
+If it is required for a particular target machine, it should return 1
+if VALUE corresponds to the operand type represented by the
+constraint letter C.  If C is not defined as an extra
+constraint, the value returned should be 0 regardless of VALUE.
+
+For example, on the ROMP, load instructions cannot have their output
+in r0 if the memory reference contains a symbolic address.  Constraint
+letter 'Q' is defined as representing a memory address that does
+not contain a symbolic address.  An alternative is specified with
+a 'Q' constraint on the input and 'r' on the output.  The next
+alternative specifies 'm' on the input and a register class that
+does not include r0 on the output.
+*/
+#define EXTRA_CONSTRAINT_STR(OP, C, STR)				\
+  ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) :		\
+   (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) ||	\
+                 (avr32_imm_disp_memory_operand(OP, GET_MODE(OP))	\
+                  && avr32_const_ok_for_constraint_p(			\
+				INTVAL(XEXP(XEXP(OP, 0), 1)),		\
+				(STR)[1], &(STR)[1]))) :		\
+   (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) :	\
+   (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) :	\
+   (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) :			\
+   (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) :		\
+   0)
+
+
+#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') ||               \
+                                          ((C) == 'S') ||               \
+                                          ((C) == 'Z') )
+
+
+/* Returns nonzero if op is a function SYMBOL_REF which
+   can be called using an rcall instruction */
+#define SYMBOL_REF_RCALL_FUNCTION_P(op)  \
+  ( GET_CODE(op) == SYMBOL_REF           \
+    && SYMBOL_REF_FUNCTION_P(op)         \
+    && SYMBOL_REF_LOCAL_P(op)            \
+    && !SYMBOL_REF_EXTERNAL_P(op)        \
+    && !TARGET_HAS_ASM_ADDR_PSEUDOS )
+
+/******************************************************************************
+ * Stack Layout and Calling Conventions
+ *****************************************************************************/
+
+/** Basic Stack Layout **/
+
+/*
+Define this macro if pushing a word onto the stack moves the stack
+pointer to a smaller address.
+
+When we say, ``define this macro if ...,'' it means that the
+compiler checks this macro only with #ifdef so the precise
+definition used does not matter.
+*/
+/* pushm decrements SP: *(--SP) <-- Rx */
+#define STACK_GROWS_DOWNWARD
+
+/*
+This macro defines the operation used when something is pushed
+on the stack.  In RTL, a push operation will be
+(set (mem (STACK_PUSH_CODE (reg sp))) ...)
+
+The choices are PRE_DEC, POST_DEC, PRE_INC,
+and POST_INC.  Which of these is correct depends on
+the stack direction and on whether the stack pointer points
+to the last item on the stack or whether it points to the
+space for the next item on the stack.
+
+The default is PRE_DEC when STACK_GROWS_DOWNWARD is
+defined, which is almost always right, and PRE_INC otherwise,
+which is often wrong.
+*/
+/* pushm: *(--SP) <-- Rx */
+#define STACK_PUSH_CODE PRE_DEC
+
+/*
+Define this macro if the addresses of local variable slots are at negative
+offsets from the frame pointer.
+*/
+#define FRAME_GROWS_DOWNWARD
+
+
+/*
+Offset from the frame pointer to the first local variable slot to be allocated.
+
+If FRAME_GROWS_DOWNWARD, find the next slot's offset by
+subtracting the first slot's length from STARTING_FRAME_OFFSET.
+Otherwise, it is found by adding the length of the first slot to the
+value STARTING_FRAME_OFFSET.
+*/
+#define STARTING_FRAME_OFFSET 0
+
+/*
+Offset from the stack pointer register to the first location at which
+outgoing arguments are placed.  If not specified, the default value of
+zero is used.  This is the proper value for most machines.
+
+If ARGS_GROW_DOWNWARD, this is the offset to the location above
+the first location at which outgoing arguments are placed.
+*/
+#define STACK_POINTER_OFFSET 0
+
+/*
+Offset from the argument pointer register to the first argument's
+address.  On some machines it may depend on the data type of the
+function.
+
+If ARGS_GROW_DOWNWARD, this is the offset to the location above
+the first argument's address.
+*/
+#define FIRST_PARM_OFFSET(FUNDECL) 0
+
+
+/*
+A C expression whose value is RTL representing the address in a stack
+frame where the pointer to the caller's frame is stored.  Assume that
+FRAMEADDR is an RTL expression for the address of the stack frame
+itself.
+
+If you don't define this macro, the default is to return the value
+of FRAMEADDR - that is, the stack frame address is also the
+address of the stack word that points to the previous frame.
+*/
+#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
+
+
+/*
+A C expression whose value is RTL representing the value of the return
+address for the frame COUNT steps up from the current frame, after
+the prologue.  FRAMEADDR is the frame pointer of the COUNT
+frame, or the frame pointer of the COUNT - 1 frame if
+RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
+
+The value of the expression must always be the correct address when
+COUNT is zero, but may be NULL_RTX if there is no way to
+determine the return address of other frames.
+*/
+#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
+
+
+/*
+A C expression whose value is RTL representing the location of the
+incoming return address at the beginning of any function, before the
+prologue.  This RTL is either a REG, indicating that the return
+value is saved in 'REG', or a MEM representing a location in
+the stack.
+
+You only need to define this macro if you want to support call frame
+debugging information like that provided by DWARF 2.
+
+If this RTL is a REG, you should also define
+DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
+*/
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
+
+
+
+/*
+A C expression whose value is an integer giving the offset, in bytes,
+from the value of the stack pointer register to the top of the stack
+frame at the beginning of any function, before the prologue.  The top of
+the frame is defined to be the value of the stack pointer in the
+previous frame, just before the call instruction.
+
+You only need to define this macro if you want to support call frame
+debugging information like that provided by DWARF 2.
+*/
+#define INCOMING_FRAME_SP_OFFSET 0
+
+
+/** Exception Handling Support **/
+
+#define DWARF2_UNWIND_INFO 1
+
+/*
+A C expression whose value is the Nth register number used for
+data by exception handlers, or INVALID_REGNUM if fewer than
+N registers are usable.
+
+The exception handling library routines communicate with the exception
+handlers via a set of agreed upon registers.  Ideally these registers
+should be call-clobbered; it is possible to use call-saved registers,
+but may negatively impact code size.  The target must support at least
+2 data registers, but should define 4 if there are enough free registers.
+
+You must define this macro if you want to support call frame exception
+handling like that provided by DWARF 2.
+*/
+/*
+  Use r8-r11
+*/
+#define EH_RETURN_DATA_REGNO(N)					\
+  ((N) < 4 ? INTERNAL_REGNUM((N) + 8U) : INVALID_REGNUM)
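+
+/* Resulting mapping (for reference): N = 0..3 give INTERNAL_REGNUM(8)
+   through INTERNAL_REGNUM(11), i.e. r8-r11 as noted above, and any
+   N >= 4 gives INVALID_REGNUM. */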
+
+/*
+A C expression whose value is RTL representing a location in which
+to store a stack adjustment to be applied before function return.
+This is used to unwind the stack to an exception handler's call frame.
+It will be assigned zero on code paths that return normally.
+
+Typically this is a call-clobbered hard register that is otherwise
+untouched by the epilogue, but could also be a stack slot.
+
+You must define this macro if you want to support call frame exception
+handling like that provided by DWARF 2.
+*/
+/*
+  I don't think functions that may throw exceptions can ever be leaf
+  functions, so we may safely use LR for this.
+*/
+#define EH_RETURN_STACKADJ_REGNO LR_REGNUM
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
+
+/*
+A C expression whose value is RTL representing a location in which
+to store the address of an exception handler to which we should
+return.  It will not be assigned on code paths that return normally.
+
+Typically this is the location in the call frame at which the normal
+return address is stored.  For targets that return by popping an
+address off the stack, this might be a memory address just below
+the target call frame rather than inside the current call
+frame.  EH_RETURN_STACKADJ_RTX will have already been assigned,
+so it may be used to calculate the location of the target call frame.
+
+Some targets have more complex requirements than storing to an
+address calculable during initial code generation.  In that case
+the eh_return instruction pattern should be used instead.
+
+If you want to support call frame exception handling, you must
+define either this macro or the eh_return instruction pattern.
+*/
+/*
+  We define the eh_return instruction pattern, so this isn't needed.
+*/
+/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
+
+/*
+  This macro chooses the encoding of pointers embedded in the
+  exception handling sections. If at all possible, this should be
+  defined such that the exception handling section will not require
+  dynamic relocations, and so may be read-only.
+
+  code is 0 for data, 1 for code labels, 2 for function
+  pointers. global is true if the symbol may be affected by dynamic
+  relocations. The macro should return a combination of the DW_EH_PE_*
+  defines as found in dwarf2.h.
+
+  If this macro is not defined, pointers will not be encoded but
+  represented directly.
+*/
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL)	\
+  ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0)	\
+   | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr)	\
+   | DW_EH_PE_sdata4)
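+
+/* Resulting encodings (for reference): without -fpic this selects
+   DW_EH_PE_absptr | DW_EH_PE_sdata4; with -fpic it selects
+   DW_EH_PE_pcrel | DW_EH_PE_sdata4, plus DW_EH_PE_indirect when the
+   symbol is global and may be affected by dynamic relocations. */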
+
+/* ToDo: The rest of this subsection */
+
+/** Specifying How Stack Checking is Done **/
+/* ToDo: All in this subsection */
+
+/** Registers That Address the Stack Frame **/
+
+/*
+The register number of the stack pointer register, which must also be a
+fixed register according to FIXED_REGISTERS.  On most machines,
+the hardware determines which register this is.
+*/
+/* Using r13 as stack pointer. */
+#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
+
+/*
+The register number of the frame pointer register, which is used to
+access automatic variables in the stack frame.  On some machines, the
+hardware determines which register this is.  On other machines, you can
+choose any register you wish for this purpose.
+*/
+/* Use r7 */
+#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
+
+
+
+/*
+The register number of the arg pointer register, which is used to access
+the function's argument list.  On some machines, this is the same as the
+frame pointer register.  On some machines, the hardware determines which
+register this is.  On other machines, you can choose any register you
+wish for this purpose.  If this is not the same register as the frame
+pointer register, then you must mark it as a fixed register according to
+FIXED_REGISTERS, or arrange to be able to eliminate it (see the
+Elimination section of the GCC internals manual).
+*/
+/* Using r4 */
+#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
+
+
+/*
+Register numbers used for passing a function's static chain pointer.  If
+register windows are used, the register number as seen by the called
+function is STATIC_CHAIN_INCOMING_REGNUM, while the register
+number as seen by the calling function is STATIC_CHAIN_REGNUM.  If
+these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
+not be defined.
+
+The static chain register need not be a fixed register.
+
+If the static chain is passed in memory, these macros should not be
+defined; instead, the next two macros should be defined.
+*/
+/* Using r0 */
+#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
+
+
+/** Eliminating Frame Pointer and Arg Pointer **/
+
+/*
+A C expression which is nonzero if a function must have and use a frame
+pointer.  This expression is evaluated  in the reload pass.  If its value is
+nonzero the function will have a frame pointer.
+
+The expression can in principle examine the current function and decide
+according to the facts, but on most machines the constant 0 or the
+constant 1 suffices.  Use 0 when the machine allows code to be generated
+with no frame pointer, and doing so saves some time or space.  Use 1
+when there is no possible advantage to avoiding a frame pointer.
+
+In certain cases, the compiler does not know how to produce valid code
+without a frame pointer.  The compiler recognizes those cases and
+automatically gives the function a frame pointer regardless of what
+FRAME_POINTER_REQUIRED says.  You don't need to worry about
+them.
+
+In a function that does not require a frame pointer, the frame pointer
+register can be allocated for ordinary usage, unless you mark it as a
+fixed register.  See FIXED_REGISTERS for more information.
+*/
+/* We need the frame pointer when compiling for profiling */
+#define FRAME_POINTER_REQUIRED (current_function_profile)
+
+/*
+A C statement to store in the variable DEPTH_VAR the difference
+between the frame pointer and the stack pointer values immediately after
+the function prologue.  The value would be computed from information
+such as the result of get_frame_size () and the tables of
+registers regs_ever_live and call_used_regs.
+
+If ELIMINABLE_REGS is defined, this macro will not be used and
+need not be defined.  Otherwise, it must be defined even if
+FRAME_POINTER_REQUIRED is defined to always be true; in that
+case, you may set DEPTH_VAR to anything.
+*/
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
+
+/*
+If defined, this macro specifies a table of register pairs used to
+eliminate unneeded registers that point into the stack frame.  If it is not
+defined, the only elimination attempted by the compiler is to replace
+references to the frame pointer with references to the stack pointer.
+
+The definition of this macro is a list of structure initializations, each
+of which specifies an original and replacement register.
+
+On some machines, the position of the argument pointer is not known until
+the compilation is completed.  In such a case, a separate hard register
+must be used for the argument pointer.  This register can be eliminated by
+replacing it with either the frame pointer or the argument pointer,
+depending on whether or not the frame pointer has been eliminated.
+
+In this case, you might specify:
+  #define ELIMINABLE_REGS  \
+  {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+   {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+   {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+Note that the elimination of the argument pointer with the stack pointer is
+specified first since that is the preferred elimination.
+*/
+#define ELIMINABLE_REGS					\
+{							\
+  { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },	\
+  { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },		\
+  { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }		\
+}
+
+/*
+A C expression that returns nonzero if the compiler is allowed to try
+to replace register number FROM with register number
+TO.  This macro need only be defined if ELIMINABLE_REGS
+is defined, and will usually be the constant 1, since most of the cases
+preventing register elimination are things that the compiler already
+knows about.
+*/
+#define CAN_ELIMINATE(FROM, TO) 1
+
+/*
+This macro is similar to INITIAL_FRAME_POINTER_OFFSET.  It
+specifies the initial difference between the specified pair of
+registers.  This macro must be defined if ELIMINABLE_REGS is
+defined.
+*/
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET)		\
+  ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
+
+/** Passing Function Arguments on the Stack **/
+
+
+/*
+A C expression.  If nonzero, push insns will be used to pass
+outgoing arguments.
+If the target machine does not have a push instruction, set it to zero.
+That directs GCC to use an alternate strategy: to
+allocate the entire argument block and then store the arguments into
+it.  When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
+*/
+#define PUSH_ARGS 1
+
+
+/*
+A C expression that is the number of bytes actually pushed onto the
+stack when an instruction attempts to push NPUSHED bytes.
+
+On some machines, the definition
+
+  #define PUSH_ROUNDING(BYTES) (BYTES)
+
+will suffice.  But on other machines, instructions that appear
+to push one byte actually push two bytes in an attempt to maintain
+alignment.  Then the definition should be
+
+  #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
+*/
+/* Push 4 bytes at a time. */
+#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
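+
+/* Worked example (illustrative only): PUSH_ROUNDING(1) = (1 + 3) & ~3 = 4
+   and PUSH_ROUNDING(5) = (5 + 3) & ~3 = 8, so every push is rounded up
+   to a multiple of 4 bytes. */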
+
+/*
+A C expression.  If nonzero, the maximum amount of space required for
+outgoing arguments will be computed and placed into the variable
+current_function_outgoing_args_size.  No space will be pushed
+onto the stack for each call; instead, the function prologue should
+increase the stack frame size by this amount.
+
+Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
+*/
+#define ACCUMULATE_OUTGOING_ARGS 0
+
+
+
+
+/*
+A C expression that should indicate the number of bytes of its own
+arguments that a function pops on returning, or 0 if the
+function pops no arguments and the caller must therefore pop them all
+after the function returns.
+
+FUNDECL is a C variable whose value is a tree node that describes
+the function in question.  Normally it is a node of type
+FUNCTION_DECL that describes the declaration of the function.
+From this you can obtain the DECL_ATTRIBUTES of the function.
+
+FUNTYPE is a C variable whose value is a tree node that
+describes the function in question.  Normally it is a node of type
+FUNCTION_TYPE that describes the data type of the function.
+From this it is possible to obtain the data types of the value and
+arguments (if known).
+
+When a call to a library function is being considered, FUNDECL
+will contain an identifier node for the library function.  Thus, if
+you need to distinguish among various library functions, you can do so
+by their names.  Note that ``library function'' in this context means
+a function used to perform arithmetic, whose name is known specially
+in the compiler and was not mentioned in the C code being compiled.
+
+STACK_SIZE is the number of bytes of arguments passed on the
+stack.  If a variable number of bytes is passed, it is zero, and
+argument popping will always be the responsibility of the calling function.
+
+On the VAX, all functions always pop their arguments, so the definition
+of this macro is STACK_SIZE.  On the 68000, using the standard
+calling convention, no functions pop their arguments, so the value of
+the macro is always 0 in this case.  But an alternative calling
+convention is available in which functions that take a fixed number of
+arguments pop them but other functions (such as printf) pop
+nothing (the caller pops all).  When this convention is in use,
+FUNTYPE is examined to determine whether a function takes a fixed
+number of arguments.
+*/
+#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
+
+
+/* Returns nonzero if we can use a single return instruction for this function. */
+#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
+
+/*
+A C expression that should indicate the number of bytes a call sequence
+pops off the stack.  It is added to the value of RETURN_POPS_ARGS
+when compiling a function call.
+
+CUM is the variable in which all arguments to the called function
+have been accumulated.
+
+On certain architectures, such as the SH5, a call trampoline is used
+that pops certain registers off the stack, depending on the arguments
+that have been passed to the function.  Since this is a property of the
+call site, not of the called function, RETURN_POPS_ARGS is not
+appropriate.
+*/
+#define CALL_POPS_ARGS(CUM) 0
+
+/* Passing Arguments in Registers */
+
+/*
+A C expression that controls whether a function argument is passed
+in a register, and which register.
+
+The arguments are CUM, which summarizes all the previous
+arguments; MODE, the machine mode of the argument; TYPE,
+the data type of the argument as a tree node or 0 if that is not known
+(which happens for C support library functions); and NAMED,
+which is 1 for an ordinary argument and 0 for nameless arguments that
+correspond to '...' in the called function's prototype.
+TYPE can be an incomplete type if a syntax error has previously
+occurred.
+
+The value of the expression is usually either a reg RTX for the
+hard register in which to pass the argument, or zero to pass the
+argument on the stack.
+
+For machines like the VAX and 68000, where normally all arguments are
+pushed, zero suffices as a definition.
+
+The value of the expression can also be a parallel RTX.  This is
+used when an argument is passed in multiple locations.  The mode of the
+of the parallel should be the mode of the entire argument.  The
+parallel holds any number of expr_list pairs; each one
+describes where part of the argument is passed.  In each
+expr_list the first operand must be a reg RTX for the hard
+register in which to pass this part of the argument, and the mode of the
+register RTX indicates how large this part of the argument is.  The
+second operand of the expr_list is a const_int which gives
+the offset in bytes into the entire argument of where this part starts.
+As a special exception the first expr_list in the parallel
+RTX may have a first operand of zero.  This indicates that the entire
+argument is also stored on the stack.
+
+The last time this macro is called, it is called with MODE == VOIDmode,
+and its result is passed to the call or call_value
+pattern as operands 2 and 3 respectively.
+
+The usual way to make the ISO library 'stdarg.h' work on a machine
+where some arguments are usually passed in registers, is to cause
+nameless arguments to be passed on the stack instead.  This is done
+by making FUNCTION_ARG return 0 whenever NAMED is 0.
+
+You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
+in the definition of this macro to determine if this argument is of a
+type that must be passed in the stack.  If REG_PARM_STACK_SPACE
+is not defined and FUNCTION_ARG returns nonzero for such an
+argument, the compiler will abort.  If REG_PARM_STACK_SPACE is
+defined, the argument will be computed in the stack and then loaded into
+a register.  */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+  avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
+
+
+
+
+/*
+A C type for declaring a variable that is used as the first argument of
+FUNCTION_ARG and other related values.  For some target machines,
+the type int suffices and can hold the number of bytes of
+argument so far.
+
+There is no need to record in CUMULATIVE_ARGS anything about the
+arguments that have been passed on the stack.  The compiler has other
+variables to keep track of that.  For target machines on which all
+arguments are passed on the stack, there is no need to store anything in
+CUMULATIVE_ARGS; however, the data structure must exist and
+should not be empty, so use int.
+*/
+typedef struct avr32_args
+{
+  /* Index representing the argument register the current function argument
+     will occupy */
+  int index;
+  /* A mask with bits representing the argument registers: if a bit is set
+     then this register is used for an argument */
+  int used_index;
+  /* TRUE if this function has anonymous arguments */
+  int uses_anonymous_args;
+  /* The size in bytes of the named arguments pushed on the stack */
+  int stack_pushed_args_size;
+  /* Set to true if this function needs a Return Value Pointer */
+  int use_rvp;
+
+} CUMULATIVE_ARGS;
+
+
+#define FIRST_CUM_REG_INDEX 0
+#define LAST_CUM_REG_INDEX 4
+#define GET_REG_INDEX(CUM) ((CUM)->index)
+#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX))
+#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
+#define SET_USED_INDEX(CUM, INDEX)		\
+  do						\
+    {						\
+      if (INDEX >= 0)				\
+        (CUM)->used_index |= (1 << (INDEX));	\
+    }						\
+  while (0)
+#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
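+
+/* Minimal usage sketch of the helpers above (illustrative only; the real
+   bookkeeping lives in avr32_init_cumulative_args() and
+   avr32_function_arg_advance()):
+
+     CUMULATIVE_ARGS cum;
+     SET_INDEXES_UNUSED (&cum);        // no argument register used yet
+     SET_REG_INDEX (&cum, 0);          // next argument goes in index 0
+     SET_USED_INDEX (&cum, GET_REG_INDEX (&cum));   // mark index 0 taken
+     if (GET_USED_INDEX (&cum, 0))     // nonzero: index 0 is now in use
+       ;
+*/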
+
+
+/*
+   A C statement (sans semicolon) for initializing the variable cum for the
+   state at the beginning of the argument list. The variable has type
+   CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
+   the function which will receive the args, or 0 if the args are to a compiler
+   support library function. For direct calls that are not libcalls, FNDECL
+   contains the declaration node of the function. FNDECL is also set when
+   INIT_CUMULATIVE_ARGS is used to find arguments for the function being
+   compiled.  N_NAMED_ARGS is set to the number of named arguments, including a
+   structure return address if it is passed as a parameter, when making a call.
+   When processing incoming arguments, N_NAMED_ARGS is set to -1.
+
+   When processing a call to a compiler support library function, LIBNAME
+   identifies which one.  It is a symbol_ref rtx which contains the name of the
+   function, as a string. LIBNAME is 0 when an ordinary C function call is
+   being processed. Thus, each time this macro is called, either LIBNAME or
+   FNTYPE is nonzero, but never both of them at once.
+*/
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+  avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
+
+
+/*
+A C statement (sans semicolon) to update the summarizer variable
+CUM to advance past an argument in the argument list.  The
+values MODE, TYPE and NAMED describe that argument.
+Once this is done, the variable CUM is suitable for analyzing
+the following argument with FUNCTION_ARG, etc.
+
+This macro need not do anything if the argument in question was passed
+on the stack.  The compiler knows how to track the amount of stack space
+used for arguments without any special help.
+*/
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+  avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
+
+/*
+If defined, a C expression which determines whether, and in which direction,
+to pad out an argument with extra space.  The value should be of type
+enum direction: either 'upward' to pad above the argument,
+'downward' to pad below, or 'none' to inhibit padding.
+
+The amount of padding is always just enough to reach the next
+multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
+it.
+
+This macro has a default definition which is right for most systems.
+For little-endian machines, the default is to pad upward.  For
+big-endian machines, the default is to pad downward for an argument of
+constant size shorter than an int, and upward otherwise.
+*/
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+  avr32_function_arg_padding(MODE, TYPE)
+
+/*
+  Specify padding for the last element of a block move between registers
+  and memory. First is nonzero if this is the only element. Defining
+  this macro allows better control of register function parameters on
+  big-endian machines, without using PARALLEL rtl. In particular,
+  MUST_PASS_IN_STACK need not test padding and mode of types in registers,
+  as there is no longer a "wrong" part of a register; For example, a three
+  byte aggregate may be passed in the high part of a register if so required.
+*/
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+  avr32_function_arg_padding(MODE, TYPE)
+
+/*
+If defined, a C expression which determines whether the default
+implementation of va_arg will attempt to pad down before reading the
+next argument, if that argument is smaller than its aligned space as
+controlled by PARM_BOUNDARY.  If this macro is not defined, all such
+arguments are padded down if BYTES_BIG_ENDIAN is true.
+*/
+#define PAD_VARARGS_DOWN \
+  (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
+
+
+/*
+A C expression that is nonzero if REGNO is the number of a hard
+register in which function arguments are sometimes passed.  This does
+not include implicit arguments such as the static chain and
+the structure-value address.  On many machines, no registers can be
+used for this purpose since all function arguments are pushed on the
+stack.
+*/
+/*
+  Use r8 - r12 for function arguments.
+*/
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+  ((REGNO) >= 3 && (REGNO) <= 7)
+
+/* Number of registers used for passing function arguments */
+#define NUM_ARG_REGS 5
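+
+/* For reference, and assuming INTERNAL_REGNUM(N) maps rN to hard register
+   number 15 - N (consistent with RET_REGISTER below being (15 - 12) for
+   r12), the hard register numbers 3..7 accepted by FUNCTION_ARG_REGNO_P
+   correspond to r12, r11, r10, r9 and r8. */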
+
+/*
+If defined, the order in which arguments are loaded into their
+respective argument registers is reversed so that the last
+argument is loaded first.  This macro only affects arguments
+passed in registers.
+*/
+/* #define LOAD_ARGS_REVERSED */
+
+/** How Scalar Function Values Are Returned **/
+
+/* AVR32 uses r12 as the return register. */
+#define RET_REGISTER (15 - 12)
+
+/*
+Define this macro if -traditional should not cause functions
+declared to return float to convert the value to double.
+*/
+/* #define TRADITIONAL_RETURN_FLOAT */
+
+/*
+A C expression to create an RTX representing the place where a
+function returns a value of data type VALTYPE.  VALTYPE is
+a tree node representing a data type.  Write TYPE_MODE(VALTYPE)
+to get the machine mode used to represent that type.
+On many machines, only the mode is relevant.  (Actually, on most
+machines, scalar values are returned in the same place regardless of
+mode).
+
+The value of the expression is usually a reg RTX for the hard
+register where the return value is stored.  The value can also be a
+parallel RTX, if the return value is in multiple places.  See
+FUNCTION_ARG for an explanation of the parallel form.
+
+If PROMOTE_FUNCTION_RETURN is defined, you must apply the same
+promotion rules specified in PROMOTE_MODE if VALTYPE is a
+scalar type.
+
+If the precise function being called is known, FUNC is a tree
+node (FUNCTION_DECL) for it; otherwise, FUNC is a null
+pointer.  This makes it possible to use a different value-returning
+convention for specific functions when all their calls are
+known.
+
+FUNCTION_VALUE is not used for return values with aggregate data
+types, because these are returned in another way.  See
+STRUCT_VALUE_REGNUM and related macros, below.
+*/
+#define FUNCTION_VALUE(VALTYPE, FUNC) avr32_function_value(VALTYPE, FUNC)
+
+
+/*
+A C expression to create an RTX representing the place where a library
+function returns a value of mode MODE.  If the precise function
+being called is known, FUNC is a tree node
+(FUNCTION_DECL) for it; otherwise, FUNC is a null
+pointer.  This makes it possible to use a different value-returning
+convention for specific functions when all their calls are
+known.
+
+Note that "library function" in this context means a compiler
+support routine, used to perform arithmetic, whose name is known
+specially by the compiler and was not mentioned in the C code being
+compiled.
+
+The definition of LIBCALL_VALUE need not be concerned with aggregate
+data types, because none of the library functions returns such types.
+*/
+#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
+
+/*
+A C expression that is nonzero if REGNO is the number of a hard
+register in which the values of called function may come back.
+
+A register whose use for returning values is limited to serving as the
+second of a pair (for a value of type double, say) need not be
+recognized by this macro.  So for most machines, this definition
+suffices:
+  #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+If the machine has register windows, so that the caller and the called
+function use different registers for the return value, this macro
+should recognize only the caller's register numbers.
+*/
+/*
+  When returning a value of mode DImode, r11:r10 is used, else r12 is used.
+*/
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
+                                       || (REGNO) == INTERNAL_REGNUM(11))
+
+
+/** How Large Values Are Returned **/
+
+
+/*
+Define this macro to be 1 if all structure and union return values must be
+in memory.  Since this results in slower code, this should be defined
+only if needed for compatibility with other compilers or with an ABI.
+If you define this macro to be 0, then the conventions used for structure
+and union return values are decided by the RETURN_IN_MEMORY macro.
+
+If not defined, this defaults to the value 1.
+*/
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+
+
+/** Generating Code for Profiling **/
+
+/*
+A C statement or compound statement to output to FILE some
+assembler code to call the profiling subroutine mcount.
+
+The details of how mcount expects to be called are determined by
+your operating system environment, not by GCC.  To figure them out,
+compile a small program for profiling using the system's installed C
+compiler and look at the assembler code that results.
+
+Older implementations of mcount expect the address of a counter
+variable to be loaded into some register.  The name of this variable is
+'LP' followed by the number LABELNO, so you would generate
+the name using 'LP%d' in a fprintf.
+*/
+/* ToDo: fixme */
+#ifndef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+  fprintf((FILE), "/* profiler %d */", (LABELNO))
+#endif
+
+
+/*****************************************************************************
+ * Trampolines for Nested Functions                                          *
+ *****************************************************************************/
+
+/*
+A C statement to output, on the stream FILE, assembler code for a
+block of data that contains the constant parts of a trampoline.  This
+code should not include a label - the label is taken care of
+automatically.
+
+If you do not define this macro, it means no template is needed
+for the target.  Do not define this macro on systems where the block move
+code to copy the trampoline into place would be larger than the code
+to generate it on the spot.
+*/
+/* ToDo: correct? */
+#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE)
+
+
+/*
+A C expression for the size in bytes of the trampoline, as an integer.
+*/
+/* ToDo: fixme */
+#define TRAMPOLINE_SIZE 0x0C
+
+/*
+Alignment required for trampolines, in bits.
+
+If you don't define this macro, the value of BIGGEST_ALIGNMENT
+is used for aligning trampolines.
+*/
+#define TRAMPOLINE_ALIGNMENT 16
+
+/*
+A C statement to initialize the variable parts of a trampoline.
+ADDR is an RTX for the address of the trampoline; FNADDR is
+an RTX for the address of the nested function; STATIC_CHAIN is an
+RTX for the static chain value that should be passed to the function
+when it is called.
+*/
+#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
+  avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
+
+
+/******************************************************************************
+ * Implicit Calls to Library Routines
+ *****************************************************************************/
+
+/* Tail calling.  */
+
+/* A C expression that evaluates to true if it is ok to perform a sibling
+   call to DECL.  */
+#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
+
+#define OVERRIDE_OPTIONS  avr32_override_options ()
+
+
+
+/******************************************************************************
+ * Addressing Modes
+ *****************************************************************************/
+
+/*
+A C expression that is nonzero if the machine supports pre-increment,
+pre-decrement, post-increment, or post-decrement addressing respectively.
+*/
+/*
+  AVR32 supports Rp++ and --Rp
+*/
+#define HAVE_PRE_INCREMENT 0
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_POST_INCREMENT 1
+#define HAVE_POST_DECREMENT 0
+
+/*
+A C expression that is nonzero if the machine supports pre- or
+post-address side-effect generation involving constants other than
+the size of the memory operand.
+*/
+#define HAVE_PRE_MODIFY_DISP 0
+#define HAVE_POST_MODIFY_DISP 0
+
+/*
+A C expression that is nonzero if the machine supports pre- or
+post-address side-effect generation involving a register displacement.
+*/
+#define HAVE_PRE_MODIFY_REG 0
+#define HAVE_POST_MODIFY_REG 0
+
+/*
+A C expression that is 1 if the RTX X is a constant which
+is a valid address.  On most machines, this can be defined as
+CONSTANT_P (X), but a few machines are more restrictive
+in which constant addresses are supported.
+
+CONSTANT_P accepts integer-valued expressions whose values are
+not explicitly known, such as symbol_ref, label_ref, and
+high expressions and const arithmetic expressions, in
+addition to const_int and const_double expressions.
+*/
+#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
+
+/*
+A number, the maximum number of registers that can appear in a valid
+memory address.  Note that it is up to you to specify a value equal to
+the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
+accept.
+*/
+#define MAX_REGS_PER_ADDRESS 2
+
+/*
+A C compound statement with a conditional goto LABEL;
+executed if X (an RTX) is a legitimate memory address on the
+target machine for a memory operand of mode MODE.
+
+It usually pays to define several simpler macros to serve as
+subroutines for this one.  Otherwise it may be too complicated to
+understand.
+
+This macro must exist in two variants: a strict variant and a
+non-strict one.  The strict variant is used in the reload pass.  It
+must be defined so that any pseudo-register that has not been
+allocated a hard register is considered a memory reference.  In
+contexts where some kind of register is required, a pseudo-register
+with no hard register must be rejected.
+
+The non-strict variant is used in other passes.  It must be defined to
+accept all pseudo-registers in every context where some kind of
+register is required.
+
+Compiler source files that want to use the strict variant of this
+macro define the macro REG_OK_STRICT.  You should use an
+#ifdef REG_OK_STRICT conditional to define the strict variant
+in that case and the non-strict variant otherwise.
+
+Subroutines to check for acceptable registers for various purposes (one
+for base registers, one for index registers, and so on) are typically
+among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
+Then only these subroutine macros need have two variants; the higher
+levels of macros may be the same whether strict or not.
+
+Normally, constant addresses which are the sum of a symbol_ref
+and an integer are stored inside a const RTX to mark them as
+constant.  Therefore, there is no need to recognize such sums
+specifically as legitimate addresses.  Normally you would simply
+recognize any const as legitimate.
+
+Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
+sums that are not marked with  const.  It assumes that a naked
+plus indicates indexing.  If so, then you must reject such
+naked constant sums as illegitimate addresses, so that none of them will
+be given to PRINT_OPERAND_ADDRESS.
+
+On some machines, whether a symbolic address is legitimate depends on
+the section that the address refers to.  On these machines, define the
+macro ENCODE_SECTION_INFO to store the information into the
+symbol_ref, and then check for it here.  When you see a
+const, you will have to look inside it to find the
+symbol_ref in order to determine the section.
+
+The best way to modify the name string is by adding text to the
+beginning, with suitable punctuation to prevent any ambiguity.  Allocate
+the new name in saveable_obstack.  You will have to modify
+ASM_OUTPUT_LABELREF to remove and decode the added text and
+output the name accordingly, and define STRIP_NAME_ENCODING to
+access the original name string.
+
+You can check the information stored here into the symbol_ref in
+the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
+PRINT_OPERAND_ADDRESS.
+*/
+#ifdef REG_OK_STRICT
+#  define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL)	\
+  do							\
+    {							\
+      if (avr32_legitimate_address(MODE, X, 1))		\
+	goto LABEL;					\
+    }							\
+  while (0)
+#else
+#  define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL)	\
+  do							\
+    {							\
+      if (avr32_legitimate_address(MODE, X, 0))		\
+	goto LABEL;					\
+    }							\
+  while (0)
+#endif
+
+/*
+A C expression that is nonzero if X (assumed to be a reg
+RTX) is valid for use as a base register.  For hard registers, it
+should always accept those which the hardware permits and reject the
+others.  Whether the macro accepts or rejects pseudo registers must be
+controlled by REG_OK_STRICT as described above.  This usually
+requires two variant definitions, of which REG_OK_STRICT
+controls the one actually used.
+*/
+#ifdef REG_OK_STRICT
+#  define REG_OK_FOR_BASE_P(X) \
+  REGNO_OK_FOR_BASE_P(REGNO(X))
+#else
+#  define REG_OK_FOR_BASE_P(X) \
+  ((REGNO(X) <= LAST_REGNUM) || (REGNO(X) >= FIRST_PSEUDO_REGISTER))
+#endif
+
+
+/*
+A C expression that is nonzero if X (assumed to be a reg
+RTX) is valid for use as an index register.
+
+The difference between an index register and a base register is that
+the index register may be scaled.  If an address involves the sum of
+two registers, neither one of them scaled, then either one may be
+labeled the "base" and the other the "index"; but whichever
+labeling is used must fit the machine's constraints of which registers
+may serve in each capacity.  The compiler will try both labelings,
+looking for one that is valid, and will reload one or both registers
+only if neither labeling works.
+*/
+#define REG_OK_FOR_INDEX_P(X) \
+  REG_OK_FOR_BASE_P(X)
+
+
+/*
+A C compound statement that attempts to replace X with a valid
+memory address for an operand of mode MODE.  win will be a
+C statement label elsewhere in the code; the macro definition may use
+
+  GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
+
+to avoid further processing if the address has become legitimate.
+
+X will always be the result of a call to break_out_memory_refs,
+and OLDX will be the operand that was given to that function to produce
+X.
+
+The code generated by this macro should not alter the substructure of
+X.  If it transforms X into a more legitimate form, it
+should assign X (which will always be a C variable) a new value.
+
+It is not necessary for this macro to come up with a legitimate
+address.  The compiler has standard ways of doing so in all cases.  In
+fact, it is safe for this macro to do nothing.  But often a
+machine-dependent strategy can generate better code.
+*/
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN)			\
+  do								\
+    {								\
+      if (GET_CODE(X) == PLUS					\
+	  && GET_CODE(XEXP(X, 0)) == REG			\
+	  && GET_CODE(XEXP(X, 1)) == CONST_INT			\
+	  && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)),	\
+					'K', "Ks16"))		\
+	{							\
+	  rtx index = force_reg(SImode, XEXP(X, 1));		\
+	  X = gen_rtx_PLUS( SImode, XEXP(X, 0), index);		\
+	}							\
+      GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN);			\
+    }								\
+  while(0)
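+
+/* Example of the transformation above (illustrative only): an address such
+   as (plus (reg r0) (const_int 100000)), whose displacement does not
+   satisfy the "Ks16" constraint, has the constant forced into a register
+   first, giving (plus (reg r0) (reg tmp)), which is then re-checked with
+   GO_IF_LEGITIMATE_ADDRESS. */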
+
+
+/*
+A C statement or compound statement with a conditional
+goto LABEL; executed if memory address X (an RTX) can have
+different meanings depending on the machine mode of the memory
+reference it is used for or if the address is valid for some modes
+but not others.
+
+Autoincrement and autodecrement addresses typically have mode-dependent
+effects because the amount of the increment or decrement is the size
+of the operand being addressed.  Some machines have other mode-dependent
+addresses.  Many RISC machines have no mode-dependent addresses.
+
+You may assume that ADDR is a valid address for the machine.
+*/
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL)	\
+  do							\
+    {							\
+      if (GET_CODE (ADDR) == POST_INC			\
+	  || GET_CODE (ADDR) == PRE_DEC)		\
+	goto LABEL;					\
+    }							\
+  while (0)
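+
+/* For reference: (post_inc (reg Rp)) and (pre_dec (reg Rp)) are flagged as
+   mode dependent because the increment or decrement applied to Rp equals
+   GET_MODE_SIZE of the access, so the same address RTX means different
+   things in, say, QImode and SImode. */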
+
+/*
+A C expression that is nonzero if X is a legitimate constant for
+an immediate operand on the target machine.  You can assume that
+X satisfies CONSTANT_P, so you need not check this.  In fact,
+'1' is a suitable definition for this macro on machines where
+anything CONSTANT_P is valid.
+*/
+#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
+
+
+/******************************************************************************
+ * Condition Code Status
+ *****************************************************************************/
+
+#define HAVE_conditional_move 1
+
+/*
+C code for a data type which is used for declaring the mdep
+component of cc_status.  It defaults to int.
+
+This macro is not used on machines that do not use cc0.
+*/
+
+typedef struct
+{
+  int flags;
+  rtx value;
+  int fpflags;
+  rtx fpvalue;
+} avr32_status_reg;
+
+
+#define CC_STATUS_MDEP avr32_status_reg
+
+/*
+A C expression to initialize the mdep field to "empty".
+The default definition does nothing, since most machines don't use
+the field anyway.  If you want to use the field, you should probably
+define this macro to initialize it.
+
+This macro is not used on machines that do not use cc0.
+*/
+
+#define CC_STATUS_MDEP_INIT  \
+ (cc_status.mdep.flags = CC_NONE , cc_status.mdep.value = 0)
+
+#define FPCC_STATUS_INIT \
+   (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0)
+
+/*
+A C compound statement to set the components of cc_status
+appropriately for an insn INSN whose body is EXP.  It is
+this macro's responsibility to recognize insns that set the condition
+code as a byproduct of other activity as well as those that explicitly
+set (cc0).
+
+This macro is not used on machines that do not use cc0.
+
+If there are insns that do not set the condition code but do alter
+other machine registers, this macro must check to see whether they
+invalidate the expressions that the condition code is recorded as
+reflecting.  For example, on the 68000, insns that store in address
+registers do not set the condition code, which means that usually
+NOTICE_UPDATE_CC can leave cc_status unaltered for such
+insns.  But suppose that the previous insn set the condition code
+based on location 'a4@@(102)' and the current insn stores a new
+value in 'a4'.  Although the condition code is not changed by
+this, it will no longer be true that it reflects the contents of
+'a4@@(102)'.  Therefore, NOTICE_UPDATE_CC must alter
+cc_status in this case to say that nothing is known about the
+condition code value.
+
+The definition of NOTICE_UPDATE_CC must be prepared to deal
+with the results of peephole optimization: insns whose patterns are
+parallel RTXs containing various reg, mem or
+constants which are just the operands.  The RTL structure of these
+insns is not sufficient to indicate what the insns actually do.  What
+NOTICE_UPDATE_CC should do when it sees one is just to run
+CC_STATUS_INIT.
+
+A possible definition of NOTICE_UPDATE_CC is to call a function
+that looks at an attribute (see Insn Attributes) named, for example,
+'cc'.  This avoids having detailed information about patterns in
+two places, the 'md' file and in NOTICE_UPDATE_CC.
+*/
+
+#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
+
+
+
+
+/******************************************************************************
+ * Describing Relative Costs of Operations
+ *****************************************************************************/
+
+
+
+/*
+A C expression for the cost of moving data of mode MODE from a
+register in class FROM to one in class TO.  The classes are
+expressed using the enumeration values such as GENERAL_REGS.  A
+value of 2 is the default; other values are interpreted relative to
+that.
+
+It is not required that the cost always equal 2 when FROM is the
+same as TO; on some machines it is expensive to move between
+registers if they are not general registers.
+
+If reload sees an insn consisting of a single set between two
+hard registers, and if REGISTER_MOVE_COST applied to their
+classes returns a value of 2, reload does not check to ensure that the
+constraints of the insn are met.  Setting a cost of other than 2 will
+allow reload to verify that the constraints are met.  You should do this
+if the movm pattern's constraints do not allow such copying.
+*/
+#define REGISTER_MOVE_COST(MODE, FROM, TO) \
+  ((GET_MODE_SIZE(MODE) <= 4) ? 2:         \
+   (GET_MODE_SIZE(MODE) <= 8) ? 3:         \
+   4)
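+
+/* Resulting costs (for reference): moves of up to 4 bytes (e.g. SImode)
+   cost the default 2, 8-byte moves (e.g. DImode) cost 3, and anything
+   larger costs 4, making wide register-to-register copies slightly less
+   attractive to the register allocator. */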
+
+/*
+A C expression for the cost of moving data of mode MODE between a
+register of class CLASS and memory; IN is zero if the value
+is to be written to memory, nonzero if it is to be read in.  This cost
+is relative to those in REGISTER_MOVE_COST.  If moving between
+registers and memory is more expensive than between two registers, you
+should define this macro to express the relative cost.
+
+If you do not define this macro, GCC uses a default cost of 4 plus
+the cost of copying via a secondary reload register, if one is
+needed.  If your machine requires a secondary reload register to copy
+between memory and a register of CLASS but the reload mechanism is
+more complex than copying via an intermediate, define this macro to
+reflect the actual cost of the move.
+
+GCC defines the function memory_move_secondary_cost if
+secondary reloads are needed.  It computes the costs due to copying via
+a secondary register.  If your machine copies from memory using a
+secondary register in the conventional way but the default base value of
+4 is not correct for your machine, define this macro to add some other
+value to the result of that function.  The arguments to that function
+are the same as to this macro.
+*/
+/*
+  Memory moves are costly
+*/
+#define MEMORY_MOVE_COST(MODE, CLASS, IN)    10
+/*
+  (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 :          \
+            (GET_MODE_SIZE(MODE) > 8) ? 6 :          \
+            3)                                       \
+         : ((GET_MODE_SIZE(MODE) > 8) ? 4 : 2)))
+*/
+
+/*
+A C expression for the cost of a branch instruction.  A value of 1 is
+the default; other values are interpreted relative to that.
+*/
+  /* Try to use conditionals as much as possible */
+#define BRANCH_COST (TARGET_BRANCH_PRED ? 3 : 5)
+
+/*
+A C expression for the maximum number of instructions to execute via
+conditional execution instructions instead of a branch.  A value of
+BRANCH_COST+1 is the default if the machine does not use cc0, and 1 if
+it does use cc0.
+*/
+#define MAX_CONDITIONAL_EXECUTE 3
+
+/*
+Define this macro as a C expression which is nonzero if accessing less
+than a word of memory (i.e. a char or a short) is no
+faster than accessing a word of memory, i.e., if such accesses
+require more than one instruction or if there is no difference in cost
+between byte and (aligned) word loads.
+
+When this macro is not defined, the compiler will access a field by
+finding the smallest containing object; when it is defined, a fullword
+load will be used if alignment permits.  Unless byte accesses are
+faster than word accesses, using word accesses is preferable since it
+may eliminate subsequent memory access if subsequent accesses occur to
+other fields in the same word of the structure, but to different bytes.
+*/
+#define SLOW_BYTE_ACCESS 1
+
+
+/*
+Define this macro if it is as good or better to call a constant
+function address than to call an address kept in a register.
+*/
+#define NO_FUNCTION_CSE
+
+
+/******************************************************************************
+ * Adjusting the Instruction Scheduler
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Dividing the Output into Sections (Texts, Data, ...)                      *
+ *****************************************************************************/
+
+/*
+A C expression whose value is a string, including spacing, containing the
+assembler operation that should precede instructions and read-only data.
+Normally "\t.text" is right.
+*/
+#define TEXT_SECTION_ASM_OP "\t.text"
+/*
+A C statement that switches to the default section containing instructions.
+Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
+is enough.  The MIPS port uses this to sort all functions after all data
+declarations.
+*/
+/* #define TEXT_SECTION */
+
+/*
+A C expression whose value is a string, including spacing, containing the
+assembler operation to identify the following data as writable initialized
+data.  Normally "\t.data" is right.
+*/
+#define DATA_SECTION_ASM_OP "\t.data"
+
+/*
+If defined, a C expression whose value is a string, including spacing,
+containing the assembler operation to identify the following data as
+shared data.  If not defined, DATA_SECTION_ASM_OP will be used.
+*/
+
+/*
+A C expression whose value is a string, including spacing, containing
+the assembler operation to identify the following data as read-only
+initialized data.
+*/
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP \
+  ((target_flags & USE_RODATA_SECTION) ?  \
+   "\t.section\t.rodata" :                \
+   TEXT_SECTION_ASM_OP )
+
+
+/*
+If defined, a C expression whose value is a string, including spacing,
+containing the assembler operation to identify the following data as
+uninitialized global data.  If not defined, and neither
+ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
+uninitialized global data will be output in the data section if
+-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
+used.
+*/
+#define BSS_SECTION_ASM_OP	"\t.section\t.bss"
+
+/*
+If defined, a C expression whose value is a string, including spacing,
+containing the assembler operation to identify the following data as
+uninitialized global shared data.  If not defined, and
+BSS_SECTION_ASM_OP is, the latter will be used.
+*/
+/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
+/*
+If defined, a C expression whose value is a string, including spacing,
+containing the assembler operation to identify the following data as
+initialization code.  If not defined, GCC will assume such a section does
+not exist.
+*/
+#undef  INIT_SECTION_ASM_OP
+#define INIT_SECTION_ASM_OP "\t.section\t.init"
+
+/*
+If defined, a C expression whose value is a string, including spacing,
+containing the assembler operation to identify the following data as
+finalization code.  If not defined, GCC will assume such a section does
+not exist.
+*/
+#undef  FINI_SECTION_ASM_OP
+#define FINI_SECTION_ASM_OP "\t.section\t.fini"
+
+/*
+If defined, an ASM statement that switches to a different section
+via SECTION_OP, calls FUNCTION, and switches back to
+the text section.  This is used in crtstuff.c if
+INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, to generate
+the calls to initialization and finalization functions from the init and
+fini sections.  By default, this macro uses a simple function call.  Some
+ports need hand-crafted assembly code to avoid dependencies on
+registers initialized in the function prologue or to ensure that
+constant pools don't end up too far away in the text section.
+*/
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC)      \
+   asm ( SECTION_OP "\n" \
+         "mcall   r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
+         TEXT_SECTION_ASM_OP);
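+
+/*
+Illustration only (not from the original sources): assuming USER_LABEL_PREFIX
+is empty, as is typical for ELF targets, crtstuff.c would expand the macro
+above for a function such as frame_dummy roughly into
+
+   asm ("\t.section\t.init\n"
+        "mcall   r6[frame_dummy@got]\n"
+        "\t.text");
+
+i.e. the static constructor/destructor helpers are called through the GOT
+pointer kept in r6 instead of with a direct call.
+*/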
+
+
+/*
+Define this macro to be an expression with a nonzero value if jump
+tables (for tablejump insns) should be output in the text
+section, along with the assembler instructions.  Otherwise, the
+readonly data section is used.
+
+This macro is irrelevant if there is no separate readonly data section.
+*/
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+
+/******************************************************************************
+ * Position Independent Code (PIC)
+ *****************************************************************************/
+
+#ifndef AVR32_ALWAYS_PIC
+#define AVR32_ALWAYS_PIC 0
+#endif
+
+/* GOT is set to r6 */
+#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
+
+/*
+A C expression that is nonzero if X is a legitimate immediate
+operand on the target machine when generating position independent code.
+You can assume that X satisfies CONSTANT_P, so you need not
+check this.  You can also assume flag_pic is true, so you need not
+check it either.  You need not define this macro if all constants
+(including SYMBOL_REF) can be immediate operands when generating
+position independent code.
+*/
+/* We can't directly access anything that contains a symbol,
+   nor can we indirect via the constant pool.  */
+#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
+
+
+/* We need to know when we are making a constant pool; this determines
+   whether data needs to be in the GOT or can be referenced via a GOT
+   offset.  */
+extern int making_const_table;
+
+/******************************************************************************
+ * Defining the Output Assembler Language
+ *****************************************************************************/
+
+
+/*
+A C string constant describing how to begin a comment in the target
+assembler language.  The compiler assumes that the comment will end at
+the end of the line.
+*/
+#define ASM_COMMENT_START "# "
+
+/*
+A C string constant for text to be output before each asm
+statement or group of consecutive ones.  Normally this is
+"#APP", which is a comment that has no effect on most
+assemblers but tells the GNU assembler that it must check the lines
+that follow for all valid assembler constructs.
+*/
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/*
+A C string constant for text to be output after each asm
+statement or group of consecutive ones.  Normally this is
+"#NO_APP", which tells the GNU assembler to resume making the
+time-saving assumptions that are valid for ordinary compiler output.
+*/
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+
+
+#define FILE_ASM_OP 		"\t.file\n"
+#define IDENT_ASM_OP 		"\t.ident\t"
+#define SET_ASM_OP		"\t.set\t"
+
+
+/*
+ * Output assembly directives to switch to section name. The section
+ * should have attributes as specified by flags, which is a bit mask
+ * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
+ * it contains an alignment in bytes to be used for the section,
+ * otherwise some target default should be used. Only targets that
+ * must specify an alignment within the section directive need pay
+ * attention to align -- we will still use ASM_OUTPUT_ALIGN.
+ *
+ * NOTE: This one must not be moved to avr32.c
+ */
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+
+
+/*
+You may define this macro as a C expression.  You should define the
+expression to have a nonzero value if GCC should output the constant
+pool for a function before the code for the function, or a zero value if
+GCC should output the constant pool after the function.  If you do
+not define this macro, the usual case, GCC will output the constant
+pool before the function.
+*/
+#define CONSTANT_POOL_BEFORE_FUNCTION 0
+
+
+/*
+Define this macro as a C expression which is nonzero if the constant
+EXP, of type tree, should be output after the code for a
+function.  The compiler will normally output all constants before the
+function; you need not define this macro if this is OK.
+*/
+#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
+
+
+/*
+Define this macro as a C expression which is nonzero if C is
+used as a logical line separator by the assembler.
+
+If you do not define this macro, the default is that only
+the character ';' is treated as a logical line separator.
+*/
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n')
+
+
+/** Output of Uninitialized Variables **/
+
+/*
+A C statement (sans semicolon) to output to the stdio stream
+STREAM the assembler definition of a common-label named
+NAME whose size is SIZE bytes.  The variable ROUNDED
+is the size rounded up to whatever alignment the caller wants.
+
+Use the expression assemble_name(STREAM, NAME) to
+output the name itself; before and after that, output the additional
+assembler syntax for defining the name, and a newline.
+
+This macro controls how the assembler definitions of uninitialized
+common global variables are output.
+*/
+/*
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+  avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
+*/
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED)	\
+  do							\
+    {							\
+      fputs ("\t.comm ", (FILE));			\
+      assemble_name ((FILE), (NAME));			\
+      fprintf ((FILE), ",%d\n", (SIZE));		\
+    }							\
+  while (0)
+
+/*
+ * Like ASM_OUTPUT_BSS except takes the required alignment as a
+ * separate, explicit argument.  If you define this macro, it is used
+ * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
+ * handling the required alignment of the variable.  The alignment is
+ * specified as the number of bits.
+ *
+ * Try to use function asm_output_aligned_bss defined in file varasm.c
+ * when defining this macro.
+ */
+#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
+  asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
+
+/*
+A C statement (sans semicolon) to output to the stdio stream
+STREAM the assembler definition of a local-common-label named
+NAME whose size is SIZE bytes.  The variable ROUNDED
+is the size rounded up to whatever alignment the caller wants.
+
+Use the expression assemble_name(STREAM, NAME) to
+output the name itself; before and after that, output the additional
+assembler syntax for defining the name, and a newline.
+
+This macro controls how the assembler definitions of uninitialized
+static variables are output.
+*/
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED)	\
+  do							\
+    {							\
+      fputs ("\t.lcomm ", (FILE));			\
+      assemble_name ((FILE), (NAME));			\
+      fprintf ((FILE), ",%d, %d\n", (SIZE), 2);		\
+    }							\
+  while (0)
+
+
+/*
+A C statement (sans semicolon) to output to the stdio stream
+STREAM the assembler definition of a label named NAME.
+Use the expression assemble_name(STREAM, NAME) to
+output the name itself; before and after that, output the additional
+assembler syntax for defining the name, and a newline.
+*/
+#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
+
+/* A C string containing the appropriate assembler directive to
+ * specify the size of a symbol, without any arguments. On systems
+ * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
+ * on other systems, the default is not to define this macro.
+ *
+ * Define this macro only if it is correct to use the default
+ * definitions of ASM_OUTPUT_SIZE_DIRECTIVE and
+ * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
+ * custom definitions of those macros, or if you do not need explicit
+ * symbol sizes at all, do not define this macro.
+ */
+#define SIZE_ASM_OP "\t.size\t"
+
+
+/*
+A C statement (sans semicolon) to output to the stdio stream
+STREAM some commands that will make the label NAME global;
+that is, available for reference from other files.  Use the expression
+assemble_name(STREAM, NAME) to output the name
+itself; before and after that, output the additional assembler syntax
+for making that name global, and a newline.
+*/
+#define GLOBAL_ASM_OP "\t.globl\t"
+
+
+
+/*
+A C expression which evaluates to true if the target supports weak symbols.
+
+If you don't define this macro, defaults.h provides a default
+definition.  If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
+is defined, the default definition is '1'; otherwise, it is
+'0'.  Define this macro if you want to control weak symbol support
+with a compiler flag such as -melf.
+*/
+#define SUPPORTS_WEAK 1
+
+/*
+A C statement (sans semicolon) to output to the stdio stream
+STREAM a reference in assembler syntax to a label named
+NAME.  This should add '_' to the front of the name, if that
+is customary on your operating system, as it is in most Berkeley Unix
+systems.  This macro is used in assemble_name.
+*/
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+  avr32_asm_output_labelref(STREAM, NAME)
+
+
+
+/*
+A C expression to assign to OUTVAR (which is a variable of type
+char *) a newly allocated string made from the string
+NAME and the number NUMBER, with some suitable punctuation
+added.  Use alloca to get space for the string.
+
+The string will be used as an argument to ASM_OUTPUT_LABELREF to
+produce an assembler label for an internal static variable whose name is
+NAME.  Therefore, the string must be such as to result in valid
+assembler code.  The argument NUMBER is different each time this
+macro is executed; it prevents conflicts between similarly-named
+internal static variables in different scopes.
+
+Ideally this string should not be a valid C identifier, to prevent any
+conflict with the user's own symbols.  Most assemblers allow periods
+or percent signs in assembler symbols; putting at least one of these
+between the name and the number will suffice.
+*/
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER)		\
+  do								\
+    {								\
+      (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10);	\
+      sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER));		\
+    }								\
+  while (0)
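+
+/*
+For illustration only: with the definition above, a function-scope
+
+   static int counter;
+
+declared in two different functions ends up under two distinct assembler
+labels, e.g. "counter.0" and "counter.1" (the actual NUMBER values are
+whatever the compiler passes in), so similarly named internal statics
+cannot collide.
+*/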
+
+
+/** Macros Controlling Initialization Routines **/
+
+
+/*
+If defined, main will not call __main as described above.
+This macro should be defined for systems that control start-up code
+on a symbol-by-symbol basis, such as OSF/1, and should not
+be defined explicitly for systems that support INIT_SECTION_ASM_OP.
+*/
+/*
+  __main is not defined when debugging.
+*/
+#define HAS_INIT_SECTION
+
+
+/** Output of Assembler Instructions **/
+
+/*
+A C initializer containing the assembler's names for the machine
+registers, each one as a C string constant.  This is what translates
+register numbers in the compiler into assembler language.
+*/
+
+#define REGISTER_NAMES	\
+{			\
+  "pc",  "lr",		\
+  "sp",  "r12",		\
+  "r11", "r10",		\
+  "r9",  "r8",		\
+  "r7",  "r6",		\
+  "r5",  "r4",		\
+  "r3",  "r2",		\
+  "r1",  "r0",		\
+  "f15","f14",		\
+  "f13","f12",		\
+  "f11","f10",		\
+  "f9", "f8",		\
+  "f7", "f6",		\
+  "f5", "f4",		\
+  "f3", "f2",		\
+  "f1", "f0"		\
+}
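+
+/*
+Note (illustrative, derived from the table above and the register constants
+in avr32.md): the array is indexed by the compiler's internal register
+numbers, which run opposite to the architectural numbering, so internal
+register 0 prints as "pc", internal register 2 as "sp" and internal
+register 15 as "r0"; the entries from index 16 onwards name the
+coprocessor/floating-point registers f15 down to f0.
+*/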
+
+/*
+A C compound statement to output to stdio stream STREAM the
+assembler syntax for an instruction operand X.  X is an
+RTL expression.
+
+CODE is a value that can be used to specify one of several ways
+of printing the operand.  It is used when identical operands must be
+printed differently depending on the context.  CODE comes from
+the '%' specification that was used to request printing of the
+operand.  If the specification was just '%digit' then
+CODE is 0; if the specification was '%ltr digit'
+then CODE is the ASCII code for ltr.
+
+If X is a register, this macro should print the register's name.
+The names can be found in an array reg_names whose type is
+char *[].  reg_names is initialized from REGISTER_NAMES.
+
+When the machine description has a specification '%punct'
+(a '%' followed by a punctuation character), this macro is called
+with a null pointer for X and the punctuation character for
+CODE.
+*/
+#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
+
+/* A C statement to be executed just prior to the output of
+   assembler code for INSN, to modify the extracted operands so
+   they will be output differently.
+
+   Here the argument OPVEC is the vector containing the operands
+   extracted from INSN, and NOPERANDS is the number of elements of
+   the vector which contain meaningful data for this insn.
+   The contents of this vector are what will be used to convert the insn
+   template into assembler code, so you can change the assembler output
+   by changing the contents of the vector.  */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+  avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
+
+/*
+A C expression which evaluates to true if CODE is a valid
+punctuation character for use in the PRINT_OPERAND macro.  If
+PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
+punctuation characters (except for the standard one, '%') are used
+in this way.
+*/
+/*
+  'm' refers to the most significant word in a two-register mode.
+*/
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == 'm' || (CODE) == 'e')
+
+/*
+A C compound statement to output to stdio stream STREAM the
+assembler syntax for an instruction operand that is a memory reference
+whose address is X.  X is an RTL expression.
+
+On some machines, the syntax for a symbolic address depends on the
+section that the address refers to.  On these machines, define the macro
+ENCODE_SECTION_INFO to store the information into the
+symbol_ref, and then check for it here.  (see Assembler Format.)
+*/
+#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
+
+
+/** Output of Dispatch Tables **/
+
+/*
+ * A C statement to output to the stdio stream stream an assembler
+ * pseudo-instruction to generate a difference between two
+ * labels. value and rel are the numbers of two internal labels. The
+ * definitions of these labels are output using
+ * (*targetm.asm_out.internal_label), and they must be printed in the
+ * same way here. For example,
+ *
+ *         fprintf (stream, "\t.word L%d-L%d\n",
+ *                  value, rel)
+ *
+ * You must provide this macro on machines where the addresses in a
+ * dispatch table are relative to the table's own address. If defined,
+ * GCC will also use this macro on all machines when producing
+ * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that
+ * the mode and flags can be read.
+ */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)	    \
+    fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
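+
+/*
+Sketch (an assumption for readability, not taken from the port sources):
+since the element emitted above is a "bral" instruction rather than a label
+difference, a PIC dispatch table ends up as a sequence of branches that is
+executed in place, e.g.
+
+   bral    .L4
+   bral    .L7
+   bral    .L9
+
+which is consistent with JUMP_TABLES_IN_TEXT_SECTION being set to 1 above.
+*/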
+
+/*
+This macro should be provided on machines where the addresses
+in a dispatch table are absolute.
+
+The definition should be a C statement to output to the stdio stream
+STREAM an assembler pseudo-instruction to generate a reference to
+a label.  VALUE is the number of an internal label whose
+definition is output using ASM_OUTPUT_INTERNAL_LABEL.
+For example,
+
+fprintf(STREAM, "\t.word L%d\n", VALUE)
+*/
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE)  \
+  fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
+
+/** Assembler Commands for Exception Regions */
+
+/* ToDo: All of this subsection */
+
+/** Assembler Commands for Alignment */
+
+
+/*
+A C statement to output to the stdio stream STREAM an assembler
+command to advance the location counter to a multiple of 2 to the
+POWER bytes.  POWER will be a C expression of type int.
+*/
+#define ASM_OUTPUT_ALIGN(STREAM, POWER)			\
+  do							\
+    {							\
+      if ((POWER) != 0)					\
+	fprintf(STREAM, "\t.align\t%d\n", POWER);	\
+    }							\
+  while (0)
+
+/*
+Like ASM_OUTPUT_ALIGN, except that the 'nop' instruction is used for
+padding, if necessary.
+*/
+#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
+ fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER))
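+
+/*
+Example (illustrative): for POWER == 2 the macro above emits
+
+   .balignw 4, 0xd703
+
+so the assembler pads to a 4-byte boundary using the 16-bit word 0xd703,
+presumably the encoding of the AVR32 "nop" instruction mentioned in the
+comment above.
+*/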
+
+
+
+/******************************************************************************
+ * Controlling Debugging Information Format
+ *****************************************************************************/
+
+/* How to renumber registers for dbx and gdb.  */
+#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
+
+/* The DWARF 2 CFA column which tracks the return address.  */
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
+
+/*
+Define this macro if GCC should produce dwarf version 2 format
+debugging output in response to the -g option.
+
+To support optional call frame debugging information, you must also
+define INCOMING_RETURN_ADDR_RTX and either set
+RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
+prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
+as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
+*/
+#define DWARF2_DEBUGGING_INFO 1
+
+
+#define DWARF2_ASM_LINE_DEBUG_INFO 1
+#define DWARF2_FRAME_INFO 1
+
+
+/******************************************************************************
+ * Miscellaneous Parameters
+ *****************************************************************************/
+
+/* ToDo: a lot */
+
+/*
+An alias for a machine mode name.  This is the machine mode that
+elements of a jump-table should have.
+*/
+#define CASE_VECTOR_MODE SImode
+
+/*
+Define this macro to be a C expression to indicate when jump-tables
+should contain relative addresses.  If jump-tables never contain
+relative addresses, then you need not define this macro.
+*/
+#define CASE_VECTOR_PC_RELATIVE 0
+
+/*
+The maximum number of bytes that a single instruction can move quickly
+between memory and registers or between two memory locations.
+*/
+#define MOVE_MAX (2*UNITS_PER_WORD)
+
+
+/* A C expression that is nonzero if on this machine the number of bits actually used
+   for the count of a shift operation is equal to the number of bits needed to represent
+   the size of the object being shifted.  When this macro is nonzero, the compiler will
+   assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
+   instructions that truncate the count of a shift operation.  On machines that have
+   instructions that act on bit-fields at variable positions, which may include 'bit test'
+   instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations
+   of the values that serve as arguments to bit-field instructions.
+   If both types of instructions truncate the count (for shifts) and position (for bit-field
+   operations), or if no variable-position bit-field instructions exist, you should define
+   this macro.
+   However, on some machines, such as the 80386 and the 680x0, truncation only applies
+   to shift operations and not the (real or pretended) bit-field operations.  Define
+   SHIFT_COUNT_TRUNCATED to be zero on such machines.  Instead, add patterns to the 'md'
+   file that include the implied truncation of the shift instructions.
+   You need not define this macro if it would always have the value of zero. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/*
+A C expression which is nonzero if on this machine it is safe to
+convert an integer of INPREC bits to one of OUTPREC
+bits (where OUTPREC is smaller than INPREC) by merely
+operating on it as if it had only OUTPREC bits.
+
+On many machines, this expression can be 1.
+
+When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
+modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
+If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
+such cases may improve things.
+*/
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/*
+An alias for the machine mode for pointers.  On most machines, define
+this to be the integer mode corresponding to the width of a hardware
+pointer; SImode on 32-bit machines or DImode on 64-bit machines.
+On some machines you must define this to be one of the partial integer
+modes, such as PSImode.
+
+The width of Pmode must be at least as large as the value of
+POINTER_SIZE.  If it is not equal, you must define the macro
+POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
+to Pmode.
+*/
+#define Pmode SImode
+
+/*
+An alias for the machine mode used for memory references to functions
+being called, in call RTL expressions.  On most machines this
+should be QImode.
+*/
+#define FUNCTION_MODE SImode
+
+
+#define REG_S_P(x) \
+ (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
+
+
+/* If defined, modifies the length assigned to instruction INSN as a
+   function of the context in which it is used.  LENGTH is an lvalue
+   that contains the initially computed length of the insn and should
+   be updated with the correct length of the insn.  */
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
+  ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
+
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
+  (value = 32, (mode == SImode))
+
+#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
+  (value = 32, (mode == SImode))
+
+#define UNITS_PER_SIMD_WORD UNITS_PER_WORD
+
+#define STORE_FLAG_VALUE 1
+
+enum avr32_builtins
+{
+  AVR32_BUILTIN_MTSR,
+  AVR32_BUILTIN_MFSR,
+  AVR32_BUILTIN_MTDR,
+  AVR32_BUILTIN_MFDR,
+  AVR32_BUILTIN_CACHE,
+  AVR32_BUILTIN_SYNC,
+  AVR32_BUILTIN_TLBR,
+  AVR32_BUILTIN_TLBS,
+  AVR32_BUILTIN_TLBW,
+  AVR32_BUILTIN_BREAKPOINT,
+  AVR32_BUILTIN_XCHG,
+  AVR32_BUILTIN_LDXI,
+  AVR32_BUILTIN_BSWAP16,
+  AVR32_BUILTIN_BSWAP32,
+  AVR32_BUILTIN_COP,
+  AVR32_BUILTIN_MVCR_W,
+  AVR32_BUILTIN_MVRC_W,
+  AVR32_BUILTIN_MVCR_D,
+  AVR32_BUILTIN_MVRC_D,
+  AVR32_BUILTIN_MULSATHH_H,
+  AVR32_BUILTIN_MULSATHH_W,
+  AVR32_BUILTIN_MULSATRNDHH_H,
+  AVR32_BUILTIN_MULSATRNDWH_W,
+  AVR32_BUILTIN_MULSATWH_W,
+  AVR32_BUILTIN_MACSATHH_W,
+  AVR32_BUILTIN_SATADD_H,
+  AVR32_BUILTIN_SATSUB_H,
+  AVR32_BUILTIN_SATADD_W,
+  AVR32_BUILTIN_SATSUB_W,
+  AVR32_BUILTIN_MULWH_D,
+  AVR32_BUILTIN_MULNWH_D,
+  AVR32_BUILTIN_MACWH_D,
+  AVR32_BUILTIN_MACHH_D,
+  AVR32_BUILTIN_MUSFR,
+  AVR32_BUILTIN_MUSTR,
+  AVR32_BUILTIN_SATS,
+  AVR32_BUILTIN_SATU,
+  AVR32_BUILTIN_SATRNDS,
+  AVR32_BUILTIN_SATRNDU
+};
+
+
+#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
+  ((MODE == SFmode) || (MODE == DFmode))
+
+#define RENAME_LIBRARY_SET ".set"
+
+/* Make __avr32_ABI_NAME an alias for __GCC_NAME.  */
+#define RENAME_LIBRARY(GCC_NAME, ABI_NAME)		\
+  __asm__ (".globl\t__avr32_" #ABI_NAME "\n"		\
+	   ".set\t__avr32_" #ABI_NAME 	\
+	     ", __" #GCC_NAME "\n");
+
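+/*
+For illustration: RENAME_LIBRARY (muldi3, mul64), as selected below when
+L_muldi3 is being compiled, expands to an asm statement emitting
+
+   .globl  __avr32_mul64
+   .set    __avr32_mul64, __muldi3
+
+so the AVR32 ABI name becomes an alias for the generic libgcc symbol.
+*/
+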
+/* Give libgcc functions avr32 ABI name.  */
+#ifdef L_muldi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
+#endif
+#ifdef L_divdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
+#endif
+#ifdef L_udivdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
+#endif
+#ifdef L_moddi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
+#endif
+#ifdef L_umoddi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
+#endif
+#ifdef L_ashldi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
+#endif
+#ifdef L_lshrdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
+#endif
+#ifdef L_ashrdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
+#endif
+
+#ifdef L_fixsfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
+#endif
+#ifdef L_fixunssfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
+#endif
+#ifdef L_floatdidf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
+#endif
+#ifdef L_floatdisf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
+#endif
+
+#ifdef L_addsub_sf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (addsf3, f32_add); RENAME_LIBRARY (subsf3, f32_sub)
+#endif
+
+#endif
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/avr32.md gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32.md
--- gcc-4.0.2/gcc/config/avr32/avr32.md	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32.md	2006-11-09 15:04:35.000000000 +0100
@@ -0,0 +1,4694 @@
+;;   AVR32 machine description file.
+;;   Copyright 2003-2006 Atmel Corporation.
+;;
+;;   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+;;
+;;   This file is part of GCC.
+;;
+;;   This program is free software; you can redistribute it and/or modify
+;;   it under the terms of the GNU General Public License as published by
+;;   the Free Software Foundation; either version 2 of the License, or
+;;   (at your option) any later version.
+;;
+;;   This program is distributed in the hope that it will be useful,
+;;   but WITHOUT ANY WARRANTY; without even the implied warranty of
+;;   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;;   GNU General Public License for more details.
+;;
+;;   You should have received a copy of the GNU General Public License
+;;   along with this program; if not, write to the Free Software
+;;   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; -*- Mode: Scheme -*-
+
+(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
+  (const_string "alu"))
+
+
+(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,bld,compare,clobber,call_set,fpcompare,from_fpcc"
+  (const_string "none"))
+
+
+(define_attr "pipeline" "ap,uc"
+  (const_string "ap"))
+
+(define_attr "length" ""
+  (const_int 4))
+
+
+;; Uses of UNSPEC in this file:
+(define_constants
+  [(UNSPEC_PUSHM                0)
+   (UNSPEC_POPM                 1)
+   (UNSPEC_UDIVMODSI4_INTERNAL	2)
+   (UNSPEC_DIVMODSI4_INTERNAL   3)
+   (UNSPEC_STM                  4)
+   (UNSPEC_LDM                  5)
+   (UNSPEC_MOVSICC              6)
+   (UNSPEC_ADDSICC              7)
+   (UNSPEC_COND_MI              8)
+   (UNSPEC_COND_PL              9)
+   (UNSPEC_PIC_SYM              10)
+   (UNSPEC_PIC_BASE             11)
+   (UNSPEC_STORE_MULTIPLE       12)
+   (UNSPEC_STMFP                13)
+   (UNSPEC_FPCC_TO_REG          14)
+   (UNSPEC_REG_TO_CC            15)
+   (UNSPEC_FORCE_MINIPOOL       16)
+   (UNSPEC_SATS                 17)
+   (UNSPEC_SATU                 18)
+   (UNSPEC_SATRNDS              19)
+   (UNSPEC_SATRNDU              20)
+  ])
+
+(define_constants
+  [(VUNSPEC_EPILOGUE   0)
+   (VUNSPEC_CACHE      1)
+   (VUNSPEC_MTSR       2)
+   (VUNSPEC_MFSR       3)
+   (VUNSPEC_BLOCKAGE   4)
+   (VUNSPEC_SYNC       5)
+   (VUNSPEC_TLBR       6)
+   (VUNSPEC_TLBW       7)
+   (VUNSPEC_TLBS       8)
+   (VUNSPEC_BREAKPOINT 9)
+   (VUNSPEC_MTDR       10)
+   (VUNSPEC_MFDR       11)
+   (VUNSPEC_MVCR       12)
+   (VUNSPEC_MVRC       13)
+   (VUNSPEC_COP        14)
+   (VUNSPEC_ALIGN      15)
+   (VUNSPEC_POOL_START 16)
+   (VUNSPEC_POOL_END   17)
+   (VUNSPEC_POOL_4     18)
+   (VUNSPEC_POOL_8     19)
+   (VUNSPEC_MUSFR      20)
+   (VUNSPEC_MUSTR       21)
+   ])
+
+(define_constants
+  [
+   ;; Frame Pointer = R7 = 15 - 7 = 8
+   (FP_REGNUM   8)
+   ;; Return Register = R12 = 15 - 12 = 3
+   (RETVAL_REGNUM   3)
+   ;; SP = R13 = 15 - 13 = 2
+   (SP_REGNUM   2)
+   ;; LR = R14 = 15 - 14 = 1
+   (LR_REGNUM   1)
+   ;; PC = R15 = 15 - 15 = 0
+   (PC_REGNUM   0)
+   ;; FPSR = GENERAL_REGS + 1 = 17
+   (FPCC_REGNUM 17)
+   ])
+
+
+
+
+;;******************************************************************************
+;; Macros
+;;******************************************************************************
+
+;; Integer Modes for basic alu insns
+(define_mode_macro INTM [SI HI QI])
+(define_mode_attr  alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
+
+;; Move word modes
+(define_mode_macro MOVM [SI V2HI V4QI])
+
+;; For mov/addcc insns
+(define_mode_macro ADDCC [SI HI QI])
+(define_mode_macro MOVCC [SI HI QI])
+(define_mode_macro CMP [DI SI HI QI])
+(define_mode_attr  cmp_constraint [(DI "r") (SI "rKs21") (HI "r") (QI "r")])
+(define_mode_attr  cmp_predicate [(DI "register_operand")
+                                  (SI "register_immediate_operand")
+                                  (HI "register_operand")
+                                  (QI "register_operand")])
+
+;; For all conditional insns
+(define_code_macro any_cond [eq ne gt ge lt le gtu geu ltu leu])
+(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
+                        (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
+(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
+                           (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
+
+;; For logical operations
+(define_code_macro logical [and ior xor])
+(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
+
+;; Load the predicates
+(include "predicates.md")
+
+
+;;******************************************************************************
+;; Automaton pipeline description for avr32
+;;******************************************************************************
+
+(define_automaton "avr32_ap")
+
+
+(define_cpu_unit "is" "avr32_ap")
+(define_cpu_unit "a1,m1,da" "avr32_ap")
+(define_cpu_unit "a2,m2,d" "avr32_ap")
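+
+;; (Illustrative note; the stage names are an assumption based on the unit
+;; names: "is" models the issue stage, a1/a2 the two ALU stages, m1/m2 the
+;; multiplier stages and da/d the data-address/data stages.  A reservation
+;; string such as "is,a1,a2" below means the insn occupies those units on
+;; three successive cycles, and the integer in each define_insn_reservation
+;; is the default result latency used by the scheduler.)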
+
+;;Alu instructions
+(define_insn_reservation "alu_op" 1
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "alu"))
+  "is,a1,a2")
+
+(define_insn_reservation "alu2_op" 2
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "alu2"))
+  "is,is+a1,a1+a2,a2")
+
+(define_insn_reservation "alu_sat_op" 2
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "alu_sat"))
+  "is,a1,a2")
+
+
+;;Mul instructions
+(define_insn_reservation "mulhh_op" 2
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "mulhh,mulwh"))
+  "is,m1,m2")
+
+(define_insn_reservation "mulww_w_op" 3
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "mulww_w"))
+  "is,m1,m1+m2,m2")
+
+(define_insn_reservation "mulww_d_op" 5
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "mulww_d"))
+  "is,m1,m1+m2,m1+m2,m2,m2")
+
+(define_insn_reservation "div_op" 33
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "div"))
+  "is,m1,m1*31 + m2*31,m2")
+
+(define_insn_reservation "machh_w_op" 3
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "machh_w"))
+  "is*2,m1,m2")
+
+
+(define_insn_reservation "macww_w_op" 4
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "macww_w"))
+  "is*2,m1,m1,m2")
+
+
+(define_insn_reservation "macww_d_op" 6
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "macww_d"))
+  "is*2,m1,m1+m2,m1+m2,m2")
+
+;;Bypasses for Mac instructions, because of accumulator cache.
+;;Set the latency as low as possible in order to let the compiler place
+;;mul -> mac and mac -> mac combinations which use the same
+;;accumulator cache close together, so that no instruction which could
+;;ruin the accumulator cache comes in between.
+(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
+(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
+(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
+
+(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
+(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
+(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
+
+
+;;Bypasses for all mul/mac instructions followed by an instruction
+;;which reads the output AND writes the result to the same register.
+;;This will generate a Write After Write hazard which gives an
+;;extra cycle before the result is ready.
+(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
+(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
+(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
+
+(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
+(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
+(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
+
+;;Branch and call instructions
+;;We assume that all branches and rcalls are predicted correctly :-)
+;;while calls use a lot of cycles.
+(define_insn_reservation "branch_op" 0
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "branch"))
+  "nothing")
+
+(define_insn_reservation "call_op" 10
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "call"))
+  "nothing")
+
+
+;;Load store instructions
+(define_insn_reservation "load_op" 2
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "load"))
+  "is,da,d")
+
+(define_insn_reservation "load_rm_op" 3
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "load_rm"))
+  "is,da,d")
+
+
+(define_insn_reservation "store_op" 0
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "store"))
+  "is,da,d")
+
+
+(define_insn_reservation "load_double_op" 3
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "load2"))
+  "is,da,da+d,d")
+
+(define_insn_reservation "load_quad_op" 4
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "load4"))
+  "is,da,da+d,da+d,d")
+
+(define_insn_reservation "store_double_op" 0
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "store2"))
+  "is,da,da+d,d")
+
+
+(define_insn_reservation "store_quad_op" 0
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "store4"))
+  "is,da,da+d,da+d,d")
+
+;;For stores the operand to write to memory is read in d, and
+;;the real latency between any instruction and a store is therefore
+;;one less than for the instructions which read their operands in the
+;;first execution stage.
+(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
+(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
+(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
+(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
+(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
+(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
+(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
+(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
+(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
+(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
+(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
+(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
+
+
+; Bypass for load double operation. If only the first loaded word is needed
+; then the latency is 2
+(define_bypass 2 "load_double_op"
+                 "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
+                  mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
+                 "avr32_valid_load_double_bypass")
+
+; Bypass for load quad operation. If only the first or second loaded word is needed
+; we set the latency to 2
+(define_bypass 2 "load_quad_op"
+                 "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
+                  mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
+                 "avr32_valid_load_quad_bypass")
+
+
+;;******************************************************************************
+;; End of Automaton pipeline description for avr32
+;;******************************************************************************
+
+
+
+;;=============================================================================
+;; move
+;;-----------------------------------------------------------------------------
+
+;;== char - 8 bits ============================================================
+(define_expand "movqi"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "")
+	(match_operand:QI 1 "general_operand" ""))]
+  ""
+  {
+   if ( !no_new_pseudos ){
+     if (GET_CODE (operands[1]) == MEM && optimize){
+         rtx reg = gen_reg_rtx (SImode);
+
+         emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
+         operands[1] = gen_lowpart (QImode, reg);
+     }
+
+     /* One of the ops has to be in a register.  */
+     if (GET_CODE (operands[0]) == MEM)
+       operands[1] = force_reg (QImode, operands[1]);
+   }
+
+  })
+
+(define_insn "*movqi_internal"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
+	(match_operand:QI 1 "general_operand"  "rKs08,m,r,i"))]
+  ""
+  "@
+   mov\t%0, %1
+   ld.ub\t%0, %1
+   st.b\t%0, %1
+   mov\t%0, %1"
+  [(set_attr "length" "2,4,4,4")
+   (set_attr "type" "alu,load_rm,store,alu")])
+
+
+
+;;== short - 16 bits ==========================================================
+(define_expand "movhi"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "")
+	(match_operand:HI 1 "general_operand" ""))]
+  ""
+  {
+   if ( !no_new_pseudos ){
+     if (GET_CODE (operands[1]) == MEM && optimize){
+         rtx reg = gen_reg_rtx (SImode);
+
+         emit_insn (gen_extendhisi2 (reg, operands[1]));
+         operands[1] = gen_lowpart (HImode, reg);
+     }
+
+     /* One of the ops has to be in a register.  */
+     if (GET_CODE (operands[0]) == MEM)
+       operands[1] = force_reg (HImode, operands[1]);
+   }
+
+  })
+
+(define_insn "*movhi_internal"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
+	(match_operand:HI 1 "general_operand"  "rKs08,m,r,i"))]
+  ""
+  "@
+   mov\t%0, %1
+   ld.sh\t%0, %1
+   st.h\t%0, %1
+   mov\t%0, %1"
+  [(set_attr "length" "2,4,4,4")
+   (set_attr "type" "alu,load_rm,store,alu")])
+
+
+;;== int - 32 bits ============================================================
+
+(define_expand "movmisalignsi"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "")
+	(match_operand:SI 1 "nonimmediate_operand" ""))]
+  "TARGET_UNALIGNED_WORD"
+  {
+  }
+)
+
+(define_expand "mov<mode>"
+  [(set (match_operand:MOVM 0 "nonimmediate_operand" "")
+	(match_operand:MOVM 1 "general_operand" ""))]
+  ""
+  {
+
+    /* One of the ops has to be in a register.  */
+    if (GET_CODE (operands[0]) == MEM)
+      operands[1] = force_reg (<MODE>mode, operands[1]);
+
+
+    /* Check for out-of-range immediate constants, as these may
+       occur during reloading, since it seems like reload does
+       not check whether the immediate is legitimate.  It is unclear
+       whether this is a reload bug. */
+    if ( reload_in_progress
+         && GET_CODE(operands[1]) == CONST_INT
+         && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
+        operands[1] = force_const_mem(SImode, operands[1]);
+    }
+
+    if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
+         && !avr32_legitimate_pic_operand_p(operands[1]) )
+      operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
+                                            (no_new_pseudos ? operands[0] : 0));
+    else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
+      /* If we have an address operand then this function uses the pic register. */
+      current_function_uses_pic_offset_table = 1;
+  })
+
+
+(define_insn "mov<mode>_internal"
+  [(set (match_operand:MOVM 0 "nonimmediate_operand"     "=r,r,r,m,r")
+	(match_operand:MOVM 1 "general_operand"  "rKs08,Ks21,m,r,W"))]
+  ""
+  {
+    switch (which_alternative) {
+      case 0:
+      case 1: return "mov\t%0, %1";
+      case 2:
+        if ( (REG_P(XEXP(operands[1], 0))
+              && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
+             || (GET_CODE(XEXP(operands[1], 0)) == PLUS
+                 && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
+	         && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
+	         && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
+	         && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
+          return "lddsp\t%0, %1";
+	else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
+          return "lddpc\t%0, %1";
+        else
+          return "ld.w\t%0, %1";
+      case 3:
+        if ( (REG_P(XEXP(operands[0], 0))
+              && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
+             || (GET_CODE(XEXP(operands[0], 0)) == PLUS
+                 && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
+	         && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
+	         && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
+	         && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
+          return "stdsp\t%0, %1";
+	else
+          return "st.w\t%0, %1";
+      case 4:
+        if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
+          return "lda.w\t%0, %1";
+        else
+          return "ld.w\t%0, r6[%1@got]";
+      default:
+	abort();
+    }
+  }
+
+  [(set_attr "length" "2,4,4,4,8")
+   (set_attr "type" "alu,alu,load,store,load")
+   (set_attr "cc" "none,none,none,none,clobber")])
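+
+;; Note (an illustrative assumption, not from the original sources): the
+;; SP_REGNUM tests in the template above select the short stack-relative
+;; encodings, so a word access such as "ld.w r8, sp[8]" can be emitted as
+;; "lddsp r8, sp[8]" (and stores as "stdsp"); this is presumably only
+;; encodable while the displacement is word aligned and no larger than
+;; 0x1FC, hence the "% 4 == 0" and "<= 0x1FC" checks.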
+
+
+;; These instructions are for loading constants which cannot be loaded
+;; directly from the constant pool because the offset is too large.
+;; high and lo_sum are used even though, for our case, it should really
+;; have been low and high sum :-)
+(define_insn "mov_symbol_lo"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
+  ""
+  "mov\t%0, lo(%1)"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")]
+)
+
+(define_insn "add_symbol_hi"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(lo_sum:SI (match_dup 0)
+                   (match_operand:SI 1 "immediate_operand" "i" )))]
+  ""
+  "orh\t%0, hi(%1)"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")]
+)
+
+
+
+;; When generating pic, we need to load the symbol offset into a register.
+;; So that the optimizer does not confuse this with a normal symbol load
+;; we use an unspec.  The offset will be loaded from a constant pool entry,
+;; since that is the only type of relocation we can use.
+(define_insn "pic_load_addr"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
+  "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
+  "lddpc\t%0, %1"
+  [(set_attr "type" "load")
+   (set_attr "length" "4")]
+)
+
+(define_insn "pic_compute_got_from_pc"
+  [(set (match_operand:SI 0 "register_operand" "+r")
+	(unspec:SI [(minus:SI (pc)
+                              (match_dup 0))] UNSPEC_PIC_BASE))
+   (use (label_ref (match_operand 1 "" "")))]
+  "flag_pic"
+  {
+   (*targetm.asm_out.internal_label) (asm_out_file, "L",
+	 		     CODE_LABEL_NUMBER (operands[1]));
+   return \"rsub\t%0, pc\";
+  }
+  [(set_attr "cc" "clobber")
+   (set_attr "length" "2")]
+)
+
+;;== long long int - 64 bits ==================================================
+(define_expand "movdi"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "")
+	(match_operand:DI 1 "general_operand" ""))]
+  ""
+  {
+
+    /* One of the ops has to be in a register.  */
+    if (GET_CODE (operands[0]) != REG)
+      operands[1] = force_reg (DImode, operands[1]);
+
+  })
+
+
+(define_insn_and_split "*movdi_internal"
+  [(set (match_operand:DI 0 "nonimmediate_operand"     "=r,r,r,r,r,m")
+	(match_operand:DI 1 "general_operand"  "r,Ks08,Ks21,G,m,r"))]
+  ""
+  {
+    switch (which_alternative ){
+    case 1:
+    case 2:
+      if ( INTVAL(operands[1]) < 0 )
+        return "mov\t%0, %1\;mov\t%m0, -1";
+      else
+        return "mov\t%0, %1\;mov\t%m0, 0";
+    case 0:
+    case 3:
+      return "mov\t%0, %1\;mov\t%m0, %m1";
+    case 4:
+      if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
+        return "ld.d\t%0, pc[%1 - .]";
+      else
+        return "ld.d\t%0, %1";
+    case 5:
+      return "st.d\t%0, %1";
+    default:
+      abort();
+    }
+  }
+  "reload_completed &&
+   (REG_P(operands[0]) &&
+   (REG_P(operands[1]) || avr32_const_double_immediate(operands[1]) ||
+    ((GET_CODE(operands[1]) == CONST_INT) && avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', \"Ks21\")) ))"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))]
+  {
+    operands[2] = gen_highpart (SImode, operands[0]);
+    operands[0] = gen_lowpart (SImode, operands[0]);
+    if ( REG_P(operands[1]) ){
+      operands[3] = gen_highpart(SImode, operands[1]);
+      operands[1] = gen_lowpart(SImode, operands[1]);
+    } else if ( GET_CODE(operands[1]) == CONST_DOUBLE ){
+      operands[3] = GEN_INT(CONST_DOUBLE_LOW(operands[1]));
+      operands[1] = GEN_INT(CONST_DOUBLE_HIGH(operands[1]));
+    } else if ( GET_CODE(operands[1]) == CONST_INT ){
+      operands[3] = GEN_INT((INTVAL(operands[1]) < 0) ? -1 : 0);
+      operands[1] = operands[1];
+    } else {
+      internal_error("Illegal operand[1] for movdi split!");
+    }
+  }
+
+  [(set_attr "length" "4,6,8,8,4,4")
+   (set_attr "type" "alu2,alu2,alu2,alu2,load2,store2")])
+
+
+;;== 128 bits ==================================================
+(define_expand "movti"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "")
+	(match_operand:TI 1 "general_operand" ""))]
+  ""
+  {
+
+    /* One of the ops has to be in a register.  */
+    if (GET_CODE (operands[0]) != REG)
+      operands[1] = force_reg (TImode, operands[1]);
+
+    /* We must fix up any post_inc stores and pre_dec loads ourselves.  */
+    if ( GET_CODE (operands[0]) == MEM
+         && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
+       emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
+       emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
+       DONE;
+    }
+
+    if ( GET_CODE (operands[1]) == MEM
+         && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
+       emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
+       emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
+       DONE;
+    }
+
+    if (GET_CODE (operands[1]) == CONST_INT){
+      unsigned int sign_extend = (INTVAL(operands[1]) < 0) ? 0xFFFFFFFF : 0;
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 12), operands[1]);
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 8), GEN_INT(sign_extend));
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 4), GEN_INT(sign_extend));
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 0), GEN_INT(sign_extend));
+      DONE;
+    }
+
+    if (GET_CODE (operands[0]) == REG
+        && GET_CODE (operands[1]) == REG){
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 12), gen_rtx_SUBREG(SImode, operands[1], 12));
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 8), gen_rtx_SUBREG(SImode, operands[1], 8));
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 4), gen_rtx_SUBREG(SImode, operands[1], 4));
+      emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 0), gen_rtx_SUBREG(SImode, operands[1], 0));
+      DONE;
+    }
+  })
+
+
+(define_insn "*movti_internal"
+  [(set (match_operand:TI 0 "nonimmediate_operand"  "=r,r, <RKu00,r")
+	(match_operand:TI 1 "loadti_operand"   " r,RKu00>,r,m"))]
+  ""
+  "@
+   mov\t%T0, %T1\;mov\t%U0, %U1\;mov\t%L0, %L1\;mov\t%B0, %B1
+   ldm\t%p1, %0
+   stm\t%p0, %1
+   ldm\t%p1, %0"
+  [(set_attr "length" "8,4,4,4")
+   (set_attr "type" "alu,load4,store4,load4")])
+
+
+;;== float - 32 bits ==========================================================
+(define_expand "movsf"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "")
+	(match_operand:SF 1 "general_operand" ""))]
+  ""
+  {
+
+
+    /* One of the ops has to be in a register.  */
+    if (GET_CODE (operands[0]) != REG)
+      operands[1] = force_reg (SFmode, operands[1]);
+
+  })
+
+(define_insn "*movsf_internal"
+  [(set (match_operand:SF 0 "nonimmediate_operand"     "=r,r,r,m")
+	(match_operand:SF 1 "general_operand"  "r,G,m,r"))]
+  "TARGET_SOFT_FLOAT"
+  {
+    switch (which_alternative) {
+      case 0:
+      case 1: return "mov\t%0, %1";
+      case 2:
+        if ( (REG_P(XEXP(operands[1], 0))
+              && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
+             || (GET_CODE(XEXP(operands[1], 0)) == PLUS
+                 && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
+	         && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
+	         && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
+	         && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
+          return "lddsp\t%0, %1";
+	else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
+          return "lddpc\t%0, %1";
+        else
+          return "ld.w\t%0, %1";
+      case 3:
+        if ( (REG_P(XEXP(operands[0], 0))
+              && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
+             || (GET_CODE(XEXP(operands[0], 0)) == PLUS
+                 && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
+	         && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
+	         && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
+	         && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
+          return "stdsp\t%0, %1";
+	else
+          return "st.w\t%0, %1";
+      default:
+	abort();
+    }
+  }
+
+  [(set_attr "length" "2,4,4,4")
+   (set_attr "type" "alu,alu,load,store")])
+
+
+
+;;== double - 64 bits =========================================================
+(define_expand "movdf"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "")
+	(match_operand:DF 1 "general_operand" ""))]
+  ""
+  {
+    /* One of the ops has to be in a register.  */
+    if (GET_CODE (operands[0]) != REG){
+      operands[1] = force_reg (DFmode, operands[1]);
+    }
+  })
+
+
+(define_insn_and_split "*movdf_internal"
+  [(set (match_operand:DF 0 "nonimmediate_operand"     "=r,r,r,m")
+	(match_operand:DF 1 "general_operand"  "r,G,m,r"))]
+  "TARGET_SOFT_FLOAT"
+  {
+    switch (which_alternative ){
+    case 0:
+    case 1:
+      return "mov\t%0, %1\;mov\t%m0, %m1";
+    case 2:
+      if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
+        return "ld.d\t%0, pc[%1 - .]";
+      else
+        return "ld.d\t%0, %1";
+    case 3:
+      return "st.d\t%0, %1";
+    default:
+      abort();
+    }
+  }
+  "TARGET_SOFT_FLOAT
+   && reload_completed
+   && (REG_P(operands[0]) && REG_P(operands[1]))"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))]
+  "
+   {
+    operands[2] = gen_highpart (SImode, operands[0]);
+    operands[0] = gen_lowpart (SImode, operands[0]);
+    operands[3] = gen_highpart(SImode, operands[1]);
+    operands[1] = gen_lowpart(SImode, operands[1]);
+   }
+  "
+
+  [(set_attr "length" "4,8,4,4")
+   (set_attr "type" "alu2,alu2,load2,store2")])
+
+
+
+
+;;=============================================================================
+;; Move chunks of memory
+;;=============================================================================
+
+(define_expand "movmemsi"
+  [(match_operand:BLK 0 "general_operand" "")
+   (match_operand:BLK 1 "general_operand" "")
+   (match_operand:SI 2 "const_int_operand" "")
+   (match_operand:SI 3 "const_int_operand" "")]
+  ""
+  "
+   if (avr32_gen_movmemsi (operands))
+     DONE;
+   FAIL;
+  "
+  )
+
+
+
+
+;;=============================================================================
+;; Bit field instructions
+;;-----------------------------------------------------------------------------
+;; Instructions to insert or extract bit-fields
+;;=============================================================================
+
+(define_insn "insv"
+  [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+                          (match_operand:SI 1 "immediate_operand" "Ku05")
+                          (match_operand:SI 2 "immediate_operand" "Ku05"))
+         (match_operand 3 "register_operand" "r"))]
+  ""
+  "bfins\t%0, %3, %2, %1"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "set_ncz")])
+
+
+
+
+(define_insn "extv"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
+                          (match_operand:SI 2 "immediate_operand" "Ku05")
+                          (match_operand:SI 3 "immediate_operand" "Ku05")))]
+  ""
+  "bfexts\t%0, %1, %3, %2"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "set_ncz")])
+
+
+(define_insn "extzv"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+                          (match_operand:SI 2 "immediate_operand" "Ku05")
+                          (match_operand:SI 3 "immediate_operand" "Ku05")))]
+  ""
+  "bfextu\t%0, %1, %3, %2"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "set_ncz")])
+
+
+
+;;=============================================================================
+;; Some peepholes for avoiding unnecessary cast instructions
+;; followed by bfins.
+;;-----------------------------------------------------------------------------
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
+   (set (zero_extract:SI (match_operand 2 "register_operand" "")
+                         (match_operand:SI 3 "immediate_operand" "")
+                         (match_operand:SI 4 "immediate_operand" ""))
+        (match_dup 0))]
+  "((peep2_reg_dead_p(2, operands[0]) &&
+    (INTVAL(operands[3]) <= 8)))"
+  [(set (zero_extract:SI (match_dup 2)
+                         (match_dup 3)
+                         (match_dup 4))
+        (match_dup 1))]
+  )
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
+   (set (zero_extract:SI (match_operand 2 "register_operand" "")
+                         (match_operand:SI 3 "immediate_operand" "")
+                         (match_operand:SI 4 "immediate_operand" ""))
+        (match_dup 0))]
+  "((peep2_reg_dead_p(2, operands[0]) &&
+    (INTVAL(operands[3]) <= 16)))"
+  [(set (zero_extract:SI (match_dup 2)
+                         (match_dup 3)
+                         (match_dup 4))
+        (match_dup 1))]
+  )
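+
+;; Illustrative note (not from the original sources): if GCC first
+;; zero-extends a QImode or HImode register and then feeds it to bfins with
+;; a field width of at most 8 (respectively 16) bits, the extension cannot
+;; affect the bits that bfins actually inserts; so, provided the extended
+;; register is dead afterwards, the peepholes above let bfins read the
+;; narrow register directly and the separate cast instruction disappears.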
+
+;;=============================================================================
+;; push multiple registers
+;;-----------------------------------------------------------------------------
+;; Implements the pushm instruction, which pushes the registers selected
+;; by the operand mask.
+;;=============================================================================
+(define_insn "pushm"
+  [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
+        (unspec:BLK [(match_operand 0 "const_int_operand" "")]
+                    UNSPEC_PUSHM))]
+  ""
+  {
+    if (INTVAL(operands[0])) {
+      return "pushm\t%r0";
+    } else {
+      return "";
+    }
+  }
+  [(set_attr "type" "store")
+   (set_attr "length" "2")
+   (set_attr "cc" "none")])
+
+(define_insn "stm"
+  [(unspec [(match_operand 0 "register_operand" "r")
+            (match_operand 1 "const_int_operand" "")
+            (match_operand 2 "const_int_operand" "")]
+	   UNSPEC_STM)]
+  ""
+  {
+    if (INTVAL(operands[1])) {
+      if (INTVAL(operands[2]) != 0)
+         return "stm\t--%0, %s1";
+      else
+         return "stm\t%0, %s1";
+    } else {
+      return "";
+    }
+  }
+  [(set_attr "type" "store")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+
+
+(define_insn "popm"
+  [(unspec [(match_operand 0 "const_int_operand" "")]
+	   UNSPEC_POPM)]
+  ""
+  {
+    if (INTVAL(operands[0])) {
+      return "popm   %r0";
+    } else {
+      return "";
+    }
+  }
+  [(set_attr "type" "load")
+   (set_attr "length" "2")])
+
+
+
+;;=============================================================================
+;; add
+;;-----------------------------------------------------------------------------
+;; Add reg1 and reg2 (or an immediate value) and put the result in reg0.
+;;=============================================================================
+(define_insn "add<mode>3"
+  [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
+	(plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
+                   (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
+  ""
+  "@
+   add     %0, %2
+   add     %0, %1, %2
+   sub     %0, %n2
+   sub     %0, %1, %n2
+   sub     %0, %n2"
+
+  [(set_attr "length" "2,4,2,4,4")
+   (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+(define_insn "*addsi3_lsl"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
+                            (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))
+                 (match_operand:SI 2 "register_operand" "r")))]
+  ""
+  "add     %0, %2, %1 << %3"
+  [(set_attr "length" "4")
+   (set_attr "cc" "set_vncz")])
+
+
+(define_insn "*addsi3_mul"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+                          (match_operand:SI 3 "immediate_operand" "Ku04" ))
+                 (match_operand:SI 2 "register_operand" "r")))]
+  "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
+   (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
+  "add     %0, %2, %1 << %p3"
+  [(set_attr "length" "4")
+   (set_attr "cc" "set_vncz")])
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ashift:SI (match_operand:SI 1 "register_operand" "")
+                   (match_operand:SI 2 "immediate_operand" "")))
+   (set (match_operand:SI 3 "register_operand" "")
+	(plus:SI (match_dup 0)
+                 (match_operand:SI 4 "register_operand" "")))]
+  "(peep2_reg_dead_p(2, operands[0]) &&
+    (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
+  [(set (match_dup 3)
+	(plus:SI (ashift:SI (match_dup 1)
+                            (match_dup 2))
+                 (match_dup 4)))]
+  )
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ashift:SI (match_operand:SI 1 "register_operand" "")
+                   (match_operand:SI 2 "immediate_operand" "")))
+   (set (match_operand:SI 3 "register_operand" "")
+	(plus:SI (match_operand:SI 4 "register_operand" "")
+                 (match_dup 0)))]
+  "(peep2_reg_dead_p(2, operands[0]) &&
+    (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
+  [(set (match_dup 3)
+	(plus:SI (ashift:SI (match_dup 1)
+                            (match_dup 2))
+                 (match_dup 4)))]
+  )
+
+(define_insn "adddi3"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(plus:DI (match_operand:DI 1 "register_operand" "%r,0")
+		 (match_operand:DI 2 "register_operand" "r,r")))]
+  ""
+  "@
+   add     %0, %1, %2\;adc    %m0, %m1, %m2
+   add     %0, %2\;adc    %m0, %m0, %m2"
+  [(set_attr "length" "8,6")
+   (set_attr "type" "alu2")
+   (set_attr "cc" "set_vncz")])
+
+
+
+;;=============================================================================
+;; subtract
+;;-----------------------------------------------------------------------------
+;; Subtract reg2 or an immediate value from reg1 and put the result in reg0.
+;;=============================================================================
+
+(define_peephole2
+  [(set (match_operand:QI 0 "register_operand" "")
+	(minus:QI (match_operand:QI 1 "general_operand" "")
+		  (match_operand:QI 2 "general_operand" "")))
+   (set (match_operand:QI 3 "register_operand" "")
+        (match_dup 0))]
+  "peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 3)
+        (minus:QI (match_dup 1) (match_dup 2)))]
+  )
+
+(define_peephole
+  [(set (match_operand:QI 0 "register_operand" "")
+	(minus:QI (match_operand:QI 1 "immediate_operand" "Ks08")
+		  (match_operand:QI 2 "register_operand" "r")))
+   (set (match_operand:QI 3 "register_operand" "r")
+        (match_dup 0))]
+  "dead_or_set_p(insn, operands[0])"
+  "rsub    %3, %2, %1"
+  [(set_attr "length" "4")
+   (set_attr "cc" "clobber")]
+  )
+
+
+
+(define_insn "sub<mode>3"
+  [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
+	(minus:INTM (match_operand:INTM 1 "nonmemory_operand" "0,r,0,r,0,r,Ks08")
+		  (match_operand:INTM 2 "nonmemory_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
+  ""
+  "@
+   sub     %0, %2
+   sub     %0, %1, %2
+   sub     %0, %2
+   sub     %0, %1, %2
+   sub     %0, %2
+   rsub    %0, %1
+   rsub    %0, %2, %1"
+  [(set_attr "length" "2,4,2,4,4,2,4")
+   (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+(define_insn "*sub<mode>3_mul"
+  [(set (match_operand:INTM 0 "register_operand" "=r,r,r")
+	(minus:INTM (match_operand:INTM 1 "register_operand" "r,0,r")
+                    (mult:INTM (match_operand:INTM 2 "register_operand" "r,r,0")
+                               (match_operand:SI 3 "immediate_operand" "Ku04,Ku04,Ku04" ))))]
+  "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
+   (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
+  "@
+   sub     %0, %1, %2 << %p3
+   sub     %0, %0, %2 << %p3
+   sub     %0, %1, %0 << %p3"
+  [(set_attr "length" "4,4,4")
+   (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+(define_insn "*sub<mode>3_lsl"
+  [(set (match_operand:INTM 0 "register_operand" "=r")
+	(minus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
+                                 (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))
+                    (match_operand:INTM 2 "register_operand" "r")))]
+  ""
+  "sub     %0, %2, %1 << %3"
+  [(set_attr "length" "4")
+   (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+
+(define_insn "subdi3"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(minus:DI (match_operand:DI 1 "register_operand" "%r,0")
+		 (match_operand:DI 2 "register_operand" "r,r")))]
+  ""
+  "@
+   sub     %0, %1, %2\;sbc    %m0, %m1, %m2
+   sub     %0, %2\;sbc    %m0, %m0, %m2"
+  [(set_attr "length" "8,6")
+   (set_attr "type" "alu2")
+   (set_attr "cc" "set_vncz")])
+
+
+
+;;=============================================================================
+;; multiply
+;;-----------------------------------------------------------------------------
+;; Multiply op1 and op2 and put the value in op0.
+;;=============================================================================
+
+
+(define_insn "mulqi3"
+  [(set (match_operand:QI 0 "register_operand"         "=r,r,r")
+	(mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
+		 (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+      return "mul     %0, %2";
+    case 1:
+      return "mul     %0, %1, %2";
+    case 2:
+      return "mul     %0, %1, %2";
+    default:
+      abort();
+   }
+  }
+  [(set_attr "type" "mulww_w,mulww_w,mulwh")
+   (set_attr "length" "2,4,4")
+   (set_attr "cc" "none")])
+
+(define_insn "mulsi3"
+  [(set (match_operand:SI 0 "register_operand"         "=r,r,r")
+	(mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
+		 (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+      return "mul     %0, %2";
+    case 1:
+      return "mul     %0, %1, %2";
+    case 2:
+      return "mul     %0, %1, %2";
+    default:
+      abort();
+   }
+  }
+  [(set_attr "type" "mulww_w,mulww_w,mulwh")
+   (set_attr "length" "2,4,4")
+   (set_attr "cc" "none")])
+
+
+(define_insn "mulhisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(mult:SI
+	 (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+	 (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
+  "TARGET_DSP"
+  "mulhh.w %0, %1:b, %2:b"
+  [(set_attr "type" "mulhh")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+(define_peephole2
+  [(match_scratch:DI 6 "r")
+   (set (match_operand:SI 0 "register_operand" "")
+	(mult:SI
+	 (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
+         (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
+   (set (match_operand:SI 3 "register_operand" "")
+        (ashiftrt:SI (match_dup 0)
+                     (const_int 16)))]
+  "TARGET_DSP
+   && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
+  [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
+   (set (match_dup 6)
+        (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
+                            (sign_extend:DI (match_dup 2)))
+                   (const_int 16)))
+   (set (match_dup 3) (match_dup 5))]
+
+  "{
+     operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
+     operands[5] = gen_highpart (SImode, operands[6]);
+   }"
+  )
+
+(define_insn "mulnhisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (mult:SI
+         (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
+         (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
+  "TARGET_DSP"
+  "mulnhh.w %0, %1:b, %2:b"
+  [(set_attr "type" "mulhh")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+(define_insn "machisi3"
+  [(set (match_operand:SI 0 "register_operand" "+r")
+	(plus:SI (mult:SI
+                  (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+                  (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
+                 (match_dup 0)))]
+  "TARGET_DSP"
+  "machh.w %0, %1:b, %2:b"
+  [(set_attr "type" "machh_w")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+
+
+(define_insn "mulsidi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(mult:DI
+	 (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+	 (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+  ""
+  "muls.d  %0, %1, %2"
+  [(set_attr "type" "mulww_d")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+(define_insn "umulsidi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(mult:DI
+	 (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+	 (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+  ""
+  "mulu.d  %0, %1, %2"
+  [(set_attr "type" "mulww_d")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+(define_insn "*mulaccsi3"
+  [(set (match_operand:SI 0 "register_operand" "+r")
+	(plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
+			  (match_operand:SI 2 "register_operand" "r"))
+		 (match_dup 0)))]
+  ""
+  "mac     %0, %1, %2"
+  [(set_attr "type" "macww_w")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+(define_insn "mulaccsidi3"
+  [(set (match_operand:DI 0 "register_operand" "+r")
+	(plus:DI (mult:DI
+		  (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+		  (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
+		 (match_dup 0)))]
+  ""
+  "macs.d  %0, %1, %2"
+  [(set_attr "type" "macww_d")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+(define_insn "umulaccsidi3"
+  [(set (match_operand:DI 0 "register_operand" "+r")
+	(plus:DI (mult:DI
+		  (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+		  (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
+		 (match_dup 0)))]
+  ""
+  "macu.d  %0, %1, %2"
+  [(set_attr "type" "macww_d")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+
+
+;; Try to avoid write-after-write hazards for mul operations
+;; when possible.
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+	(mult:SI
+	 (sign_extend:SI (match_operand 1 "general_operand" ""))
+         (sign_extend:SI (match_operand 2 "general_operand" ""))))
+   (set (match_dup 0)
+	(match_operator:SI 3 "alu_operator" [(match_dup 0)
+                                             (match_operand 4 "general_operand" "")]))]
+  "peep2_reg_dead_p(1, operands[2])"
+  [(set (match_dup 5)
+        (mult:SI
+         (sign_extend:SI (match_dup 1))
+         (sign_extend:SI (match_dup 2))))
+   (set (match_dup 0)
+	(match_op_dup 3 [(match_dup 5)
+                         (match_dup 4)]))]
+  "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
+  )
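+
+;; For illustration (symbolic registers): if r2 dies in the multiply,
+;;
+;;   mulhh.w r0, r1:b, r2:b
+;;   add     r0, r0, r3
+;;
+;; is rewritten to place the product in the dead register first,
+;;
+;;   mulhh.w r2, r1:b, r2:b
+;;   add     r0, r2, r3
+;;
+;; so the final write to r0 no longer has to wait behind the
+;; multiplier's late write-back to the same register.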
+
+
+
+;;=============================================================================
+;; DSP instructions
+;;=============================================================================
+(define_insn "mulsathh_h"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+                                              (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
+                                     (const_int 15))))]
+  "TARGET_DSP"
+  "mulsathh.h\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulhh")])
+
+(define_insn "mulsatrndhh_h"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (ss_truncate:HI (ashiftrt:SI
+                         (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+                                           (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
+                                  (const_int 1073741824))
+                         (const_int 15))))]
+  "TARGET_DSP"
+  "mulsatrndhh.h\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulhh")])
+
+(define_insn "mulsathh_w"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
+                                            (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                                   (const_int 1))))]
+  "TARGET_DSP"
+  "mulsathh.w\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulhh")])
+
+(define_insn "mulsatwh_w"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+                                              (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                                     (const_int 15))))]
+  "TARGET_DSP"
+  "mulsatwh.w\t%0, %1, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulwh")])
+
+(define_insn "mulsatrndwh_w"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+                                                       (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                                              (const_int 1073741824))
+                                     (const_int 15))))]
+  "TARGET_DSP"
+  "mulsatrndwh.w\t%0, %1, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulwh")])
+
+(define_insn "macsathh_w"
+  [(set (match_operand:SI 0 "register_operand" "+r")
+        (plus:SI (match_dup 0)
+                 (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
+                                                     (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                                            (const_int 1)))))]
+  "TARGET_DSP"
+  "macsathh.w\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulhh")])
+
+
+(define_insn "mulwh_d"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+                            (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                   (const_int 16)))]
+  "TARGET_DSP"
+  "mulwh.d\t%0, %1, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulwh")])
+
+
+(define_insn "mulnwh_d"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
+                            (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                   (const_int 16)))]
+  "TARGET_DSP"
+  "mulnwh.d\t%0, %1, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulwh")])
+
+(define_insn "macwh_d"
+  [(set (match_operand:DI 0 "register_operand" "+r")
+        (plus:DI (match_dup 0)
+                 (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+                                     (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+                            (const_int 16))))]
+  "TARGET_DSP"
+  "macwh.d\t%0, %1, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulwh")])
+
+(define_insn "machh_d"
+  [(set (match_operand:DI 0 "register_operand" "+r")
+        (plus:DI (match_dup 0)
+                 (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
+                          (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
+  "TARGET_DSP"
+  "machh.d\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "mulwh")])
+
+(define_insn "satadd_w"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
+                    (match_operand:SI 2 "register_operand" "r")))]
+  "TARGET_DSP"
+  "satadd.w\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "alu_sat")])
+
+(define_insn "satsub_w"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
+                     (match_operand:SI 2 "register_operand" "r")))]
+  "TARGET_DSP"
+  "satsub.w\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "alu_sat")])
+
+(define_insn "satadd_h"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
+                    (match_operand:HI 2 "register_operand" "r")))]
+  "TARGET_DSP"
+  "satadd.h\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "alu_sat")])
+
+(define_insn "satsub_h"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
+                     (match_operand:HI 2 "register_operand" "r")))]
+  "TARGET_DSP"
+  "satsub.h\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")
+   (set_attr "type" "alu_sat")])
+
+
+;;=============================================================================
+;; smin
+;;-----------------------------------------------------------------------------
+;; Set reg0 to the smaller of reg1 and reg2. The operands are treated as
+;; signed values.
+;;=============================================================================
+(define_insn "sminsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(smin:SI (match_operand:SI 1 "register_operand" "r")
+		 (match_operand:SI 2 "register_operand" "r")))]
+  ""
+  "min     %0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+;;=============================================================================
+;; smax
+;;-----------------------------------------------------------------------------
+;; Set reg0 to the larger of reg1 and reg2. The operands are treated as
+;; signed values.
+;;=============================================================================
+(define_insn "smaxsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(smax:SI (match_operand:SI 1 "register_operand" "r")
+		 (match_operand:SI 2 "register_operand" "r")))]
+  ""
+  "max     %0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+
+;;=============================================================================
+;; Logical operations
+;;-----------------------------------------------------------------------------
+
+;; Split up simple DImode logical operations.  Simply perform the logical
+;; operation on the upper and lower halves of the registers.
+(define_split
+  [(set (match_operand:DI 0 "register_operand" "")
+	(match_operator:DI 6 "logical_binary_operator"
+	  [(match_operand:DI 1 "register_operand" "")
+	   (match_operand:DI 2 "register_operand" "")]))]
+  "reload_completed"
+  [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
+   (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
+  "
+  {
+    operands[3] = gen_highpart (SImode, operands[0]);
+    operands[0] = gen_lowpart (SImode, operands[0]);
+    operands[4] = gen_highpart (SImode, operands[1]);
+    operands[1] = gen_lowpart (SImode, operands[1]);
+    operands[5] = gen_highpart (SImode, operands[2]);
+    operands[2] = gen_lowpart (SImode, operands[2]);
+  }"
+)
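+
+;; For illustration (abstract RTL, not target syntax), after reload
+;;
+;;   (set (reg:DI d) (xor:DI (reg:DI a) (reg:DI b)))
+;;
+;; becomes two independent SImode operations,
+;;
+;;   (set (low  SI half of d) (xor:SI (low  half of a) (low  half of b)))
+;;   (set (high SI half of d) (xor:SI (high half of a) (high half of b)))
+;;
+;; which are then handled by the 32-bit logical patterns below.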
+
+;;=============================================================================
+;; Logical operations with shifted operand
+;;=============================================================================
+(define_insn "<code>si_lshift"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (logical:SI (match_operator:SI 4 "logical_shift_operator"
+                                       [(match_operand:SI 2 "register_operand" "r")
+                                        (match_operand:SI 3 "immediate_operand" "Ku05")])
+                    (match_operand:SI 1 "register_operand" "r")))]
+  ""
+  {
+   if ( GET_CODE(operands[4]) == ASHIFT )
+      return "<logical_insn>\t%0, %1, %2 << %3";
+   else
+      return "<logical_insn>\t%0, %1, %2 >> %3";
+      }
+
+  [(set_attr "cc" "set_z")]
+)
+
+
+;;************************************************
+;; Peepholes for detecting logical operations
+;; with shifted operands
+;;************************************************
+
+(define_peephole
+  [(set (match_operand:SI 3 "register_operand" "")
+        (match_operator:SI 5 "logical_shift_operator"
+                           [(match_operand:SI 1 "register_operand" "")
+                            (match_operand:SI 2 "immediate_operand" "")]))
+   (set (match_operand:SI 0 "register_operand" "")
+        (logical:SI (match_operand:SI 4 "register_operand" "")
+                    (match_dup 3)))]
+  "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
+  {
+   if ( GET_CODE(operands[5]) == ASHIFT )
+      return "<logical_insn>\t%0, %4, %1 << %2";
+   else
+      return "<logical_insn>\t%0, %4, %1 >> %2";
+  }
+  [(set_attr "cc" "set_z")]
+  )
+
+(define_peephole
+  [(set (match_operand:SI 3 "register_operand" "")
+        (match_operator:SI 5 "logical_shift_operator"
+                           [(match_operand:SI 1 "register_operand" "")
+                            (match_operand:SI 2 "immediate_operand" "")]))
+   (set (match_operand:SI 0 "register_operand" "")
+        (logical:SI (match_dup 3)
+                    (match_operand:SI 4 "register_operand" "")))]
+  "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
+  {
+   if ( GET_CODE(operands[5]) == ASHIFT )
+      return "<logical_insn>\t%0, %4, %1 << %2";
+   else
+      return "<logical_insn>\t%0, %4, %1 >> %2";
+  }
+  [(set_attr "cc" "set_z")]
+  )
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (match_operator:SI 5 "logical_shift_operator"
+                           [(match_operand:SI 1 "register_operand" "")
+                            (match_operand:SI 2 "immediate_operand" "")]))
+   (set (match_operand:SI 3 "register_operand" "")
+        (logical:SI (match_operand:SI 4 "register_operand" "")
+                    (match_dup 0)))]
+  "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
+
+  [(set (match_dup 3)
+        (logical:SI  (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
+                     (match_dup 4)))]
+
+  ""
+)
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (match_operator:SI 5 "logical_shift_operator"
+                           [(match_operand:SI 1 "register_operand" "")
+                            (match_operand:SI 2 "immediate_operand" "")]))
+   (set (match_operand:SI 3 "register_operand" "")
+        (logical:SI (match_dup 0)
+                    (match_operand:SI 4 "register_operand" "")))]
+  "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
+
+  [(set (match_dup 3)
+        (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
+                    (match_dup 4)))]
+
+  ""
+)
+
+
+;;=============================================================================
+;; and
+;;-----------------------------------------------------------------------------
+;; Store the result of a bitwise AND of reg1 and reg2 (or an immediate) in reg0.
+;;=============================================================================
+
+(define_insn "andnsi"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (and:SI (match_operand:SI 1 "register_operand" "0")
+                (not:SI (match_operand:SI 2 "register_operand" "r"))))]
+  ""
+  "andn    %0, %2"
+  [(set_attr "cc" "set_z")
+   (set_attr "length" "2")]
+)
+
+
+
+
+(define_insn "andsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r, r, r, r")
+	(and:SI (match_operand:SI 1 "register_operand" "%0, r, 0, r")
+                (match_operand:SI 2 "nonmemory_operand" "r, M, i, r")))]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+         return "and\t%0, %2";
+    case 1:
+        {
+         int i, first_set = -1;
+         /* Search for first bit set in mask */
+         for ( i = 31; i >= 0; --i )
+           if ( INTVAL(operands[2]) & (1 << i) ){
+             first_set = i;
+             break;
+           }
+         operands[2] = gen_rtx_CONST_INT(SImode, first_set + 1);
+         return "bfextu\t%0, %1, 0, %2";
+        }
+    case 2:
+         if ( one_bit_cleared_operand(operands[2], VOIDmode) ){
+             int bitpos;
+             for ( bitpos = 0; bitpos < 32; bitpos++ )
+               if ( !(INTVAL(operands[2]) & (1 << bitpos)) )
+                 break;
+             operands[2] = gen_rtx_CONST_INT(SImode, bitpos);
+             return "cbr\t%0, %2";
+         } else if ( (INTVAL(operands[2]) >= 0) &&
+                     (INTVAL(operands[2]) <= 65535) )
+             return "andl\t%0, %2, COH";
+         else if ( (INTVAL(operands[2]) < 0) &&
+                   (INTVAL(operands[2]) >= -65536 ) )
+             return "andl\t%0, lo(%2)";
+         else if ( ((INTVAL(operands[2]) & 0xffff) == 0xffff) )
+             return "andh\t%0, hi(%2)";
+         else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
+             return "andh\t%0, hi(%2), COH";
+         else
+             return "andh\t%0, hi(%2)\;andl\t%0, lo(%2)";
+    case 3:
+         return "and\t%0, %1, %2";
+    default:
+	 abort();
+    }
+  }
+
+  [(set_attr "length" "2,4,8,4")
+   (set_attr "cc" "set_z")])
+
+
+(define_insn "anddi3"
+  [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+	(and:DI (match_operand:DI 1 "register_operand" "%0,r")
+                (match_operand:DI 2 "register_operand" "r,r")))]
+  ""
+  "#"
+  [(set_attr "length" "8")
+   (set_attr "cc" "clobber")]
+)
+
+;;=============================================================================
+;; or
+;;-----------------------------------------------------------------------------
+;; Store the result of a bitwise inclusive-OR of reg1 and reg2 (or an immediate) in reg0.
+;;=============================================================================
+
+(define_insn "iorsi3"
+  [(set (match_operand:SI 0 "register_operand"          "=r,r,r")
+	(ior:SI (match_operand:SI 1 "register_operand"  "%0,0,r" )
+		(match_operand:SI 2 "nonmemory_operand" "r ,i,r")))]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+         return "or\t%0, %2";
+    case 1:
+         if ( one_bit_set_operand(operands[2], VOIDmode) ){
+             int bitpos;
+             for (bitpos = 0; bitpos < 32; bitpos++)
+               if (INTVAL(operands[2]) & (1 << bitpos))
+                 break;
+             operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
+             return "sbr\t%0, %2";
+         } else if ( (INTVAL(operands[2]) >= 0) &&
+              (INTVAL(operands[2]) <= 65535) )
+             return "orl\t%0, %2";
+         else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
+             return "orh\t%0, hi(%2)";
+         else
+             return "orh\t%0, hi(%2)\;orl\t%0, lo(%2)";
+    case 2:
+         return "or\t%0, %1, %2";
+    default:
+	 abort();
+    }
+  }
+  [(set_attr "length" "2,8,4")
+   (set_attr "cc" "set_z")])
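+
+;; For illustration, the constant alternative of iorsi3 above chooses an
+;; encoding based on the shape of the constant:
+;;
+;;   single bit set                 ->  sbr  rd, <bit>
+;;   value fitting in 16 bits       ->  orl  rd, <value>
+;;   only upper half-word bits set  ->  orh  rd, hi(<value>)
+;;   anything else                  ->  orh  rd, hi(...); orl rd, lo(...)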
+
+
+;(define_insn "iorsi3"
+;  [(set (match_operand:SI 0 "register_operand" "=r, r, r")
+;	(ior:SI (match_operand:SI 1 "avr32_logical_insn_operand" "r, r, rA" )
+;		(match_operand:SI 2 "register_operand" "0, i, r")))]
+;  ""
+;  {
+;   switch (which_alternative){
+;    case 0:
+;         return "or     %0, %2";
+;    case 1:
+;         if ( one_bit_set_operand(operands[2], VOIDmode) ){
+;             int i, bitpos;
+;             for ( i = 0; i < 32; i++ )
+;                if ( INTVAL(operands[2]) & (1 << i) ){
+;                   bitpos = i;
+;                   break;
+;                }
+;             operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
+;             return "sbr    %0, %2";
+;         } else if ( (INTVAL(operands[2]) >= 0) &&
+;              (INTVAL(operands[2]) <= 65535) )
+;             return "orl    %0, %2";
+;         else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
+;             return "orh    %0, hi(%2)";
+;         else
+;             return "orh    %0, hi(%2)\;orl    %0, lo(%2)";
+;    case 2:
+;         return "or     %0, %2, %1";
+;    }
+;  }
+;  [(set_attr "length" "2,8,4")
+;   (set_attr "cc" "set_z")])
+
+(define_insn "iordi3"
+  [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+	(ior:DI (match_operand:DI 1 "register_operand" "%0,r")
+                (match_operand:DI 2 "register_operand" "r,r")))]
+  ""
+  "#"
+  [(set_attr "length" "8")
+   (set_attr "cc" "clobber")]
+)
+
+;;=============================================================================
+;; xor
+;;-----------------------------------------------------------------------------
+;; Store the result of a bitwise exclusive-OR of reg1 and reg2 (or an immediate) in reg0.
+;;=============================================================================
+
+(define_insn "xorsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+	(xor:SI (match_operand:SI 1 "register_operand" "0,0,r")
+		(match_operand:SI 2 "nonmemory_operand" "r,i,r")))]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+         return "eor     %0, %2";
+    case 1:
+         if ( (INTVAL(operands[2]) >= 0) &&
+              (INTVAL(operands[2]) <= 65535) )
+             return "eorl    %0, %2";
+         else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
+             return "eorh    %0, hi(%2)";
+         else
+             return "eorh    %0, hi(%2)\;eorl    %0, lo(%2)";
+    case 2:
+         return "eor     %0, %1, %2";
+    default:
+	 abort();
+    }
+  }
+
+  [(set_attr "length" "2,8,4")
+   (set_attr "cc" "set_z")])
+
+(define_insn "xordi3"
+  [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+	(xor:DI (match_operand:DI 1 "register_operand" "%0,r")
+                (match_operand:DI 2 "register_operand" "r,r")))]
+  ""
+  "#"
+  [(set_attr "length" "8")
+   (set_attr "cc" "clobber")]
+)
+
+;;=============================================================================
+;; divmod
+;;-----------------------------------------------------------------------------
+;; Signed division that produces both a quotient and a remainder.
+;;=============================================================================
+(define_expand "divmodsi4"
+  [(parallel [
+     (parallel [
+       (set (match_operand:SI 0 "register_operand" "=r")
+	    (div:SI (match_operand:SI 1 "register_operand" "r")
+		    (match_operand:SI 2 "register_operand" "r")))
+       (set (match_operand:SI 3 "register_operand" "=r")
+	    (mod:SI (match_dup 1)
+		    (match_dup 2)))])
+     (use (match_dup 4))])]
+  ""
+  {
+    if (! no_new_pseudos) {
+      operands[4] = gen_reg_rtx (DImode);
+
+      emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
+      emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
+      emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
+
+      DONE;
+    } else {
+      FAIL;
+    }
+
+  })
+
+
+(define_insn "divmodsi4_internal"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unspec:DI [(match_operand:SI 1 "register_operand" "r")
+		    (match_operand:SI 2 "register_operand" "r")]
+		   UNSPEC_DIVMODSI4_INTERNAL))]
+  ""
+  "divs    %0, %1, %2"
+  [(set_attr "type" "div")
+   (set_attr "cc" "none")])
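+
+;; For illustration, a single divs produces both results, so C code like
+;;
+;;   q = a / b;  r = a % b;
+;;
+;; needs only one division: the expander above copies the SImode subreg
+;; at byte offset 4 of the DImode result into the quotient and the
+;; subreg at byte offset 0 into the remainder.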
+
+
+;;=============================================================================
+;; udivmod
+;;-----------------------------------------------------------------------------
+;; Unsigned division that produces both a quotient and a remainder.
+;;=============================================================================
+(define_expand "udivmodsi4"
+ [(parallel [
+    (parallel [
+      (set (match_operand:SI 0 "register_operand" "=r")
+	   (udiv:SI (match_operand:SI 1 "register_operand" "r")
+		    (match_operand:SI 2 "register_operand" "r")))
+      (set (match_operand:SI 3 "register_operand" "=r")
+	   (umod:SI (match_dup 1)
+		    (match_dup 2)))])
+    (use (match_dup 4))])]
+  ""
+  {
+    if (! no_new_pseudos) {
+      operands[4] = gen_reg_rtx (DImode);
+
+      emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
+      emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
+      emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
+
+      DONE;
+    } else {
+      FAIL;
+    }
+  })
+
+(define_insn "udivmodsi4_internal"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unspec:DI [(match_operand:SI 1 "register_operand" "r")
+		    (match_operand:SI 2 "register_operand" "r")]
+		   UNSPEC_UDIVMODSI4_INTERNAL))]
+  ""
+  "divu    %0, %1, %2"
+  [(set_attr "type" "div")
+   (set_attr "cc" "none")])
+
+
+;;=============================================================================
+;; Arithmetic-shift left
+;;-----------------------------------------------------------------------------
+;; Shift reg1 left by reg2 or an immediate value and put the result in reg0.
+;;=============================================================================
+
+(define_insn "ashlsi3"
+  [(set (match_operand:SI 0 "register_operand"           "=r,r,r")
+	(ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
+		   (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
+  ""
+  "@
+   lsl     %0, %1, %2
+   lsl     %0, %2
+   lsl     %0, %1, %2"
+  [(set_attr "length" "4,2,4")
+   (set_attr "cc" "set_ncz")])
+
+;;=============================================================================
+;; Arithmetic-shift right
+;;-----------------------------------------------------------------------------
+;; Arithmetic-shift reg1 right by reg2 or an immediate value and put the result in reg0.
+;;=============================================================================
+
+(define_insn "ashrsi3"
+  [(set (match_operand:SI 0 "register_operand"             "=r,r,r")
+	(ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
+		     (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
+  ""
+  "@
+   asr     %0, %1, %2
+   asr     %0, %2
+   asr     %0, %1, %2"
+  [(set_attr "length" "4,2,4")
+   (set_attr "cc" "set_ncz")])
+
+;;=============================================================================
+;; Logical shift right
+;;-----------------------------------------------------------------------------
+;; Logical-shift reg1 right by reg2 or an immediate value and put the result in reg0.
+;;=============================================================================
+
+(define_insn "lshrsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+	(lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
+		     (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
+  ""
+  "@
+   lsr     %0, %1, %2
+   lsr     %0, %2
+   lsr     %0, %1, %2"
+  [(set_attr "length" "4,2,4")
+   (set_attr "cc" "set_ncz")])
+
+
+;;=============================================================================
+;; neg
+;;-----------------------------------------------------------------------------
+;; Negate operand 1 and store the result in operand 0.
+;;=============================================================================
+(define_insn "negsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(neg:SI (match_operand:SI 1 "register_operand" "0")))]
+  ""
+  "neg     %0"
+  [(set_attr "length" "2")
+   (set_attr "cc" "set_vncz")])
+
+;;=============================================================================
+;; abs
+;;-----------------------------------------------------------------------------
+;; Store the absolute value of operand 1 into operand 0.
+;;=============================================================================
+(define_insn "abssi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(abs:SI (match_operand:SI 1 "register_operand" "0")))]
+  ""
+  "abs     %0"
+  [(set_attr "length" "2")
+   (set_attr "cc" "set_z")])
+
+
+;;=============================================================================
+;; one_cmpl
+;;-----------------------------------------------------------------------------
+;; Store the bitwise-complement of operand 1 into operand 0.
+;;=============================================================================
+
+(define_insn "one_cmplsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(not:SI (match_operand:SI 1 "register_operand" "r,0")))]
+  ""
+  "@
+   rsub    %0, %1, -1
+   com     %0"
+  [(set_attr "length" "4,2")
+   (set_attr "cc" "set_z")])
+
+
+;;=============================================================================
+;; Bit load
+;;-----------------------------------------------------------------------------
+;; Load a bit into Z and C flags
+;;=============================================================================
+(define_insn "bldsi"
+  [(set (cc0)
+        (and:SI (match_operand:SI 0 "register_operand" "r")
+                (match_operand:SI 1 "one_bit_set_operand" "i")))]
+  ""
+  "bld\t%0, %p1"
+  [(set_attr "length" "4")
+   (set_attr "cc" "bld")]
+  )
+
+
+;;=============================================================================
+;; Compare
+;;-----------------------------------------------------------------------------
+;; Compare reg0 with reg1 or an immediate value.
+;;=============================================================================
+
+(define_expand "cmpqi"
+  [(set (cc0)
+	(compare:QI
+	 (match_operand:QI 0 "general_operand" "")
+	 (match_operand:QI 1 "general_operand"  "")))]
+  ""
+  "{
+
+   if ( GET_CODE(operands[0]) != REG
+        && GET_CODE(operands[0]) != SUBREG)
+     operands[0] = force_reg(QImode, operands[0]);
+
+
+   if ( GET_CODE(operands[1]) != REG
+        && GET_CODE(operands[1]) != SUBREG )
+     operands[1] = force_reg(QImode, operands[1]);
+
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = operands[1];
+   emit_insn(gen_cmpqi_internal(operands[0], operands[1]));
+   DONE;
+  }"
+)
+
+(define_insn "cmpqi_internal"
+  [(set (cc0)
+        (compare:QI
+         (match_operand:QI 0 "register_operand" "r")
+         (match_operand:QI 1 "register_operand" "r")))]
+  ""
+  {
+   set_next_insn_cond(insn,
+                      avr32_output_cmp(get_next_insn_cond(insn), QImode, operands[0], operands[1]));
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "cc" "compare")])
+
+(define_expand "cmphi"
+  [(set (cc0)
+	(compare:HI
+	 (match_operand:HI 0 "general_operand" "")
+	 (match_operand:HI 1 "general_operand"  "")))]
+  ""
+  "{
+   if ( GET_CODE(operands[0]) != REG
+        && GET_CODE(operands[0]) != SUBREG )
+     operands[0] = force_reg(HImode, operands[0]);
+
+
+   if ( GET_CODE(operands[1]) != REG
+        && GET_CODE(operands[1]) != SUBREG)
+     operands[1] = force_reg(HImode, operands[1]);
+
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = operands[1];
+   emit_insn(gen_cmphi_internal(operands[0], operands[1]));
+   DONE;
+  }"
+)
+
+
+(define_insn "cmphi_internal"
+  [(set (cc0)
+        (compare:HI
+         (match_operand:HI 0 "register_operand" "r")
+         (match_operand:HI 1 "register_operand" "r")))]
+  ""
+  {
+   set_next_insn_cond(insn,
+     avr32_output_cmp(get_next_insn_cond(insn), HImode, operands[0], operands[1]));
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "cc" "compare")])
+
+
+(define_expand "cmpsi"
+  [(set (cc0)
+	(compare:SI
+	 (match_operand:SI 0 "general_operand" "")
+	 (match_operand:SI 1 "general_operand"  "")))]
+  ""
+  "{
+   if ( GET_CODE(operands[0]) != REG
+        && GET_CODE(operands[0]) != SUBREG )
+     operands[0] = force_reg(SImode, operands[0]);
+
+   if ( GET_CODE(operands[1]) != REG
+        && GET_CODE(operands[1]) != SUBREG
+        && GET_CODE(operands[1]) != CONST_INT )
+     operands[1] = force_reg(SImode, operands[1]);
+
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = operands[1];
+
+
+   emit_insn(gen_cmpsi_internal(operands[0], operands[1]));
+   DONE;
+  }"
+)
+
+
+
+
+(define_insn "cmpsi_internal"
+  [(set (cc0)
+	(compare:SI
+	 (match_operand:SI 0 "register_operand" "r, r, r")
+	 (match_operand:SI 1 "nonmemory_operand"  "r, Ks06, Ks21")))]
+  ""
+  {
+   set_next_insn_cond(insn,
+     avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], operands[1]));
+   return "";
+  }
+
+  [(set_attr "length" "2,2,4")
+   (set_attr "cc" "compare")])
+
+
+(define_expand "cmpdi"
+  [(set (cc0)
+	(compare:DI
+	 (match_operand:DI 0 "register_operand" "")
+	 (match_operand:DI 1 "register_operand"  "")))]
+  ""
+  {
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = operands[1];
+   emit_insn(gen_cmpdi_internal(operands[0], operands[1]));
+   DONE;
+  }
+)
+
+(define_insn "cmpdi_internal"
+  [(set (cc0)
+	(compare:DI
+	 (match_operand:DI 0 "register_operand" "r")
+	 (match_operand:DI 1 "register_operand"  "r")))]
+  ""
+  {
+   set_next_insn_cond(insn,
+     avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], operands[1]));
+   return "";
+  }
+
+  [(set_attr "length" "6")
+   (set_attr "type" "alu2")
+   (set_attr "cc" "compare")])
+
+
+
+;;=============================================================================
+;; Test if zero
+;;-----------------------------------------------------------------------------
+;; Compare reg against zero and set the condition codes.
+;;=============================================================================
+
+
+(define_expand "tstsi"
+  [(set (cc0)
+	(match_operand:SI 0 "register_operand" ""))]
+  ""
+  {
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = gen_rtx_CONST_INT(SImode, 0);
+   emit_insn(gen_tstsi_internal(operands[0]));
+   DONE;
+  }
+)
+
+(define_insn "tstsi_internal"
+  [(set (cc0)
+	(match_operand:SI 0 "register_operand" "r"))]
+  ""
+  {
+   set_next_insn_cond(insn,
+     avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
+
+   return "";
+  }
+  [(set_attr "length" "2")
+   (set_attr "cc" "compare")])
+
+
+(define_expand "tstdi"
+  [(set (cc0)
+	(match_operand:DI 0 "register_operand" ""))]
+  ""
+  {
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = gen_rtx_CONST_INT(DImode, 0);
+   emit_insn(gen_tstdi_internal(operands[0]));
+   DONE;
+  }
+)
+
+(define_insn "tstdi_internal"
+  [(set (cc0)
+	(match_operand:DI 0 "register_operand" "r"))]
+  ""
+  {
+   set_next_insn_cond(insn,
+     avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "type" "alu2")
+   (set_attr "cc" "compare")])
+
+
+
+;;=============================================================================
+;; Convert operands
+;;-----------------------------------------------------------------------------
+;;
+;;=============================================================================
+(define_insn "truncdisi2"
+  [(set (match_operand:SI 0 "general_operand" "")
+	(truncate:SI (match_operand:DI 1 "general_operand" "")))]
+  ""
+  "truncdisi2")
+
+;;=============================================================================
+;; Extend
+;;-----------------------------------------------------------------------------
+;;
+;;=============================================================================
+
+
+(define_insn "extendhisi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+	(sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       return    "casts.h\t%0";
+     case 1:
+       return    "bfexts\t%0, %1, 0, 16";
+     case 2:
+     case 3:
+       return    "ld.sh\t%0, %1";
+     default:
+       abort();
+   }
+  }
+  [(set_attr "length" "2,4,2,4")
+   (set_attr "cc" "set_ncz,set_ncz,none,none")
+   (set_attr "type" "alu,alu,load_rm,load_rm")])
+
+(define_insn "extendqisi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+	(sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       return    "casts.b\t%0";
+     case 1:
+       return    "bfexts\t%0, %1, 0, 8";
+     case 2:
+     case 3:
+       return    "ld.sb\t%0, %1";
+     default:
+       abort();
+   }
+  }
+  [(set_attr "length" "2,4,2,4")
+   (set_attr "cc" "set_ncz,set_ncz,none,none")
+   (set_attr "type" "alu,alu,load_rm,load_rm")])
+
+(define_insn "extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+	(sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       return    "casts.b\t%0";
+     case 1:
+       return    "bfexts\t%0, %1, 0, 8";
+     case 2:
+     case 3:
+       return    "ld.sb\t%0, %1";
+     default:
+       abort();
+   }
+  }
+  [(set_attr "length" "2,4,2,4")
+   (set_attr "cc" "set_ncz,set_ncz,none,none")
+   (set_attr "type" "alu,alu,load_rm,load_rm")])
+
+
+;;=============================================================================
+;; Zero-extend
+;;-----------------------------------------------------------------------------
+;;
+;;=============================================================================
+
+(define_insn "zero_extendhisi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+	(zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       return    "castu.h\t%0";
+     case 1:
+       return    "bfextu\t%0, %1, 0, 16";
+     case 2:
+     case 3:
+       return    "ld.uh\t%0, %1";
+     default:
+       abort();
+   }
+  }
+
+  [(set_attr "length" "2,4,2,4")
+   (set_attr "cc" "set_ncz,set_ncz,none,none")
+   (set_attr "type" "alu,alu,load_rm,load_rm")])
+
+(define_insn "zero_extendqisi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+	(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       return    "castu.b\t%0";
+     case 1:
+       return    "bfextu\t%0, %1, 0, 8";
+     case 2:
+     case 3:
+       return    "ld.ub\t%0, %1";
+     default:
+       abort();
+   }
+  }
+  [(set_attr "length" "2,4,2,4")
+   (set_attr "cc" "set_ncz, set_ncz, none, none")
+   (set_attr "type" "alu, alu, load_rm, load_rm")])
+
+(define_insn "zero_extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+	(zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       return    "castu.b\t%0";
+     case 1:
+       return    "bfextu\t%0, %1, 0, 8";
+     case 2:
+     case 3:
+       return    "ld.ub\t%0, %1";
+     default:
+       abort();
+   }
+  }
+  [(set_attr "length" "2,4,2,4")
+   (set_attr "cc" "set_ncz, set_ncz, none, none")
+   (set_attr "type" "alu, alu, load_rm, load_rm")])
+
+
+
+;;=============================================================================
+;; Conditional set register
+;; sr{cond4}  rd
+;;-----------------------------------------------------------------------------
+
+;; Because of the same issue as with conditional moves and adds, we must
+;; not separate the compare instruction from the scc instruction, as
+;; they might be scheduled "badly".
+
+(define_expand "s<code>"
+  [(set (match_operand:SI 0 "register_operand" "")
+	(any_cond (cc0)
+            (const_int 0)))]
+  ""
+  {
+   if ( !avr32_expand_scc(<CODE>, operands) ){
+      FAIL;
+   }
+   DONE;
+  }
+  )
+
+
+(define_insn "comparesi_and_set"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator 1 "avr32_comparison_operator"
+                        [ (compare (match_operand:SI 2 "register_operand" "r")
+                                   (match_operand:SI 3 "general_operand" "rKs06Ks21"))
+                          (const_int 0)]))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
+   return "sr%1\t%0";
+   }
+  [(set_attr "length" "6")
+   (set_attr "cc" "clobber")])
+
+(define_insn "comparehi_and_set"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator 1 "avr32_comparison_operator"
+                        [ (compare (match_operand:HI 2 "register_operand" "r")
+                                      (match_operand:HI 3 "register_operand" "r"))
+                          (const_int 0)]))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
+   return "sr%1\t%0";
+  }
+  [(set_attr "length" "6")
+   (set_attr "cc" "clobber")])
+
+(define_insn "compareqi_and_set"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator 1 "avr32_comparison_operator"
+                        [ (compare (match_operand:QI 2 "register_operand" "r")
+                                      (match_operand:QI 3 "register_operand" "r"))
+                          (const_int 0)]))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
+   return "sr%1\t%0";
+  }
+  [(set_attr "length" "6")
+   (set_attr "cc" "clobber")])
+
+(define_insn "*comparedi_and_set"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator 1 "avr32_comparison_operator"
+                        [ (compare (match_operand:DI 2 "register_operand" "r")
+                                      (match_operand:DI 3 "register_operand" "r"))
+                          (const_int 0)]))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
+   return "sr%1\t%0";
+  }
+  [(set_attr "length" "6")
+   (set_attr "cc" "clobber")])
+
+(define_insn "*tstdi_and_set"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator 1 "avr32_comparison_operator"
+                        [ (compare (match_operand:DI 2 "register_operand" "r")
+                                   (const_int 0))
+                          (const_int 0)]))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], const0_rtx);
+   return "sr%1\t%0";
+  }
+  [(set_attr "length" "6")
+   (set_attr "cc" "clobber")])
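+
+;; For illustration (symbolic registers), a C expression such as
+;;
+;;   flag = (a == b);
+;;
+;; is emitted by one of the patterns above as compare plus conditional
+;; set, kept together in a single insn, e.g.
+;;
+;;   cp.w  ra, rb
+;;   sreq  rflag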
+
+
+
+;;=============================================================================
+;; Conditional branch
+;;-----------------------------------------------------------------------------
+;; Branch to label if the specified condition codes are set.
+;;=============================================================================
+; branch if negative
+(define_insn "bmi"
+  [(set (pc)
+	(if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "brmi    %0"
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+		    (le (minus (pc) (match_dup 0)) (const_int 256)))
+	       (const_int 2)] ; use compact branch
+              (const_int 4))) ; use extended branch
+   (set_attr "cc" "none")])
+
+(define_insn "*bmi-reverse"
+  [(set (pc)
+	(if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
+		      (pc)
+		      (label_ref (match_operand 0 "" ""))))]
+  ""
+  "brpl    %0"
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+		    (le (minus (pc) (match_dup 0)) (const_int 256)))
+	       (const_int 2)] ; use compact branch
+              (const_int 4))) ; use extended branch
+   (set_attr "cc" "none")])
+
+; branch if positive
+(define_insn "bpl"
+  [(set (pc)
+	(if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "brpl    %0"
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+		    (le (minus (pc) (match_dup 0)) (const_int 256)))
+	       (const_int 2)] ; use compact branch
+              (const_int 4))) ; use extended branch
+   (set_attr "cc" "none")])
+
+(define_insn "*bpl-reverse"
+  [(set (pc)
+	(if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
+		      (pc)
+		      (label_ref (match_operand 0 "" ""))))]
+  ""
+  "brmi    %0"
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+		    (le (minus (pc) (match_dup 0)) (const_int 256)))
+	       (const_int 2)] ; use compact branch
+              (const_int 4))) ; use extended branch
+   (set_attr "cc" "none")])
+
+; branch on condition
+(define_insn "b<code>"
+  [(set (pc)
+	(if_then_else (any_cond:CC (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "br<cond>    %0 "
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+		    (le (minus (pc) (match_dup 0)) (const_int 256)))
+	       (const_int 2)] ; use compact branch
+              (const_int 4))) ; use extended branch
+   (set_attr "cc" "none")])
+
+
+(define_insn "*b<code>-reverse"
+  [(set (pc)
+	(if_then_else (any_cond:CC (cc0)
+			  (const_int 0))
+		      (pc)
+		      (label_ref (match_operand 0 "" ""))))]
+  ""
+  "br<invcond>    %0 "
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+		    (le (minus (pc) (match_dup 0)) (const_int 256)))
+	       (const_int 2)] ; use compact branch
+              (const_int 4))) ; use extended branch
+   (set_attr "cc" "none")])
+
+
+
+;=============================================================================
+; Conditional Add/Subtract
+;-----------------------------------------------------------------------------
+; sub{cond4}  Rd, imm
+;=============================================================================
+
+
+(define_expand "add<mode>cc"
+  [(set (match_operand:ADDCC 0 "register_operand" "")
+	(if_then_else:ADDCC (match_operand 1 "avr32_comparison_operator" "")
+                         (match_operand:ADDCC 2 "register_immediate_operand" "")
+                         (match_operand:ADDCC 3 "register_immediate_operand" "")))]
+  ""
+  {
+   if ( avr32_expand_addcc(<MODE>mode, operands ) )
+      DONE;
+   else
+      FAIL;
+  }
+  )
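+
+;; Illustrative note (an assumption, not taken from the patch): after
+;; if-conversion, code such as "if (a == b) x -= 4;" can be expanded through
+;; the pattern above and end up as a compare followed by a conditional
+;; subtract of a small immediate, roughly
+;;     cp.w    r9, r10
+;;     subeq   r8, 4
+;; with registers chosen here purely for illustration.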
+
+
+(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
+  [(set (match_operand:ADDCC 0 "register_operand" "=&r")
+	(unspec:ADDCC [(match_operand 1 "avr32_comparison_operator" "")
+                       (match_operand:ADDCC 2 "register_operand" "0")
+                       (match_operand:ADDCC 3 "immediate_operand" "Ks08")
+                       (match_operand:CMP 4 "register_operand" "r")
+                       (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")
+                       ]
+                      UNSPEC_ADDSICC ))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
+
+   return "sub%1\t%0, %3";
+  }
+  [(set_attr "length" "8")
+   (set_attr "cc" "clobber")])
+
+
+;=============================================================================
+; Conditional Move
+;-----------------------------------------------------------------------------
+; mov{cond4}  Rd, (Rs/imm)
+;=============================================================================
+(define_expand "mov<mode>cc"
+  [(set (match_operand:ADDCC 0 "register_operand" "")
+	(if_then_else:ADDCC (match_operand 1 "avr32_comparison_operator" "")
+                            (match_operand:ADDCC 2 "register_immediate_operand" "")
+                            (match_operand:ADDCC 3 "register_immediate_operand" "")))]
+  ""
+  {
+   if ( avr32_expand_movcc(<MODE>mode, operands ) )
+      DONE;
+   else
+      FAIL;
+  }
+  )
+
+(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
+  [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
+	(unspec:MOVCC [(match_operand 1 "avr32_comparison_operator" "")
+                       (match_operand:MOVCC 2 "register_immediate_operand" "0,rKs08,rKs08")
+                       (match_operand:MOVCC 3 "register_immediate_operand" "rKs08,0,rKs08")
+                       (match_operand:CMP 4 "register_operand" "r, r, r")
+                       (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>, <CMP:cmp_constraint>, <CMP:cmp_constraint>")
+                       ]
+                      UNSPEC_MOVSICC ))]
+  ""
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
+
+   switch( which_alternative ){
+    case 0:
+      return "mov%i1    %0, %3";
+    case 1:
+      return "mov%1    %0, %2";
+    case 2:
+      return "mov%1    %0, %2\;mov%i1    %0, %3";
+    default:
+      abort();
+    }
+
+
+  }
+  [(set_attr "length" "8,8,12")
+   (set_attr "cc" "clobber")])
+
+
+;;=============================================================================
+;; jump
+;;-----------------------------------------------------------------------------
+;; Jump inside a function; an unconditional branch to a label.
+;;=============================================================================
+(define_insn "jump"
+  [(set (pc)
+	(label_ref (match_operand 0 "" "")))]
+  ""
+  {
+    if (get_attr_length(insn) > 4)
+      return "Can't jump this far";
+    return (get_attr_length(insn) == 2 ?
+	    "rjmp    %0" : "bral    %0");
+  }
+  [(set_attr "type" "branch")
+   (set (attr "length")
+	(cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
+		    (le (minus (pc) (match_dup 0)) (const_int 1024)))
+	       (const_int 2) ; use rjmp
+	       (le (match_dup 0) (const_int 1048575))
+	       (const_int 4)] ; use bral
+	      (const_int 8))) ; do something else
+   (set_attr "cc" "none")])
+
+;;=============================================================================
+;; call
+;;-----------------------------------------------------------------------------
+;; Subroutine call instruction returning no value.
+;;=============================================================================
+(define_insn "call_internal"
+  [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
+                    (match_operand 1 "" ""))
+              (clobber (reg:SI LR_REGNUM))])]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+      return "icall\t%0";
+    case 1:
+      return "rcall\t%0";
+    case 2:
+      return "mcall\t%0";
+    case 3:
+      if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
+        return "call\t%0";
+      else
+        return "mcall\tr6[%0@got]";
+    default:
+      abort();
+   }
+  }
+  [(set_attr "type" "call")
+   (set_attr "length" "2,4,4,10")
+   (set_attr "cc" "clobber")])
+
+
+(define_expand "call"
+  [(parallel [(call (match_operand:SI 0 "" "")
+                    (match_operand 1 "" ""))
+              (clobber (reg:SI LR_REGNUM))])]
+  ""
+  {
+   rtx call_address;
+   if ( GET_CODE(operands[0]) != MEM )
+      FAIL;
+
+   call_address = XEXP(operands[0], 0);
+
+   /* If the assembler supports the call pseudo insn and the call
+      address is a symbol, then nothing special needs to be done. */
+   if ( TARGET_HAS_ASM_ADDR_PSEUDOS
+        && (GET_CODE(call_address) == SYMBOL_REF) ){
+      /* We must however mark the function as using the GOT if
+         flag_pic is set, since the call insn might turn into
+         a mcall using the GOT ptr register. */
+      if ( flag_pic ){
+         current_function_uses_pic_offset_table = 1;
+         emit_call_insn(gen_call_internal(call_address, operands[1]));
+         DONE;
+      }
+   } else {
+     if ( flag_pic &&
+          GET_CODE(call_address) == SYMBOL_REF ){
+       current_function_uses_pic_offset_table = 1;
+       emit_call_insn(gen_call_internal(call_address, operands[1]));
+       DONE;
+     }
+
+     if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) ){
+       if ( optimize_size &&
+             GET_CODE(call_address) == SYMBOL_REF ){
+         call_address = force_const_mem(SImode, call_address);
+       } else {
+         call_address = force_reg(SImode, call_address);
+       }
+     }
+   }
+   emit_call_insn(gen_call_internal(call_address, operands[1]));
+   DONE;
+  }
+)
+
+;;=============================================================================
+;; call_value
+;;-----------------------------------------------------------------------------
+;; Subroutine call instruction returning a value.
+;;=============================================================================
+(define_expand "call_value"
+   [(parallel [(set (match_operand:SI 0 "" "")
+                    (call (match_operand:SI 1 "" "")
+                          (match_operand 2 "" "")))
+               (clobber (reg:SI LR_REGNUM))])]
+   ""
+   {
+    rtx call_address;
+    if ( GET_CODE(operands[1]) != MEM )
+      FAIL;
+
+    call_address = XEXP(operands[1], 0);
+
+    /* If the assembler supports the call pseudo insn and the call
+       address is a symbol, then nothing special needs to be done. */
+    if ( TARGET_HAS_ASM_ADDR_PSEUDOS
+         && (GET_CODE(call_address) == SYMBOL_REF) ){
+       /* We must however mark the function as using the GOT if
+          flag_pic is set, since the call insn might turn into
+          a mcall using the GOT ptr register. */
+       if ( flag_pic ) {
+          current_function_uses_pic_offset_table = 1;
+          emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
+          DONE;
+       }
+    } else {
+      if ( flag_pic &&
+           GET_CODE(call_address) == SYMBOL_REF ){
+        current_function_uses_pic_offset_table = 1;
+        emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
+        DONE;
+      }
+
+      if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
+        if ( optimize_size &&
+             GET_CODE(call_address) == SYMBOL_REF){
+          call_address = force_const_mem(SImode, call_address);
+        } else {
+          call_address = force_reg(SImode, call_address);
+        }
+      }
+    }
+    emit_call_insn(gen_call_value_internal(operands[0], call_address,
+                                           operands[2]));
+    DONE;
+
+   })
+
+(define_insn "call_value_internal"
+  [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
+                   (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
+                         (match_operand 2 "" "")))
+              (clobber (reg:SI LR_REGNUM))])]
+  ;; Operand 2 not used on the AVR32.
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+      return "icall\t%1";
+    case 1:
+      return "rcall\t%1";
+    case 2:
+      return "mcall\t%1";
+    case 3:
+      if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
+        return "call\t%1";
+      else
+        return "mcall\tr6[%1@got]";
+    default:
+      abort();
+   }
+  }
+  [(set_attr "type" "call")
+   (set_attr "length" "2,4,4,10")
+   (set_attr "cc" "call_set")])
+
+
+;;=============================================================================
+;; untyped_call
+;;-----------------------------------------------------------------------------
+;; Subroutine call instruction returning a value of any type.
+;; The code is adapted from m68k.md.
+;; Fixme!
+;;=============================================================================
+(define_expand "untyped_call"
+  [(parallel [(call (match_operand 0 "avr32_call_operand" "")
+		    (const_int 0))
+	      (match_operand 1 "" "")
+	      (match_operand 2 "" "")])]
+  ""
+  {
+    int i;
+
+    emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+    for (i = 0; i < XVECLEN (operands[2], 0); i++) {
+      rtx set = XVECEXP (operands[2], 0, i);
+      emit_move_insn (SET_DEST (set), SET_SRC (set));
+    }
+
+    /* The optimizer does not know that the call sets the function value
+       registers we stored in the result block.  We avoid problems by
+       claiming that all hard registers are used and clobbered at this
+       point.  */
+    emit_insn (gen_blockage ());
+
+    DONE;
+  })
+
+
+;;=============================================================================
+;; return
+;;=============================================================================
+
+(define_insn "return"
+  [(return)]
+  "USE_RETURN_INSN (FALSE)"
+  {
+   avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "type" "call")]
+  )
+
+(define_insn "*return_value_imm"
+  [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
+              (use (reg RETVAL_REGNUM))
+              (return)])]
+  "USE_RETURN_INSN (FALSE) &&
+   ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
+  {
+   avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "type" "call")]
+  )
+
+(define_insn "*return_value_si"
+  [(set (reg RETVAL_REGNUM) (match_operand:SI 0 "register_operand" "r"))
+   (use (reg RETVAL_REGNUM))
+   (return)]
+  "USE_RETURN_INSN (TRUE)"
+  "retal    %0";
+  [(set_attr "type" "call")]
+  )
+
+(define_insn "*return_value_hi"
+  [(parallel [(set (reg RETVAL_REGNUM) (match_operand:HI 0 "register_operand" "r"))
+              (use (reg RETVAL_REGNUM))
+              (return)])]
+  "USE_RETURN_INSN (TRUE)"
+  "retal    %0"
+  [(set_attr "type" "call")]
+  )
+
+(define_insn "*return_value_qi"
+  [(parallel [(set (reg RETVAL_REGNUM) (match_operand:QI 0 "register_operand" "r"))
+              (use (reg RETVAL_REGNUM))
+              (return)])]
+  "USE_RETURN_INSN (TRUE)"
+  "retal    %0"
+  [(set_attr "type" "call")]
+  )
+
+;;=============================================================================
+;; nop
+;;-----------------------------------------------------------------------------
+;; No-op instruction.
+;;=============================================================================
+(define_insn "nop"
+  [(const_int 0)]
+  ""
+  "nop"
+  [(set_attr "length" "2")
+   (set_attr "type" "alu")
+   (set_attr "cc" "none")])
+
+;;=============================================================================
+;; nonlocal_goto
+;;-----------------------------------------------------------------------------
+;; Jump from one function to a label in an outer function.
+;; The return stack must be invalidated, since the function will be exited
+;; without a return.
+;;=============================================================================
+(define_expand "nonlocal_goto"
+  [(use (match_operand 0 "" ""))
+   (use (match_operand 1 "" ""))
+   (use (match_operand 2 "" ""))
+   (use (match_operand 3 "" ""))]
+  ""
+  {
+   emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__nonlocal_goto"),
+		     0, VOIDmode, 3,
+		     operands[0], SImode,
+		     operands[1], Pmode,
+                     operands[2], SImode);
+
+   DONE;
+   }
+)
+
+
+(define_expand "builtin_longjmp"
+  [(use (match_operand 0 "" ""))]
+  ""
+  {
+   rtx ops[3];
+
+   ops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,0)));
+   ops[1] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,4)));
+   ops[2] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,8)));
+
+
+   emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__nonlocal_goto"),
+		     0, VOIDmode, 3,
+		     ops[0], SImode,
+		     ops[1], Pmode,
+                     ops[2], SImode);
+
+   DONE;
+   }
+  )
+
+
+;;=============================================================================
+;; indirect_jump
+;;-----------------------------------------------------------------------------
+;; Jump to an address in reg or memory.
+;;=============================================================================
+(define_expand "indirect_jump"
+  [(set (pc)
+	(match_operand:SI 0 "general_operand" "r,m"))]
+  ""
+  {
+    /* One of the ops has to be in a register.  */
+    if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
+         && !avr32_legitimate_pic_operand_p(operands[0]) )
+      operands[0] = legitimize_pic_address (operands[0], SImode, 0);
+    else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
+      /* If we have an address operand then this function uses the pic register. */
+      current_function_uses_pic_offset_table = 1;
+  })
+
+
+(define_insn "indirect_jump_internal"
+  [(set (pc)
+	(match_operand:SI 0 "general_operand" "r,m,W"))]
+  ""
+  {
+    switch( which_alternative ){
+      case 0:
+        return "mov\tpc, %0";
+      case 1:
+        if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
+          return "lddpc\tpc, %0";
+        else
+          return "ld.w\tpc, %0";
+      case 2:
+        if ( flag_pic )
+          return "ld.w\tpc, r6[%0@got]";
+        else
+          return "lda.w\tpc, %0";
+      default:
+	abort();
+    }
+   }
+  [(set_attr "length" "2,4,8")
+   (set_attr "type" "call,call,call")
+   (set_attr "cc" "none,none,clobber")])
+
+
+;;=============================================================================
+;; casesi
+;;=============================================================================
+
+
+(define_expand "casesi"
+  [(match_operand:SI 0 "register_operand" "")	; index to jump on
+   (match_operand:SI 1 "const_int_operand" "")	; lower bound
+   (match_operand:SI 2 "const_int_operand" "")	; total range
+   (match_operand:SI 3 "" "")			; table label
+   (match_operand:SI 4 "" "")]			; Out of range label
+  ""
+  "
+  {
+    rtx reg;
+    if (operands[1] != const0_rtx)
+      {
+        if (!avr32_const_ok_for_constraint_p(INTVAL (operands[1]), 'I', \"Is21\")){
+          reg = force_reg(SImode, GEN_INT (INTVAL (operands[1])));
+	  emit_insn (gen_subsi3 (reg, operands[0],
+			         reg));
+        } else {
+          reg = gen_reg_rtx (SImode);
+          emit_insn (gen_addsi3 (reg, operands[0],
+		         	 GEN_INT (-INTVAL (operands[1]))));
+        }
+	operands[0] = reg;
+      }
+
+    if (!avr32_const_ok_for_constraint_p(INTVAL (operands[2]), 'K', \"Ks21\"))
+      operands[2] = force_reg (SImode, operands[2]);
+
+    emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+					 operands[4], gen_reg_rtx(SImode)));
+    DONE;
+  }"
+)
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn.  It has no other purpose.
+(define_insn "casesi_internal"
+  [(parallel [(set (pc)
+	       (if_then_else
+		(leu (match_operand:SI 0 "register_operand" "r")
+		     (match_operand:SI 1 "register_immediate_operand" "rKu03"))
+		(mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+				 (label_ref (match_operand 2 "" ""))))
+		(label_ref (match_operand 3 "" ""))))
+              (clobber (match_operand:SI 4 "register_operand" "=r"))
+	      (use (label_ref (match_dup 2)))])]
+  ""
+  {
+    if (flag_pic)
+      return "cp.w\t%0, %1\;brhi\t%3\;sub\t%4, pc, -(%2 - .)\;add\tpc, %4, %0 << 2";
+    return   "cp.w\t%0, %1\;brhi\t%3\;sub\t%4, pc, -(%2 - .)\;ld.w\tpc, %4[%0 << 2]";
+  }
+  [(set_attr "cc" "clobber")
+   (set_attr "length" "16")]
+)
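+
+;; Illustrative note (not part of the original patch): for a dense C switch
+;; the sequence emitted by casesi_internal is a bounds check, an out-of-range
+;; branch, and a PC-relative jump-table lookup, i.e. roughly
+;;     cp.w    r8, 7            ; index against table size
+;;     brhi    .Ldefault        ; unsigned compare also catches negative index
+;;     sub     r9, pc, -(.Ltable - .)
+;;     ld.w    pc, r9[r8 << 2]  ; non-PIC variant
+;; with the PIC variant computing the target with an "add pc" instead of the
+;; final load.  Registers and labels are only an example.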
+
+
+(define_insn "prefetch"
+  [(prefetch (match_operand:SI 0 "register_operand" "r")
+	     (match_operand 1 "const_int_operand" "")
+	     (match_operand 2 "const_int_operand" ""))]
+  ""
+  {
+     return "pref\t%0[0]";
+  }
+
+  [(set_attr "length" "4")
+   (set_attr "type" "load")
+   (set_attr "cc" "none")])
+
+
+
+;;=============================================================================
+;; prologue
+;;-----------------------------------------------------------------------------
+;; This pattern, if defined, emits RTL for entry to a function. The function
+;; entry is responsible for setting up the stack frame, initializing the frame
+;; pointer register, saving callee-saved registers, etc.
+;;=============================================================================
+(define_expand "prologue"
+  [(clobber (const_int 0))]
+  ""
+  "
+  avr32_expand_prologue();
+  DONE;
+  "
+  )
+
+;;=============================================================================
+;; eh_return
+;;-----------------------------------------------------------------------------
+;; This pattern, if defined, affects the way __builtin_eh_return, and
+;; thence the call frame exception handling library routines, are
+;; built. It is intended to handle non-trivial actions needed along
+;; the abnormal return path.
+;;
+;; The address of the exception handler to which the function should
+;; return is passed as an operand to this pattern. It will normally need
+;; to be copied by the pattern to some special register or memory
+;; location. If the pattern needs to determine the location of the
+;; target call frame in order to do so, it may use
+;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
+;; assigned.
+;;
+;; If this pattern is not defined, the default action will be to
+;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
+;; that macro or this pattern needs to be defined if call frame
+;; exception handling is to be used.
+(define_expand "eh_return"
+  [(use (match_operand 0 "general_operand" ""))]
+  ""
+  "
+  avr32_set_return_address (operands[0]);
+  DONE;
+  "
+  )
+
+;;=============================================================================
+;; ffssi2
+;;-----------------------------------------------------------------------------
+(define_insn "ffssi2"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
+  ""
+  "mov    %0, %1
+   brev   %0
+   clz    %0, %0
+   sub    %0, -1
+   cp     %0, 33
+   moveq  %0, 0"
+  [(set_attr "length" "18")
+   (set_attr "cc" "clobber")]
+  )
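+
+;; Worked example (explanatory, not part of the original patch): ffs() is
+;; implemented by reversing the bit order and counting leading zeros.  For
+;; an input of 0x00000008 (bit 3 set), "brev" gives 0x10000000, "clz" then
+;; returns 3, and "sub %0, -1" adds one to produce ffs(8) = 4.  For a zero
+;; input, clz presumably yields 32, so the intermediate result is 33 and the
+;; final "cp"/"moveq" pair forces the defined ffs(0) = 0.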
+
+
+
+;;=============================================================================
+;; swap_h
+;;-----------------------------------------------------------------------------
+(define_insn "*swap_h"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (ior:SI (ashift:SI (match_dup 0) (const_int 16))
+                 (lshiftrt:SI (match_dup 0) (const_int 16))))]
+  ""
+  "swap.h    %0"
+  [(set_attr "length" "2")]
+  )
+
+(define_insn_and_split "bswap_16"
+  [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
+         (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
+                                      (const_int 8))
+                         (const_int 255))
+                 (ashift:HI (and:HI (match_dup 1)
+                                    (const_int 255))
+                            (const_int 8))))]
+  ""
+  {
+   switch ( which_alternative ){
+     case 0:
+       if ( REGNO(operands[0]) == REGNO(operands[1]))
+         return "swap.bh\t%0";
+       else
+         return "mov\t%0, %1\;swap.bh\t%0";
+     case 1:
+       return "stswp.h\t%0, %1";
+     case 2:
+       return "ldswp.sh\t%0, %1";
+     default:
+       abort();
+     }
+  }
+
+  "(reload_completed &&
+     REG_P(operands[0]) && REG_P(operands[1])
+     && (REGNO(operands[0]) != REGNO(operands[1])))"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 0)
+        (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
+                                     (const_int 8))
+                        (const_int 255))
+                (ashift:HI (and:HI (match_dup 0)
+                                   (const_int 255))
+                           (const_int 8))))]
+  ""
+
+  [(set_attr "length" "4,4,4")
+   (set_attr "type" "alu,store,load_rm")]
+  )
+
+(define_insn_and_split "bswap_32"
+  [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
+         (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
+                                              (const_int 4278190080))
+                                      (const_int 24))
+                         (lshiftrt:SI (and:SI (match_dup 1)
+                                              (const_int 16711680))
+                                      (const_int 8)))
+                 (ior:SI (ashift:SI (and:SI (match_dup 1)
+                                            (const_int 65280))
+                                    (const_int 8))
+                         (ashift:SI (and:SI (match_dup 1)
+                                            (const_int 255))
+                                    (const_int 24)))))]
+  ""
+  {
+    switch ( which_alternative ){
+     case 0:
+       if ( REGNO(operands[0]) == REGNO(operands[1]))
+         return "swap.b\t%0";
+       else
+         return "mov\t%0, %1\;swap.b\t%0";
+     case 1:
+       return "stswp.w\t%0, %1";
+     case 2:
+       return "ldswp.w\t%0, %1";
+     default:
+       abort();
+    }
+  }
+  "(reload_completed &&
+    REG_P(operands[0]) && REG_P(operands[1])
+    && (REGNO(operands[0]) != REGNO(operands[1])))"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 0)
+        (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
+                                             (const_int 4278190080))
+                                     (const_int 24))
+                        (lshiftrt:SI (and:SI (match_dup 0)
+                                             (const_int 16711680))
+                                     (const_int 8)))
+                (ior:SI (ashift:SI (and:SI (match_dup 0)
+                                           (const_int 65280))
+                                   (const_int 8))
+                        (ashift:SI (and:SI (match_dup 0)
+                                           (const_int 255))
+                                   (const_int 24)))))]
+  ""
+
+  [(set_attr "length" "4,4,4")
+   (set_attr "type" "alu,store,load_rm")]
+  )
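+
+;; Illustrative note (an assumption, not taken from the patch): the RTL above
+;; is the canonical form of an open-coded 32-bit byte swap, e.g.
+;;     ((x & 0xff000000) >> 24) | ((x & 0x00ff0000) >> 8) |
+;;     ((x & 0x0000ff00) <<  8) | ((x & 0x000000ff) << 24)
+;; which can then be collapsed into a single "swap.b" (or a byte-swapping
+;; load/store when the operand is a suitable memory reference).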
+
+
+;;=============================================================================
+;; blockage
+;;-----------------------------------------------------------------------------
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory.  This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+  [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
+  ""
+  ""
+  [(set_attr "length" "0")]
+)
+
+;;=============================================================================
+;; clzsi2
+;;-----------------------------------------------------------------------------
+(define_insn "clzsi2"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
+  ""
+  "clz    %0, %1"
+  [(set_attr "length" "4")
+   (set_attr "cc" "set_z")]
+  )
+
+;;=============================================================================
+;; ctzsi2
+;;-----------------------------------------------------------------------------
+(define_insn "ctzsi2"
+  [ (set (match_operand:SI 0 "register_operand" "=r,r")
+         (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
+  ""
+  "@
+   brev\t%0\;clz\t%0, %0
+   mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
+  [(set_attr "length" "8")
+   (set_attr "cc" "set_z")]
+  )
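+
+;; Note (explanatory): ctz is computed with the same bit-reversal trick as
+;; ffssi2 above, since ctz(x) == clz(brev(x)); the second alternative simply
+;; copies the operand first when source and destination differ.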
+
+;;=============================================================================
+;; cache instructions
+;;-----------------------------------------------------------------------------
+(define_insn "cache"
+  [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")
+                      (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
+  ""
+  "cache    %0[0], %1"
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "sync"
+  [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
+  ""
+  "sync    %0"
+  [(set_attr "length" "4")]
+  )
+
+;;=============================================================================
+;; TLB instructions
+;;-----------------------------------------------------------------------------
+(define_insn "tlbr"
+  [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
+  ""
+  "tlbr"
+  [(set_attr "length" "2")]
+  )
+
+(define_insn "tlbw"
+  [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
+  ""
+  "tlbw"
+  [(set_attr "length" "2")]
+  )
+
+(define_insn "tlbs"
+  [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
+  ""
+  "tlbs"
+  [(set_attr "length" "2")]
+  )
+
+;;=============================================================================
+;; Breakpoint instruction
+;;-----------------------------------------------------------------------------
+(define_insn "breakpoint"
+  [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
+  ""
+  "breakpoint"
+  [(set_attr "length" "2")]
+  )
+
+;;=============================================================================
+;; Xchg instruction
+;;-----------------------------------------------------------------------------
+(define_insn "xchg"
+  [ (parallel [(set (match_operand:SI 0 "register_operand" "=&r")
+                    (mem:SI (match_operand:SI 1 "register_operand" "r")))
+               (set (mem:SI (match_operand:SI 2 "register_operand" "=1"))
+                    (match_operand:SI 3 "register_operand" "r"))])]
+  ""
+  "xchg\t%0, %1, %3"
+  [(set_attr "length" "4")]
+  )
+
+;;=============================================================================
+;; mtsr/mfsr instruction
+;;-----------------------------------------------------------------------------
+(define_insn "mtsr"
+  [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
+                      (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
+  ""
+  "mtsr\t%0, %1"
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "mfsr"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
+  ""
+  "mfsr\t%0, %1"
+  [(set_attr "length" "4")]
+  )
+
+;;=============================================================================
+;; mtdr/mfdr instruction
+;;-----------------------------------------------------------------------------
+(define_insn "mtdr"
+  [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
+                      (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
+  ""
+  "mtdr\t%0, %1"
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "mfdr"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
+  ""
+  "mfdr\t%0, %1"
+  [(set_attr "length" "4")]
+  )
+
+;;=============================================================================
+;; musfr
+;;-----------------------------------------------------------------------------
+(define_insn "musfr"
+  [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
+  ""
+  "musfr\t%0"
+  [(set_attr "length" "2")
+   (set_attr "cc" "clobber")]
+  )
+
+(define_insn "mustr"
+  [ (set (match_operand:SI 0 "register_operand" "=r")
+         (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
+  ""
+  "mustr\t%0"
+  [(set_attr "length" "2")]
+  )
+
+;;=============================================================================
+;; Saturation Round Scale instruction
+;;-----------------------------------------------------------------------------
+(define_insn "sats"
+  [ (set (match_operand:SI 0 "register_operand" "+r")
+         (unspec:SI [(match_dup 0)
+                     (match_operand 1 "immediate_operand" "Ku05")
+                     (match_operand 2 "immediate_operand" "Ku05")]
+                    UNSPEC_SATS)) ]
+  "TARGET_DSP"
+  "sats\t%0 >> %1, %2"
+  [(set_attr "type" "alu_sat")
+   (set_attr "length" "4")]
+  )
+
+(define_insn "satu"
+  [ (set (match_operand:SI 0 "register_operand" "+r")
+         (unspec:SI [(match_dup 0)
+                     (match_operand 1 "immediate_operand" "Ku05")
+                     (match_operand 2 "immediate_operand" "Ku05")]
+                    UNSPEC_SATU)) ]
+  "TARGET_DSP"
+  "satu\t%0 >> %1, %2"
+  [(set_attr "type" "alu_sat")
+   (set_attr "length" "4")]
+  )
+
+(define_insn "satrnds"
+  [ (set (match_operand:SI 0 "register_operand" "+r")
+         (unspec:SI [(match_dup 0)
+                     (match_operand 1 "immediate_operand" "Ku05")
+                     (match_operand 2 "immediate_operand" "Ku05")]
+                    UNSPEC_SATRNDS)) ]
+  "TARGET_DSP"
+  "satrnds\t%0 >> %1, %2"
+  [(set_attr "type" "alu_sat")
+   (set_attr "length" "4")]
+  )
+
+(define_insn "satrndu"
+  [ (set (match_operand:SI 0 "register_operand" "+r")
+         (unspec:SI [(match_dup 0)
+                     (match_operand 1 "immediate_operand" "Ku05")
+                     (match_operand 2 "immediate_operand" "Ku05")]
+                    UNSPEC_SATRNDU)) ]
+  "TARGET_DSP"
+  "sats\t%0 >> %1, %2"
+  [(set_attr "type" "alu_sat")
+   (set_attr "length" "4")]
+  )
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "align_4"
+  [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
+  ""
+  {
+   assemble_align (32);
+   return "";
+  }
+  [(set_attr "length" "2")]
+)
+
+(define_insn "consttable_start"
+  [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
+  ""
+  {
+   return ".cpool";
+  }
+  [(set_attr "length" "0")]
+  )
+
+(define_insn "consttable_end"
+  [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
+  ""
+  {
+   making_const_table = FALSE;
+   return "";
+  }
+  [(set_attr "length" "0")]
+)
+
+
+(define_insn "consttable_4"
+  [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
+  ""
+  {
+    making_const_table = TRUE;
+    switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+      {
+      case MODE_FLOAT:
+      {
+        REAL_VALUE_TYPE r;
+        char real_string[1024];
+        REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
+        real_to_decimal(real_string, &r, 1024, 0, 1);
+        asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
+        break;
+      }
+      default:
+        assemble_integer (operands[0], 4, 0, 1);
+        break;
+      }
+    return "";
+  }
+  [(set_attr "length" "4")]
+)
+
+(define_insn "consttable_8"
+  [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
+  ""
+  {
+    making_const_table = TRUE;
+    switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+      {
+       case MODE_FLOAT:
+        {
+         REAL_VALUE_TYPE r; 
+         char real_string[1024];
+         REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
+         real_to_decimal(real_string, &r, 1024, 0, 1);
+         asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
+         break;
+        }
+       default:
+         assemble_integer(operands[0], 8, 0, 1);
+        break;
+     }
+    return "";
+  }
+  [(set_attr "length" "8")]
+)
+
+;;=============================================================================
+;; coprocessor instructions
+;;-----------------------------------------------------------------------------
+(define_insn "cop"
+  [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
+                      (match_operand 1 "immediate_operand" "Ku04")
+                      (match_operand 2 "immediate_operand" "Ku04")
+                      (match_operand 3 "immediate_operand" "Ku04")
+                      (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
+  ""
+  "cop\tcp%0, cr%1, cr%2, cr%3, %4"
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "mvcrsi"
+  [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
+         (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
+                              (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
+                             VUNSPEC_MVCR)) ]
+  ""
+  "@
+   mvcr.w\tcp%1, %0, cr%2
+   stcm.w\tcp%1, %0, cr%2
+   stc.w\tcp%1, %0, cr%2"
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "mvcrdi"
+  [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
+         (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
+                              (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
+                             VUNSPEC_MVCR)) ]
+  ""
+  "@
+   mvcr.d\tcp%1, %0, cr%2
+   stcm.d\tcp%1, %0, cr%2-cr%i2
+   stc.d\tcp%1, %0, cr%2"
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "mvrcsi"
+  [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
+                         (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
+                         (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
+                        VUNSPEC_MVRC)]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+      return "mvrc.w\tcp%0, cr%1, %2";
+    case 1:
+      return "ldcm.w\tcp%0, %2, cr%1";
+    case 2:
+      return "ldc.w\tcp%0, cr%1, %2";
+    default:
+      abort();
+   }
+  }
+  [(set_attr "length" "4")]
+  )
+
+(define_insn "mvrcdi"
+  [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
+                         (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
+                         (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
+                        VUNSPEC_MVRC)]
+  ""
+  {
+   switch (which_alternative){
+    case 0:
+      return "mvrc.d\tcp%0, cr%1, %2";
+    case 1:
+      return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
+    case 2:
+      return "ldc.d\tcp%0, cr%1, %2";
+    default:
+      abort();
+   }
+  }
+  [(set_attr "length" "4")]
+  )
+
+;;=============================================================================
+;; epilogue
+;;-----------------------------------------------------------------------------
+;; This pattern emits RTL for exit from a function. The function exit is
+;; responsible for deallocating the stack frame, restoring callee saved
+;; registers and emitting the return instruction.
+;; ToDo: consider using TARGET_ASM_FUNCTION_EPILOGUE instead.
+;;=============================================================================
+(define_expand "epilogue"
+  [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
+  ""
+  "
+  if (USE_RETURN_INSN (FALSE)){
+      emit_jump_insn (gen_return ());
+      DONE;
+  }
+  emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
+	gen_rtvec (1,
+		gen_rtx_RETURN (VOIDmode)),
+	VUNSPEC_EPILOGUE));
+  DONE;
+  "
+  )
+
+(define_insn "*epilogue_insns"
+  [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
+  ""
+  {
+    avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
+    return "";
+  }
+  ; Length is absolute worst case
+  [(set_attr "type" "branch")
+   (set_attr "length" "12")]
+  )
+
+(define_insn "*epilogue_insns_ret_imm"
+  [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
+              (use (reg RETVAL_REGNUM))
+              (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
+  "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
+  {
+    avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
+    return "";
+  }
+  ; Length is absolute worst case
+  [(set_attr "type" "branch")
+   (set_attr "length" "12")]
+  )
+
+(define_insn "sibcall_epilogue"
+  [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
+  ""
+  {
+   avr32_output_return_instruction (FALSE, FALSE,  NULL, NULL);
+   return "";
+  }
+;; Length is absolute worst case
+  [(set_attr "type" "branch")
+   (set_attr "length" "12")]
+  )
+
+(define_insn "*sibcall_epilogue_insns_ret_imm"
+  [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
+              (use (reg RETVAL_REGNUM))
+              (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
+  "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
+  {
+    avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
+    return "";
+  }
+  ; Length is absolute worst case
+  [(set_attr "type" "branch")
+   (set_attr "length" "12")]
+  )
+
+(define_insn "ldxi"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(mem:SI (plus:SI
+                 (match_operand:SI 1 "register_operand" "r")
+                 (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
+                                           (const_int 8)
+                                           (match_operand:SI 3 "immediate_operand" "Ku05"))
+                          (const_int 4)))))]
+  "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
+   || INTVAL(operands[3]) == 0)"
+  {
+   switch ( INTVAL(operands[3]) ){
+    case 0:
+         return "ld.w    %0, %1[%2:b << 2]";
+    case 8:
+         return "ld.w    %0, %1[%2:l << 2]";
+    case 16:
+         return "ld.w    %0, %1[%2:u << 2]";
+    case 24:
+         return "ld.w    %0, %1[%2:t << 2]";
+    default:
+         internal_error("illegal operand for ldxi");
+   }
+  }
+  [(set_attr "type" "load")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
+
+
+
+
+
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   sub     r8, r7, 8
+;;   st.w    r8[0x0], r12
+;; to
+;;   sub     r8, r7, 8
+;;   st.w    r7[-0x8], r12
+;;=============================================================================
+; (set (reg:SI 9 r8)
+;      (plus:SI (reg/f:SI 6 r7)
+;               (const_int ...)))
+; (set (mem:SI (reg:SI 9 r8))
+;      (reg:SI 12 r12))
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+	(plus:SI (match_operand:SI 1 "register_operand" "")
+		 (match_operand:SI 2 "immediate_operand" "")))
+   (set (mem:SI (match_dup 0))
+	(match_operand:SI 3 "register_operand" ""))]
+  "REGNO(operands[0]) != REGNO(operands[1])"
+  [(set (match_dup 0)
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))
+   (set (mem:SI (plus:SI (match_dup 1)
+			 (match_dup 2)))
+	(match_dup 3))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   sub     r6, r7, 4
+;;   ld.w    r6, r6[0x0]
+;; to
+;;   sub     r6, r7, 4
+;;   ld.w    r6, r7[-0x4]
+;;=============================================================================
+; (set (reg:SI 7 r6)
+;      (plus:SI (reg/f:SI 6 r7)
+;               (const_int -4 [0xfffffffc])))
+; (set (reg:SI 7 r6)
+;      (mem:SI (reg:SI 7 r6)))
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+	(plus:SI (match_operand:SI 1 "register_operand" "")
+		 (match_operand:SI 2 "immediate_operand" "")))
+   (set (match_operand:SI 3 "register_operand" "")
+	(mem:SI (match_dup 0)))]
+  "REGNO(operands[0]) != REGNO(operands[1])"
+  [(set (match_dup 0)
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))
+   (set (match_dup 3)
+	(mem:SI (plus:SI (match_dup 1)
+			 (match_dup 2))))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   ld.sb   r0, r7[-0x6]
+;;   cashs.b r0
+;; to
+;;   ld.sb   r0, r7[-0x6]
+;;=============================================================================
+(define_peephole2
+  [(set (match_operand:QI 0 "register_operand" "")
+	(match_operand:QI 1 "load_sb_memory_operand" ""))
+   (set (match_operand:SI 2 "register_operand" "")
+	(sign_extend:SI (match_dup 0)))]
+  "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
+  [(set (match_dup 2)
+	(sign_extend:SI (match_dup 1)))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   ld.ub   r0, r7[-0x6]
+;;   cashu.b r0
+;; to
+;;   ld.ub   r0, r7[-0x6]
+;;=============================================================================
+(define_peephole2
+  [(set (match_operand:QI 0 "register_operand" "")
+	(match_operand:QI 1 "memory_operand" ""))
+   (set (match_operand:SI 2 "register_operand" "")
+	(zero_extend:SI (match_dup 0)))]
+  "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 2)
+	(zero_extend:SI (match_dup 1)))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   ld.sh   r0, r7[-0x6]
+;;   casts.h r0
+;; to
+;;   ld.sh   r0, r7[-0x6]
+;;=============================================================================
+(define_peephole2
+  [(set (match_operand:HI 0 "register_operand" "")
+	(match_operand:HI 1 "memory_operand" ""))
+   (set (match_operand:SI 2 "register_operand" "")
+	(sign_extend:SI (match_dup 0)))]
+  "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 2)
+	(sign_extend:SI (match_dup 1)))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   ld.uh   r0, r7[-0x6]
+;;   castu.h r0
+;; to
+;;   ld.uh   r0, r7[-0x6]
+;;=============================================================================
+(define_peephole2
+  [(set (match_operand:HI 0 "register_operand" "")
+	(match_operand:HI 1 "memory_operand" ""))
+   (set (match_operand:SI 2 "register_operand" "")
+	(zero_extend:SI (match_dup 0)))]
+  "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 2)
+	(zero_extend:SI (match_dup 1)))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   mul     rd, rx, ry
+;;   add     rd2, rd
+;; to
+;;   mac     rd2, rx, ry
+;;=============================================================================
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (mult:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "register_operand" "")))
+   (set (match_operand:SI 3 "register_operand" "")
+        (plus:SI (match_dup 3)
+                 (match_dup 0)))]
+  "peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 3)
+	(plus:SI (mult:SI (match_dup 1)
+			  (match_dup 2))
+		 (match_dup 3)))]
+  "")
+
+
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Changing
+;;   bfextu  rd, rs, k5, 1   (or and{h,l} rd, one_bit_set_mask)
+;; to
+;;   bld     rs, k5
+;;
+;; If rd is dead after the operation.
+;;=============================================================================
+(define_peephole2
+  [ (set (match_operand:SI 0 "register_operand" "")
+         (zero_extract:SI (match_operand:SI 1 "register_operand" "")
+                          (const_int 1)
+                          (match_operand:SI 2 "immediate_operand" "")))
+    (set (cc0)
+         (match_dup 0))]
+  "peep2_reg_dead_p(2, operands[0])"
+  [(set (cc0)
+        (and:SI (match_dup 1)
+                (match_dup 2)))]
+  "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
+
+(define_peephole2
+  [ (set (match_operand:SI 0 "register_operand" "")
+         (and:SI (match_operand:SI 1 "register_operand" "")
+                 (match_operand:SI 2 "one_bit_set_operand" "")))
+    (set (cc0)
+         (match_dup 0))]
+  "peep2_reg_dead_p(2, operands[0])"
+  [(set (cc0)
+        (and:SI (match_dup 1)
+                (match_dup 2)))]
+  "")
+
+;;=============================================================================
+;; Peephole optimizing
+;;-----------------------------------------------------------------------------
+;; Load with extracted index: ld.w  Rd, Rb[Ri:{t/u/b/l} << 2]
+;;
+;;=============================================================================
+
+
+(define_peephole
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extract:SI (match_operand:SI 1 "register_operand" "")
+                         (const_int 8)
+                         (match_operand:SI 2 "avr32_extract_shift_operand" "")))
+   (set (match_operand:SI 3 "register_operand" "")
+        (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+                         (match_operand:SI 4 "register_operand" ""))))]
+
+  "(dead_or_set_p(insn, operands[0]))"
+  {
+   switch ( INTVAL(operands[2]) ){
+    case 0:
+         return "ld.w    %3, %4[%1:b << 2]";
+    case 8:
+         return "ld.w    %3, %4[%1:l << 2]";
+    case 16:
+         return "ld.w    %3, %4[%1:u << 2]";
+    case 24:
+         return "ld.w    %3, %4[%1:t << 2]";
+    default:
+         internal_error("illegal operand for ldxi");
+   }
+  }
+  [(set_attr "type" "load")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")]
+  )
+
+
+
+(define_peephole
+  [(set (match_operand:SI 0 "register_operand" "")
+        (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
+   (set (match_operand:SI 2 "register_operand" "")
+        (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+                         (match_operand:SI 3 "register_operand" ""))))]
+
+  "(dead_or_set_p(insn, operands[0]))"
+
+  "ld.w    %2, %3[%1:b << 2]"
+  [(set_attr "type" "load")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")]
+  )
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extract:SI (match_operand:SI 1 "register_operand" "")
+                         (const_int 8)
+                         (match_operand:SI 2 "avr32_extract_shift_operand" "")))
+   (set (match_operand:SI 3 "register_operand" "")
+        (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+                         (match_operand:SI 4 "register_operand" ""))))]
+
+  "(peep2_reg_dead_p(2, operands[0]))
+   || (REGNO(operands[0]) == REGNO(operands[3]))"
+  [(set (match_dup 3)
+	(mem:SI (plus:SI
+                 (match_dup 4)
+                 (mult:SI (zero_extract:SI (match_dup 1)
+                                           (const_int 8)
+                                           (match_dup 2))
+                          (const_int 4)))))]
+  )
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
+   (set (match_operand:SI 2 "register_operand" "")
+        (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+                         (match_operand:SI 3 "register_operand" ""))))]
+
+  "(peep2_reg_dead_p(2, operands[0]))
+   || (REGNO(operands[0]) == REGNO(operands[2]))"
+  [(set (match_dup 2)
+	(mem:SI (plus:SI
+                 (match_dup 3)
+                 (mult:SI (zero_extract:SI (match_dup 1)
+                                           (const_int 8)
+                                           (const_int 0))
+                          (const_int 4)))))]
+  "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
+  )
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (and:SI (match_operand:SI 1 "register_operand" "")
+                (const_int 255)))
+   (set (match_operand:SI 2 "register_operand" "")
+        (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+                         (match_operand:SI 3 "register_operand" ""))))]
+
+  "(peep2_reg_dead_p(2, operands[0]))
+   || (REGNO(operands[0]) == REGNO(operands[2]))"
+  [(set (match_dup 2)
+	(mem:SI (plus:SI
+                 (match_dup 3)
+                 (mult:SI (zero_extract:SI (match_dup 1)
+                                           (const_int 8)
+                                           (const_int 0))
+                          (const_int 4)))))]
+  ""
+  )
+
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
+                     (const_int 24)))
+   (set (match_operand:SI 2 "register_operand" "")
+        (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+                         (match_operand:SI 3 "register_operand" ""))))]
+
+  "(peep2_reg_dead_p(2, operands[0]))
+   || (REGNO(operands[0]) == REGNO(operands[2]))"
+  [(set (match_dup 2)
+	(mem:SI (plus:SI
+                 (match_dup 3)
+                 (mult:SI (zero_extract:SI (match_dup 1)
+                                           (const_int 8)
+                                           (const_int 24))
+                          (const_int 4)))))]
+  ""
+  )
+
+
+;;************************************************
+;; ANDN
+;;
+;;************************************************
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (not:SI (match_operand:SI 1 "register_operand" "")))
+   (set (match_operand:SI 2 "register_operand" "")
+        (and:SI (match_dup 2)
+                (match_dup 0)))]
+  "peep2_reg_dead_p(2, operands[0])"
+
+  [(set (match_dup 2)
+        (and:SI  (match_dup 2)
+                 (not:SI (match_dup 1))
+                 ))]
+  ""
+)
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+        (not:SI (match_operand:SI 1 "register_operand" "")))
+   (set (match_operand:SI 2 "register_operand" "")
+        (and:SI (match_dup 0)
+                (match_dup 2)
+                ))]
+  "peep2_reg_dead_p(2, operands[0])"
+
+  [(set (match_dup 2)
+        (and:SI  (match_dup 2)
+                 (not:SI (match_dup 1))
+                 ))]
+
+  ""
+)
+
+
+;;=================================================================
+;; Addabs peephole
+;;=================================================================
+
+(define_peephole
+  [(set (match_operand:SI 2 "register_operand" "=r")
+ 	(abs:SI (match_operand:SI 1 "register_operand" "r")))
+   (set (match_operand:SI 0 "register_operand" "=r")
+ 	(plus:SI (match_operand:SI 3 "register_operand" "r")
+ 		 (match_dup 2)))]
+  "dead_or_set_p(insn, operands[2])"
+  "addabs  %0, %3, %1"
+  [(set_attr "length" "4")
+   (set_attr "cc" "set_z")])
+
+(define_peephole
+  [(set (match_operand:SI 2 "register_operand" "=r")
+ 	(abs:SI (match_operand:SI 1 "register_operand" "r")))
+   (set (match_operand:SI 0 "register_operand" "=r")
+ 	(plus:SI (match_dup 2)
+                 (match_operand:SI 3 "register_operand" "r")))]
+  "dead_or_set_p(insn, operands[2])"
+  "addabs  %0, %3, %1"
+  [(set_attr "length" "4")
+   (set_attr "cc" "set_z")])
+
+
+;;=================================================================
+;; Detect roundings
+;;=================================================================
+
+(define_insn "*round"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ashiftrt:SI (plus:SI (match_operand:SI 1 "register_operand" "0")
+                              (match_operand:SI 2 "immediate_operand" "i"))
+                     (match_operand:SI 3 "immediate_operand" "i")))]
+  "avr32_rnd_operands(operands[2], operands[3])"
+
+  "satrnds    %0 >> %3, 31"
+
+  [(set_attr "type" "alu_sat")
+   (set_attr "length" "4")]
+
+  )
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand" "")
+	(plus:SI (match_dup 0)
+                 (match_operand:SI 1 "immediate_operand" "")))
+   (set (match_dup 0)
+	(ashiftrt:SI (match_dup 0)
+                     (match_operand:SI 2 "immediate_operand" "")))]
+  "avr32_rnd_operands(operands[1], operands[2])"
+
+  [(set (match_dup 0)
+        (ashiftrt:SI (plus:SI (match_dup 0)
+                              (match_dup 1))
+                     (match_dup 2)))]
+  )
+
+(define_peephole
+  [(set (match_operand:SI 0 "register_operand" "r")
+	(plus:SI (match_dup 0)
+                 (match_operand:SI 1 "immediate_operand" "i")))
+   (set (match_dup 0)
+	(ashiftrt:SI (match_dup 0)
+                     (match_operand:SI 2 "immediate_operand" "i")))]
+  "avr32_rnd_operands(operands[1], operands[2])"
+
+  "satrnds    %0 >> %2, 31"
+
+  [(set_attr "type" "alu_sat")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")]
+
+  )
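+
+;; Illustrative note (an assumption): avr32_rnd_operands() presumably checks
+;; that the added constant is half of the shift's divisor, i.e. the usual
+;; rounding idiom "(x + (1 << (n - 1))) >> n", which lets the add/shift pair
+;; be replaced by a single "satrnds %0 >> n, 31".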
+
+
+
+
+;;=================================================================
+;; Conditional Subtract
+;;=================================================================
+
+
+(define_peephole
+  [(set (match_operand:SI 0 "register_operand" "")
+	(minus:SI (match_operand:SI 1 "register_operand" "")
+		  (match_operand:SI 2 "immediate_operand" "")))
+   (set (match_dup 1)
+	(unspec:SI [(match_operand 5 "avr32_comparison_operator" "")
+		      (match_dup 0)
+		      (match_dup 1)
+                      (match_operand 3 "general_operand" "")
+                      (match_operand 4 "general_operand" "")]
+                      UNSPEC_MOVSICC))]
+
+  "(dead_or_set_p(insn, operands[0])) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks08\")"
+
+  {
+
+   operands[5] = avr32_output_cmp(operands[5], GET_MODE(operands[3]), operands[3], operands[4]);
+
+   return "sub%5    %1, %2";
+  }
+
+  [(set_attr "length" "10")
+   (set_attr "cc" "clobber")]
+  )
+
+(define_peephole
+  [(set (match_operand:SI 0 "register_operand" "")
+	(plus:SI (match_operand:SI 1 "register_operand" "")
+		  (match_operand:SI 2 "immediate_operand" "")))
+   (set (match_dup 1)
+	(unspec:SI [(match_operand 5 "avr32_comparison_operator" "")
+                    (match_dup 0)
+                    (match_dup 1)
+                    (match_operand 3 "general_operand" "")
+                    (match_operand 4 "general_operand" "")]
+                   UNSPEC_MOVSICC))]
+
+  "(dead_or_set_p(insn, operands[0]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'I', \"Is08\"))"
+
+  {
+   operands[5] = avr32_output_cmp(operands[5], GET_MODE(operands[3]), operands[3], operands[4]);
+
+   return "sub%5    %1, %n2";
+  }
+  [(set_attr "length" "10")
+   (set_attr "cc" "clobber")]
+  )
+
+;;=================================================================
+;; mcall
+;;=================================================================
+(define_peephole
+  [(set (match_operand:SI 0 "register_operand"        "")
+	(match_operand 1 "avr32_const_pool_ref_operand"  ""))
+   (parallel [(call (mem:SI (match_dup 0))
+                    (match_operand 2 "" ""))
+              (clobber (reg:SI LR_REGNUM))])]
+  "dead_or_set_p(insn, operands[0])"
+  "mcall    %1"
+  [(set_attr "type" "call")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")]
+)
+
+(define_peephole
+  [(set (match_operand:SI 2 "register_operand"        "")
+	(match_operand 1 "avr32_const_pool_ref_operand"  ""))
+   (parallel [(set (match_operand 0 "register_operand" "")
+                   (call (mem:SI (match_dup 2))
+                         (match_operand 3 "" "")))
+              (clobber (reg:SI LR_REGNUM))])]
+  "dead_or_set_p(insn, operands[2])"
+  "mcall    %1"
+  [(set_attr "type" "call")
+   (set_attr "length" "4")
+   (set_attr "cc" "call_set")]
+)
+
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand"    "")
+	(match_operand 1 "avr32_const_pool_ref_operand"  ""))
+   (parallel [(call (mem:SI (match_dup 0))
+                    (match_operand 2 "" ""))
+              (clobber (reg:SI LR_REGNUM))])]
+  "peep2_reg_dead_p(2, operands[0])"
+  [(parallel [(call (mem:SI (match_dup 1))
+                    (match_dup 2))
+              (clobber (reg:SI LR_REGNUM))])]
+  ""
+)
+
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand"        "")
+	(match_operand 1 "avr32_const_pool_ref_operand"  ""))
+   (parallel [(set (match_operand 2 "register_operand" "")
+                   (call (mem:SI (match_dup 0))
+                         (match_operand 3 "" "")))
+              (clobber (reg:SI LR_REGNUM))])]
+  "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
+  [(parallel [(set (match_dup 2)
+                   (call (mem:SI (match_dup 1))
+                         (match_dup 3)))
+              (clobber (reg:SI LR_REGNUM))])]
+  ""
+)
+
+;;=================================================================
+;; Returning a value
+;;=================================================================
+
+
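+;; Collapse a final move into the return value register, or a conditional
+;; move selecting the return value, together with the following return into
+;; the compact retal/conditional ret forms, falling back to the epilogue
+;; code emitted by avr32_output_return_instruction.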
+(define_peephole
+  [(set (match_operand 0 "register_operand" "")
+        (match_operand 1 "register_operand" ""))
+   (return)]
+  "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
+   && (REGNO(operands[1]) != LR_REGNUM)
+   && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
+  "retal    %1"
+  [(set_attr "type" "call")
+   (set_attr "length" "2")]
+  )
+
+
+(define_peephole
+  [(set (match_operand 0 "register_operand" "r")
+        (match_operand 1 "immediate_operand" "i"))
+   (return)]
+  "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
+   ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
+  {
+    avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
+    return "";
+  }
+  [(set_attr "type" "call")
+   (set_attr "length" "4")]
+  )
+
+(define_peephole
+  [(set (match_operand 0 "register_operand" "r")
+        (match_operand 1 "immediate_operand" "i"))
+   (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
+  "(REGNO(operands[0]) == RETVAL_REGNUM) &&
+   ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
+  {
+    avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
+    return "";
+  }
+  ; Length is absolute worst case
+  [(set_attr "type" "branch")
+   (set_attr "length" "12")]
+  )
+
+(define_peephole
+  [(set (match_operand 0 "register_operand" "r")
+	(unspec [(match_operand 1 "avr32_comparison_operator" "")
+                 (match_operand 2 "register_immediate_operand" "rKs08")
+                 (match_operand 3 "register_immediate_operand" "rKs08")
+                 (match_operand 4 "register_immediate_operand" "r")
+                 (match_operand 5 "register_immediate_operand" "rKs21")
+                 ]
+                UNSPEC_MOVSICC ))
+   (return)]
+  "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
+   ((GET_MODE(operands[4]) == SImode) ||
+   ((GET_MODE(operands[4]) != SImode) && (GET_CODE(operands[5]) == REG)))"
+  {
+   operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
+
+   if ( GET_CODE(operands[2]) == REG
+        && GET_CODE(operands[3]) == REG
+        && REGNO(operands[2]) != LR_REGNUM
+        && REGNO(operands[3]) != LR_REGNUM ){
+      return "ret%1    %2\;ret%i1    %3";
+   } else if ( GET_CODE(operands[2]) == REG
+               && GET_CODE(operands[3]) == CONST_INT ){
+      if ( INTVAL(operands[3]) == -1
+           || INTVAL(operands[3]) == 0
+           || INTVAL(operands[3]) == 1 ){
+        return "ret%1    %2\;ret%i1    %d3";
+      } else {
+        return "mov%1    r12, %2\;mov%i1    r12, %3\;retal    r12";
+      }
+   } else if ( GET_CODE(operands[2]) == CONST_INT
+               && GET_CODE(operands[3]) == REG ){
+      if ( INTVAL(operands[2]) == -1
+           || INTVAL(operands[2]) == 0
+           || INTVAL(operands[2]) == 1 ){
+        return "ret%1    %d2\;ret%i1    %3";
+      } else {
+        return "mov%1    r12, %2\;mov%i1    r12, %3\;retal    r12";
+      }
+   } else {
+      if ( (INTVAL(operands[2]) == -1
+            || INTVAL(operands[2]) == 0
+            || INTVAL(operands[2]) == 1 )
+           && (INTVAL(operands[3]) == -1
+               || INTVAL(operands[3]) == 0
+               || INTVAL(operands[3]) == 1 )){
+        return "ret%1    %d2\;ret%i1    %d3";
+      } else {
+        return "mov%1    r12, %2\;mov%i1    r12, %3\;retal    r12";
+      }
+   }
+  }
+
+  [(set_attr "length" "14")
+   (set_attr "cc" "clobber")
+   (set_attr "type" "call")])
+
+
+;;=================================================================
+;; mulnhh.w
+;;=================================================================
+
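+;; Merge a HImode negation feeding a widening 16x16 multiply into a single
+;; multiply of the negated operand, so that a mulnhh.w pattern can match,
+;; when the negated temporary is dead or overwritten by the product.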
+(define_peephole2
+  [(set (match_operand:HI 0 "register_operand" "")
+        (neg:HI (match_operand:HI 1 "register_operand" "")))
+   (set (match_operand:SI 2 "register_operand" "")
+        (mult:SI
+         (sign_extend:SI (match_dup 0))
+         (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
+  "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
+  [ (set (match_dup 2)
+         (mult:SI
+          (sign_extend:SI (neg:HI (match_dup 1)))
+          (sign_extend:SI (match_dup 3))))]
+  ""
+  )
+
+(define_peephole2
+  [(set (match_operand:HI 0 "register_operand" "")
+        (neg:HI (match_operand:HI 1 "register_operand" "")))
+   (set (match_operand:SI 2 "register_operand" "")
+        (mult:SI
+         (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
+         (sign_extend:SI (match_dup 0))))]
+  "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
+  [ (set (match_dup 2)
+         (mult:SI
+          (sign_extend:SI (neg:HI (match_dup 1)))
+          (sign_extend:SI (match_dup 3))))]
+  ""
+  )
+
+
+
+;;=================================================================
+;; sthh.w
+;;=================================================================
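+;; The patterns below insert and extract 16-bit and 8-bit elements of the
+;; packed V2HI/V4QI modes with the bfins and bfextu bitfield instructions;
+;; concatv2hi builds a V2HI value from two halfword registers.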
+(define_insn "vec_setv2hi"
+  [(set (match_operand:V2HI 0 "register_operand" "+r")
+        (vec_merge:V2HI
+         (match_dup 0)
+         (vec_duplicate:V2HI
+          (match_operand:HI 1 "register_operand" "r"))
+         (const_int 1)))]
+  ""
+  "bfins\t%0, %1, 16, 16"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")])
+
+(define_insn "vec_setv2lo"
+  [(set (match_operand:V2HI 0 "register_operand" "+r")
+        (vec_merge:V2HI
+         (match_dup 0)
+         (vec_duplicate:V2HI
+          (match_operand:HI 1 "register_operand" "r"))
+         (const_int 2)))]
+  ""
+  "bfins\t%0, %1, 0, 16"
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")])
+
+(define_expand "vec_setv2"
+  [(set (match_operand:V2HI 0 "register_operand" "")
+        (vec_merge:V2HI
+         (match_dup 0)
+         (vec_duplicate:V2HI
+          (match_operand:HI 1 "register_operand" ""))
+         (match_operand 2 "immediate_operand" "")))]
+  ""
+  { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
+  )
+
+(define_insn "vec_extractv2hi"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (vec_select:HI
+         (match_operand:V2HI 1 "register_operand" "r")
+         (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+  ""
+  {
+   if ( INTVAL(operands[2]) == 0 )
+      return "bfextu\t%0, %1, 16, 16";
+   else
+      return "bfextu\t%0, %1, 0, 16";
+  }
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")])
+
+(define_insn "vec_extractv4qi"
+  [(set (match_operand:QI 0 "register_operand" "=r")
+        (vec_select:QI
+         (match_operand:V4QI 1 "register_operand" "r")
+         (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+  ""
+  {
+   switch ( INTVAL(operands[2]) ){
+     case 0:
+       return "bfextu\t%0, %1, 24, 8";
+     case 1:
+       return "bfextu\t%0, %1, 16, 8";
+     case 2:
+       return "bfextu\t%0, %1, 8, 8";
+     case 3:
+       return "bfextu\t%0, %1, 0, 8";
+     default:
+       abort();
+   }
+  }
+  [(set_attr "type" "alu")
+   (set_attr "length" "4")
+   (set_attr "cc" "clobber")])
+
+
+(define_insn "concatv2hi"
+  [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
+        (vec_concat:V2HI
+         (match_operand:HI 1 "register_operand" "r, r, 0")
+         (match_operand:HI 2 "register_operand" "r, 0, r")))]
+  ""
+  "@
+   mov\t%0, %1\;bfins\t%0, %2, 0, 16
+   bfins\t%0, %2, 0, 16
+   bfins\t%0, %1, 16, 16"
+  [(set_attr "length" "6, 4, 4")
+   (set_attr "type" "alu")])
+
+;(define_peephole2
+;  [(set (match_operand:HI 0 "register_operand" "r")
+;        (plus:HI (match_operand:HI 3 "register_operand" "r")
+;                 (match_operand:HI 4 "register_operand" "r")))
+;   (set (match_operand:HI 1 "register_operand" "r")
+;        (minus:HI (match_dup 3)
+;                  (match_dup 4)))]
+;  "REGNO(operands[0]) != REGNO(operands[3])"
+;  [(set (match_dup 2)
+;        (vec_concat:V2HI
+;         (minus:HI (match_dup 3)
+;                  (match_dup 4))
+;         (plus:HI (match_dup 3) (match_dup 4))))
+;   (set (match_dup 1) (vec_select:HI (match_dup 2)
+;                                     (parallel [(const_int 0)])))]
+;
+;  "operands[2] = gen_rtx_REG(V2HImode, REGNO(operands[0]));"
+;  )
+;
+;(define_peephole2
+;  [(set (match_operand:HI 0 "register_operand" "r")
+;        (minus:HI (match_operand:HI 3 "register_operand" "r")
+;                 (match_operand:HI 4 "register_operand" "r")))
+;   (set (match_operand:HI 1 "register_operand" "r")
+;        (plus:HI (match_dup 3)
+;                  (match_dup 4)))]
+;  "REGNO(operands[0]) != REGNO(operands[3])"
+;  [(set (match_dup 2)
+;        (vec_concat:V2HI
+;         (plus:HI (match_dup 3)
+;                  (match_dup 4))
+;         (minus:HI (match_dup 3) (match_dup 4))))
+;   (set (match_dup 1) (vec_select:HI (match_dup 2)
+;                                     (parallel [(const_int 0)])))]
+;
+;  "operands[2] = gen_rtx_REG(V2HImode, REGNO(operands[0]));"
+;  )
+
+
+;(define_peephole2
+;  [(match_scratch:V2HI 5 "r")
+;   (set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "")
+;                         (match_operand:HI 1 "immediate_operand" "")))
+;        (match_operand:HI 2 "register_operand"  "r"))
+;   (set (mem:HI (plus:SI (match_dup 0)
+;                         (match_operand:HI 3 "immediate_operand" "")))
+;        (match_operand:HI 4 "register_operand"  "r"))]
+;  "(GET_CODE(operands[1]) == CONST_INT) && (GET_CODE(operands[3]) == CONST_INT)
+;   && (INTVAL(operands[3]) == (INTVAL(operands[1]) + 2))"
+;
+;  [(set (match_dup 5)
+;        (vec_concat:V2HI
+;         (match_dup 2)
+;         (match_dup 4)))
+;   (set (mem:V2HI (plus:SI (match_dup 0) (match_dup 1)))
+;        (match_dup 5))]
+;  ""
+;  )
+;
+
+;(define_insn "sthh_w"
+;  [(set (match_operand:V2HI 0 "avr32_sthh_w_memory_operand" "m")
+;        (vec_concat:V2HI
+;         (vec_select:HI (match_operand:V2HI 1 "register_operand" "r")
+;                        (parallel [(match_operand 3 "immediate_operand" "i")]))
+;         (vec_select:HI (match_operand:V2HI 2 "register_operand" "r")
+;                        (parallel [(match_operand 4 "immediate_operand" "i")]))))]
+;  "MEM_ALIGN(operands[0]) >= 32"
+;  "sthh.w\t%0, %1:%h3, %2:%h4"
+;  [(set_attr "length" "4")
+;   (set_attr "type" "store")])
+;
+;(define_peephole2
+;  [(set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "")
+;                         (match_operand:HI 1 "immediate_operand" "")))
+;        (match_operand:HI 2 "register_operand"  "r"))
+;   (set (mem:HI (plus:SI (match_dup 0)
+;                         (match_operand:HI 3 "avr32_sthh_operand" "")))
+;        (match_operand:HI 4 "register_operand"  "r"))]
+;  "(GET_CODE(operands[1]) == CONST_INT) && (GET_CODE(operands[3]) == CONST_INT)
+;   && (INTVAL(operands[3]) == (INTVAL(operands[1]) - 2))"
+;
;  [(parallel [(set (mem:HI (plus:SI (match_dup 0)
+;                                    (match_dup 3)))
+;                   (match_dup 4))
+;              (set (mem:HI (plus:SI (match_dup 0)
+;                                    (plus:SI (match_dup 3) (const_int 2))))
+;                   (match_dup 2))])]
+;  ""
+;  )
+
+
+;; Load the SIMD description
+(include "simd.md")
+
+;; Load the FP coprocessor patterns
+(include "fpcp.md")
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/avr32-modes.def gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32-modes.def
--- gcc-4.0.2/gcc/config/avr32/avr32-modes.def	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32-modes.def	2005-08-19 14:17:15.000000000 +0200
@@ -0,0 +1 @@
+VECTOR_MODES (INT, 4);        /*            V4QI V2HI */
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/avr32-protos.h gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32-protos.h
--- gcc-4.0.2/gcc/config/avr32/avr32-protos.h	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/avr32-protos.h	2006-10-10 13:03:42.000000000 +0200
@@ -0,0 +1,175 @@
+/*
+   Prototypes for exported functions defined in avr32.c
+   Copyright 2003-2006 Atmel Corporation.
+
+   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+   Initial porting by Anders �dland.
+
+   This file is part of GCC.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+#ifndef AVR32_PROTOS_H
+#define AVR32_PROTOS_H
+
+extern const int swap_reg[];
+
+extern int avr32_valid_macmac_bypass (rtx, rtx);
+extern int avr32_valid_mulmac_bypass (rtx, rtx);
+
+extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
+extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
+
+extern const char *avr32_strip_name_encoding (const char *);
+
+extern rtx avr32_get_note_reg_equiv (rtx insn);
+
+extern int avr32_use_return_insn (int iscond);
+
+extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
+
+extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
+extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
+extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
+
+extern void avr32_output_return_instruction (int single_ret_inst,
+					     int iscond, rtx cond,
+					     rtx r12_imm);
+extern void avr32_expand_prologue (void);
+extern void avr32_set_return_address (rtx source);
+
+extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
+extern int avr32_extra_constraint_s (rtx value, const int strict);
+extern int avr32_eh_return_data_regno (const int n);
+extern int avr32_initial_elimination_offset (const int from, const int to);
+extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
+			       tree type, int named);
+extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
+					rtx libname, tree fndecl);
+extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
+					enum machine_mode mode,
+					tree type, int named);
+#ifdef ARGS_SIZE_RTX
+/* expr.h defines ARGS_SIZE_RTX and `enum direction'.  */
+extern enum direction avr32_function_arg_padding (enum machine_mode mode,
+						  tree type);
+#endif /* ARGS_SIZE_RTX */
+extern rtx avr32_function_value (tree valtype, tree func);
+extern rtx avr32_libcall_value (enum machine_mode mode);
+extern int avr32_sched_use_dfa_pipeline_interface (void);
+extern bool avr32_return_in_memory (tree type, tree fntype);
+extern void avr32_regs_to_save (char *operand);
+extern void avr32_target_asm_function_prologue (FILE * file,
+						HOST_WIDE_INT size);
+extern void avr32_target_asm_function_epilogue (FILE * file,
+						HOST_WIDE_INT size);
+extern void avr32_trampoline_template (FILE * file);
+extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
+					 rtx static_chain);
+extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
+				     int strict);
+extern int avr32_legitimate_constant_p (rtx x);
+
+extern int avr32_legitimate_pic_operand_p (rtx x);
+
+extern rtx avr32_find_symbol (rtx x);
+extern void avr32_select_section (rtx exp, int reloc, int align);
+extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
+extern void avr32_asm_file_end (FILE * stream);
+extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
+extern void avr32_asm_output_common (FILE * stream, const char *name,
+				     int size, int rounded);
+extern void avr32_asm_output_label (FILE * stream, const char *name);
+extern void avr32_asm_declare_object_name (FILE * stream, char *name,
+					   tree decl);
+extern void avr32_asm_globalize_label (FILE * stream, const char *name);
+extern void avr32_asm_weaken_label (FILE * stream, const char *name);
+extern void avr32_asm_output_external (FILE * stream, tree decl,
+				       const char *name);
+extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
+extern void avr32_asm_output_labelref (FILE * stream, const char *name);
+extern void avr32_notice_update_cc (rtx exp, rtx insn);
+extern void avr32_print_operand (FILE * stream, rtx x, int code);
+extern void avr32_print_operand_address (FILE * stream, rtx x);
+
+extern int avr32_symbol (rtx x);
+
+extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
+				      unsigned HOST_WIDE_INT align);
+
+extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
+extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
+
+extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
+					    const char *str);
+
+extern bool avr32_cannot_force_const_mem (rtx x);
+
+extern void avr32_init_builtins (void);
+
+extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
+				 enum machine_mode mode, int ignore);
+
+extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
+
+extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
+
+extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
+				     enum machine_mode mode,
+				     tree type, bool named);
+
+extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
+				    int write_back, int in_struct_p,
+				    int scalar_p);
+extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
+				     int in_struct_p, int scalar_p);
+extern int avr32_gen_movmemsi (rtx * operands);
+
+extern int avr32_rnd_operands (rtx add, rtx shift);
+extern int avr32_adjust_insn_length (rtx insn, int length);
+
+extern int symbol_mentioned_p (rtx x);
+extern int label_mentioned_p (rtx x);
+extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
+extern int avr32_address_register_rtx_p (rtx x, int strict_p);
+extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
+				     int strict_p);
+
+extern int avr32_const_double_immediate (rtx value);
+extern void avr32_init_expanders (void);
+extern rtx avr32_return_addr (int count, rtx frame);
+extern bool avr32_got_mentioned_p (rtx addr);
+
+extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
+
+extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
+extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
+#ifdef RTX_CODE
+extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
+#endif
+
+extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
+extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
+extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
+extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
+extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
+			     rtx op0, rtx op1);
+
+extern rtx get_next_insn_cond (rtx cur_insn);
+extern int set_next_insn_cond (rtx cur_insn, rtx cond);
+extern void avr32_override_options (void);
+
+#endif /* AVR32_PROTOS_H */
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/crti.asm gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/crti.asm
--- gcc-4.0.2/gcc/config/avr32/crti.asm	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/crti.asm	2006-10-10 12:36:34.000000000 +0200
@@ -0,0 +1,64 @@
+/*
+   Init/fini stuff for AVR32.
+   Copyright 2003-2006 Atmel Corporation.
+
+   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+
+   This file is part of GCC.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+	
+/* The code in sections .init and .fini is supposed to be a single
+   regular function.  The function in .init is called directly from
+   start in crt1.asm.  The function in .fini is atexit()ed in crt1.asm
+   too.
+
+   crti.asm contributes the prologue of a function to these sections,
+   and crtn.asm comes up the epilogue.  STARTFILE_SPEC should list
+   crti.o before any other object files that might add code to .init
+   or .fini sections, and ENDFILE_SPEC should list crtn.o after any
+   such object files.  */
+		
+	.file		"crti.asm"
+
+	.section	".init"
+/* Just load the GOT */
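+/* The literal at 1: holds (0b - _GLOBAL_OFFSET_TABLE_); "rsub r6, pc"
+   computes pc - r6, which leaves the absolute GOT address in r6, pc at the
+   rsub being the address of label 0:.  */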
+	.align 2
+	.global	_init
+_init:
+	stm	--sp, r6, lr
+	lddpc	r6, 1f		
+0:	
+	rsub	r6, pc
+	rjmp	2f
+	.align	2
+1:	.long	0b - _GLOBAL_OFFSET_TABLE_		
+2:	
+				
+	.section	".fini"
+/* Just load the GOT */
+	.align	2
+	.global	_fini
+_fini:
+	stm	--sp, r6, lr
+	lddpc	r6, 1f		
+0:	
+	rsub	r6, pc
+	rjmp	2f
+	.align	2
+1:	.long	0b - _GLOBAL_OFFSET_TABLE_		
+2:	
+
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/crtn.asm gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/crtn.asm
--- gcc-4.0.2/gcc/config/avr32/crtn.asm	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/crtn.asm	2006-10-10 12:36:34.000000000 +0200
@@ -0,0 +1,44 @@
+/*   Copyright (C) 2001 Free Software Foundation, Inc.
+    Written By Nick Clifton
+
+  This file is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the
+  Free Software Foundation; either version 2, or (at your option) any
+  later version.
+
+  In addition to the permissions in the GNU General Public License, the
+  Free Software Foundation gives you unlimited permission to link the
+  compiled version of this file with other programs, and to distribute
+  those programs without any restriction coming from the use of this
+  file.  (The General Public License restrictions do apply in other
+  respects; for example, they cover modification of the file, and
+  distribution when not linked into another program.)
+
+  This file is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, 59 Temple Place - Suite 330,
+  Boston, MA 02111-1307, USA.
+
+     As a special exception, if you link this library with files
+     compiled with GCC to produce an executable, this does not cause
+     the resulting executable to be covered by the GNU General Public License.
+     This exception does not however invalidate any other reasons why
+     the executable file might be covered by the GNU General Public License.
+*/
+
+
+
+	
+	.file		"crtn.asm"
+
+	.section	".init"
+	ldm	sp++, r6, pc
+			
+	.section	".fini"
+	ldm	sp++, r6, pc
+		
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/fpcp.md gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/fpcp.md
--- gcc-4.0.2/gcc/config/avr32/fpcp.md	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/fpcp.md	2006-10-10 12:36:34.000000000 +0200
@@ -0,0 +1,551 @@
+;;   AVR32 machine description file for Floating-Point instructions.
+;;   Copyright 2003-2006 Atmel Corporation.
+;;
+;;   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+;;
+;;   This file is part of GCC.
+;;
+;;   This program is free software; you can redistribute it and/or modify
+;;   it under the terms of the GNU General Public License as published by
+;;   the Free Software Foundation; either version 2 of the License, or
+;;   (at your option) any later version.
+;;
+;;   This program is distributed in the hope that it will be useful,
+;;   but WITHOUT ANY WARRANTY; without even the implied warranty of
+;;   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;;   GNU General Public License for more details.
+;;
+;;   You should have received a copy of the GNU General Public License
+;;   along with this program; if not, write to the Free Software
+;;   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; -*- Mode: Scheme -*-
+
+;;******************************************************************************
+;; Automaton pipeline description for floating-point coprocessor insns
+;;******************************************************************************
+(define_cpu_unit "fid,fm1,fm2,fm3,fm4,fwb,fcmp,fcast" "avr32_ap")
+
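+;; Each reservation below describes which pipeline units a floating-point
+;; coprocessor instruction occupies.  The is, da and d units are assumed to
+;; come from the core "avr32_ap" pipeline description in avr32.md; the units
+;; declared above model the FP coprocessor stages themselves.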
+(define_insn_reservation "fmv_op" 1
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fmv"))
+  "is,da,d,fid,fwb")
+
+(define_insn_reservation "fmul_op" 5
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fmul"))
+  "is,da,d,fid,fm1,fm2,fm3,fm4,fwb")
+
+(define_insn_reservation "fcmps_op" 1
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fcmps"))
+  "is,da,d,fid,fcmp")
+
+(define_insn_reservation "fcmpd_op" 2
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fcmpd"))
+  "is,da,d,fid*2,fcmp")
+
+(define_insn_reservation "fcast_op" 3
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fcast"))
+  "is,da,d,fid,fcmp,fcast,fwb")
+
+(define_insn_reservation "fmvcpu_op" 2
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fmvcpu"))
+  "is,da,d")
+
+(define_insn_reservation "fldd_op" 1
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fldd"))
+  "is,da,d,fwb")
+
+(define_insn_reservation "flds_op" 1
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "flds"))
+  "is,da,d,fwb")
+
+(define_insn_reservation "fsts_op" 0
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fsts"))
+  "is,da*2,d")
+
+(define_insn_reservation "fstd_op" 0
+  (and (eq_attr "pipeline" "ap")
+       (eq_attr "type" "fstd"))
+  "is,da*2,d")
+
+
+(define_insn "*movsf_fpcp"
+  [(set (match_operand:SF 0 "nonimmediate_operand"     "=f,f,r,f,m,r,r,r,m")
+	(match_operand:SF 1 "general_operand"          " f,r,f,m,f,r,G,m,r"))]
+  "TARGET_HARD_FLOAT"
+  "@
+   fmov.s\t%0, %1
+   fmov.s\t%0, %1
+   fmov.s\t%0, %1
+   fld.s\t%0, %1
+   fst.s\t%0, %1
+   mov\t%0, %1
+   mov\t%0, %1
+   ld.w\t%0, %1
+   st.w\t%0, %1"
+  [(set_attr "length" "4,4,4,4,4,2,4,4,4")
+   (set_attr "type" "fmv,flds,fmvcpu,flds,fsts,alu,alu,load,store")])
+
+(define_insn_and_split "*movdf_fpcp"
+  [(set (match_operand:DF 0 "nonimmediate_operand"     "=f,f,r,f,m,r,r,m")
+	(match_operand:DF 1 "general_operand"          " f,r,f,m,f,r,m,r"))]
+  "TARGET_HARD_FLOAT"
+  "@
+   fmov.d\t%0, %1
+   fmov.d\t%0, %1
+   fmov.d\t%0, %1
+   fld.d\t%0, %1
+   fst.d\t%0, %1
+   mov\t%0, %1\;mov\t%m0, %m1
+   ld.d\t%0, %1
+   st.d\t%0, %1"
+
+  "TARGET_HARD_FLOAT
+   && reload_completed
+   && (REG_P(operands[0]) &&  (REGNO_REG_CLASS(REGNO(operands[0])) == GENERAL_REGS))
+   && (REG_P(operands[1]) &&  (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS))"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))]
+  "
+   {
+    operands[2] = gen_highpart (SImode, operands[0]);
+    operands[0] = gen_lowpart (SImode, operands[0]);
+    operands[3] = gen_highpart(SImode, operands[1]);
+    operands[1] = gen_lowpart(SImode, operands[1]);
+   }
+  "
+
+  [(set_attr "length" "4,4,4,4,4,4,4,4")
+   (set_attr "type" "fmv,fldd,fmvcpu,fldd,fstd,alu2,load2,store2")])
+
+
+(define_insn "mulsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+		 (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fmul.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "nmulsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+                         (match_operand:SF 2 "avr32_fp_register_operand" "f"))))]
+  "TARGET_HARD_FLOAT"
+  "fnmul.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_peephole2
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "")
+	(mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
+		 (match_operand:SF 2 "avr32_fp_register_operand" "")))
+   (set (match_operand:SF          3 "avr32_fp_register_operand" "")
+	(neg:SF (match_dup 0)))]
+  "TARGET_HARD_FLOAT &&
+   (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
+  [(set (match_dup 3)
+	(neg:SF (mult:SF (match_dup 1)
+			 (match_dup 2))))]
+)
+
+
+(define_insn "macsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(plus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+                          (match_operand:SF 2 "avr32_fp_register_operand" "f"))
+                 (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fmac.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "nmacsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(plus:SF  (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+                                   (match_operand:SF 2 "avr32_fp_register_operand" "f")))
+                  (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fnmac.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_peephole2
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "")
+	(mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
+		 (match_operand:SF 2 "avr32_fp_register_operand" "")))
+   (set (match_operand:SF          3 "avr32_fp_register_operand" "")
+	(minus:SF
+	 (match_dup 3)
+	 (match_dup 0)))]
+  "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 3)
+	(plus:SF  (neg:SF (mult:SF (match_dup 1)
+                                   (match_dup 2)))
+                  (match_dup 3)))]
+)
+
+
+(define_insn "msubacsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(minus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+                           (match_operand:SF 2 "avr32_fp_register_operand" "f"))
+                  (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fmsc.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_peephole2
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "")
+	(mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
+		 (match_operand:SF 2 "avr32_fp_register_operand" "")))
+   (set (match_operand:SF          3 "avr32_fp_register_operand" "")
+	(minus:SF
+	 (match_dup 0)
+	 (match_dup 3)))]
+  "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 3)
+	(minus:SF  (mult:SF (match_dup 1)
+			    (match_dup 2))
+		   (match_dup 3)))]
+)
+
+(define_insn "nmsubacsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(minus:SF  (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+                                    (match_operand:SF 2 "avr32_fp_register_operand" "f")))
+                   (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fnmsc.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+
+
+(define_insn "addsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(plus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+		 (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fadd.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "subsf3"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(minus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
+                  (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fsub.s\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+
+(define_insn "negsf2"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(neg:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fneg.s\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmv")])
+
+(define_insn "abssf2"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(abs:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fabs.s\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmv")])
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF          0 "avr32_fp_register_operand" "=f")
+	(float_truncate:SF
+         (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fcastd.s\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "fcast")])
+
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(float_extend:DF
+         (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fcasts.d\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "fcast")])
+
+(define_insn "muldf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+		 (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fmul.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "nmuldf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+                         (match_operand:DF 2 "avr32_fp_register_operand" "f"))))]
+  "TARGET_HARD_FLOAT"
+  "fnmul.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_peephole2
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "")
+	(mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
+		 (match_operand:DF 2 "avr32_fp_register_operand" "")))
+   (set (match_operand:DF          3 "avr32_fp_register_operand" "")
+	(neg:DF (match_dup 0)))]
+  "TARGET_HARD_FLOAT &&
+   (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
+  [(set (match_dup 3)
+	(neg:DF (mult:DF (match_dup 1)
+			 (match_dup 2))))]
+)
+
+(define_insn "macdf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(plus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+                          (match_operand:DF 2 "avr32_fp_register_operand" "f"))
+                 (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fmac.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "msubacdf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(minus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+                           (match_operand:DF 2 "avr32_fp_register_operand" "f"))
+                  (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fmsc.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_peephole2
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "")
+	(mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
+		 (match_operand:DF 2 "avr32_fp_register_operand" "")))
+   (set (match_operand:DF          3 "avr32_fp_register_operand" "")
+	(minus:DF
+	 (match_dup 0)
+	 (match_dup 3)))]
+  "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 3)
+	(minus:DF  (mult:DF (match_dup 1)
+			    (match_dup 2))
+		   (match_dup 3)))]
+  )
+
+(define_insn "nmsubacdf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(minus:DF  (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+                                    (match_operand:DF 2 "avr32_fp_register_operand" "f")))
+                   (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fnmsc.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "nmacdf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(plus:DF  (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+                                   (match_operand:DF 2 "avr32_fp_register_operand" "f")))
+                  (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
+  "TARGET_HARD_FLOAT"
+  "fnmac.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_peephole2
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "")
+	(mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
+		 (match_operand:DF 2 "avr32_fp_register_operand" "")))
+   (set (match_operand:DF          3 "avr32_fp_register_operand" "")
+	(minus:DF
+	 (match_dup 3)
+	 (match_dup 0)))]
+  "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
+  [(set (match_dup 3)
+	(plus:DF  (neg:DF (mult:DF (match_dup 1)
+                                   (match_dup 2)))
+                  (match_dup 3)))]
+)
+
+(define_insn "adddf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(plus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+		 (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fadd.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "subdf3"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(minus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
+                  (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fsub.d\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmul")])
+
+(define_insn "negdf2"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(neg:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fneg.d\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmv")])
+
+(define_insn "absdf2"
+  [(set (match_operand:DF          0 "avr32_fp_register_operand" "=f")
+	(abs:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fabs.d\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmv")])
+
+
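+;; Floating-point compares are done in three steps: cmpsf_internal and
+;; cmpdf_internal set the FP status register, fpcc_to_reg copies it into a
+;; general register with fmov.s, and reg_to_cc transfers it to the CPU status
+;; register with musfr so the ordinary cc0-based branch patterns can test it.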
+(define_expand "cmpdf"
+  [(set (cc0)
+	(compare:DF
+	 (match_operand:DF 0 "general_operand" "")
+	 (match_operand:DF 1 "general_operand"  "")))]
+  "TARGET_HARD_FLOAT"
+  "{
+   rtx tmpreg;
+   if ( !REG_P(operands[0]) )
+     operands[0] = force_reg(DFmode, operands[0]);
+
+   if ( !REG_P(operands[1]) )
+     operands[1] = force_reg(DFmode, operands[1]);
+
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = operands[1];
+
+   emit_insn(gen_cmpdf_internal(operands[0], operands[1]));
+
+   tmpreg = gen_reg_rtx(SImode);
+   emit_insn(gen_fpcc_to_reg(tmpreg));
+   emit_insn(gen_reg_to_cc(tmpreg));
+
+   DONE;
+  }"
+)
+
+(define_insn "cmpdf_internal"
+  [(set (reg:CC FPCC_REGNUM)
+	(compare:CC
+	 (match_operand:DF 0 "avr32_fp_register_operand" "f")
+	 (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  {
+   if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
+      return "fcmp.d\t%0, %1";
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "type" "fcmpd")
+   (set_attr "cc" "fpcompare")])
+
+(define_expand "cmpsf"
+  [(set (cc0)
+	(compare:SF
+	 (match_operand:SF 0 "general_operand" "")
+	 (match_operand:SF 1 "general_operand"  "")))]
+  "TARGET_HARD_FLOAT"
+  "{
+   rtx tmpreg;
+   if ( !REG_P(operands[0]) )
+     operands[0] = force_reg(SFmode, operands[0]);
+
+   if ( !REG_P(operands[1]) )
+     operands[1] = force_reg(SFmode, operands[1]);
+
+   avr32_compare_op0 = operands[0];
+   avr32_compare_op1 = operands[1];
+
+   emit_insn(gen_cmpsf_internal(operands[0], operands[1]));
+
+   tmpreg = gen_reg_rtx(SImode);
+   emit_insn(gen_fpcc_to_reg(tmpreg));
+   emit_insn(gen_reg_to_cc(tmpreg));
+
+   DONE;
+  }"
+)
+
+(define_insn "cmpsf_internal"
+  [(set (reg:CC FPCC_REGNUM)
+	(compare:CC
+	 (match_operand:SF 0 "avr32_fp_register_operand" "f")
+	 (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  {
+   if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
+      return "fcmp.s\t%0, %1";
+   return "";
+  }
+  [(set_attr "length" "4")
+   (set_attr "type" "fcmps")
+   (set_attr "cc" "fpcompare")])
+
+(define_insn "fpcc_to_reg"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(unspec:SI [(reg:CC FPCC_REGNUM)]
+		   UNSPEC_FPCC_TO_REG))]
+  "TARGET_HARD_FLOAT"
+  "fmov.s\t%0, fsr"
+  [(set_attr "length" "4")
+   (set_attr "type" "fmvcpu")])
+
+(define_insn "reg_to_cc"
+  [(set (cc0)
+	(unspec:SI [(match_operand:SI 0 "register_operand" "r")]
+		   UNSPEC_REG_TO_CC))]
+  "TARGET_HARD_FLOAT"
+  "musfr\t%0"
+  [(set_attr "length" "2")
+   (set_attr "type" "alu")
+   (set_attr "cc" "from_fpcc")])
+
+(define_insn "stm_fp"
+  [(unspec [(match_operand 0 "register_operand" "r")
+            (match_operand 1 "const_int_operand" "")
+            (match_operand 2 "const_int_operand" "")]
+	   UNSPEC_STMFP)]
+  "TARGET_HARD_FLOAT"
+  {
+    int cop_reglist = INTVAL(operands[1]);
+
+    if (INTVAL(operands[2]) != 0)
+      return "stcm.w\tcp0, --%0, %C1";
+    else
+      return "stcm.w\tcp0, %0, %C1";
+
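+    /* Note: only the two stcm.w returns above are reachable; the stcm.d
+       handling below is dead code as written.  */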
+    if ( cop_reglist & ~0xff ){
+      operands[1] = GEN_INT(cop_reglist & ~0xff);
+      if (INTVAL(operands[2]) != 0)
+         return "stcm.d\tcp0, --%0, %D1";
+      else
+         return "stcm.d\tcp0, %0, %D1";
+    }
+  }
+  [(set_attr "type" "fstm")
+   (set_attr "length" "4")
+   (set_attr "cc" "none")])
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/lib1funcs.S gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/lib1funcs.S
--- gcc-4.0.2/gcc/config/avr32/lib1funcs.S	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/lib1funcs.S	2006-10-10 12:36:34.000000000 +0200
@@ -0,0 +1,1678 @@
+/*#define __IEEE_LARGE_FLOATS__*/
+
+/* Adjust the unpacked double number if it is a subnormal number.
+   The exponent and mantissa pair are stored
+   in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
+   the MSB is passed in [sign]. Needs two scratch
+   registers [scratch1] and [scratch2]. An adjusted and packed double float
+   is present in [mant_hi,mant_lo] after macro has executed */
+.macro	adjust_subnormal_df	exp, mant_lo, mant_hi, sign, scratch1, scratch2	
+        /* We have an exponent which is <=0 indicating a subnormal number
+           As it should be stored as if the exponent was 1 (although the
+           exponent field is all zeros to indicate a subnormal number)
+           we have to shift down the mantissa to its correct position. */
+        neg     \exp
+        sub     \exp,-1                   /* amount to shift down */
+        cp.w    \exp,54
+        brlo    50f			/* if more than 53 shift steps, the
+                                           entire mantissa will disappear
+                                           without any rounding to occur */
+	mov	\mant_hi, 0
+	mov	\mant_lo, 0
+	rjmp	52f
+50:	
+        sub     \exp,-10                /* do the shift to position the
+                                           mantissa at the same time
+                                           note! this does not include the
+                                           final 1 step shift to add the sign */
+
+        /* when shifting, save all shifted out bits in [scratch2]. we may need to
+           look at them to make correct rounding. */
+
+        rsub    \scratch1,\exp,32       /* get inverted shift count */
+        cp.w    \exp,32                 /* handle shifts >= 32 separately */
+        brhs    51f
+
+        /* small (<32) shift amount, both words are part of the shift */
+        lsl     \scratch2,\mant_lo,\scratch1               /* save bits to shift out from lsw*/
+        lsl     \scratch1,\mant_hi,\scratch1               /* get bits from msw destined for lsw*/
+        lsr     \mant_lo,\mant_lo,\exp			   /* shift down lsw */
+        lsr     \mant_hi,\mant_hi,\exp			   /* shift down msw */
+        or      \mant_hi,\scratch1                         /* add bits from msw with prepared lsw */
+        rjmp    50f
+
+        /* large (>=32) shift amount, only lsw will have bits left after shift.
+           note that shift operations will use ((shift count) mod 32) so
+           we do not need to subtract 32 from shift count. */
+51:
+        lsl     \scratch2,\mant_hi,\scratch1               /* save bits to shift out from msw */
+        or      \scratch2,\mant_lo                         /* also save all bits from lsw */
+        mov     \mant_lo,\mant_hi                          /* msw -> lsw (i.e. "shift 32 first") */
+        mov     \mant_hi,0                                 /* clear msw */
+        lsr     \mant_lo,\mant_lo,\exp                     /* make rest of shift inside lsw */
+
+50:
+        /* result is almost ready to return, except that least significant bit
+           and the part we already shifted out may cause the result to be
+           rounded */
+        bld     \mant_lo,0                   /* get bit to be shifted out */
+        brcc    51f			     /* if bit was 0, no rounding */
+
+        /* msb of part to remove is 1, so rounding depends on rest of bits */
+        tst     \scratch2,\scratch2                   /* get shifted out tail */
+        brne    50f     /* if rest > 0, do round */
+        bld     \mant_lo,1                   /* we have to look at lsb in result */
+        brcc    51f   /* if lsb is 0, don't round */
+
+50:
+        /* subnormal result requires rounding
+           rounding may cause subnormal to become smallest normal number
+           luckily, smallest normal number has exactly the representation
+           we got by rippling a one bit up from mantissa into exponent field. */
+        sub     \mant_lo,-1
+        subcc   \mant_hi,-1
+
+51:
+        /* shift and return packed double with correct sign */
+	rol	\sign
+        ror     \mant_hi
+        ror     \mant_lo	
+52:	
+.endm
+
+
+/* Adjust subnormal single float number with exponent [exp]
+   and mantissa [mant] and round.    */
+.macro	adjust_subnormal_sf	sf, exp, mant, sign, scratch
+		/* subnormal number */
+        rsub    \exp,\exp, 1		/* shift amount */
+        cp.w    \exp, 25
+	movhs	\mant, 0		
+        brhs    90f			/* Return zero */
+	rsub	\scratch, \exp, 32
+	lsl	\scratch, \mant,\scratch/* Check if there are any bits set
+					   in the bits discarded in the mantissa */
+	srne	\scratch		/* If so set the lsb of the shifted mantissa */	
+        lsr     \mant,\mant,\exp	/* Shift the mantissa */
+	or	\mant, \scratch		/* Round lsb if any bits were shifted out  */
+	/* Rounding:	For explanation, see round_sf. */
+	mov	\scratch, 0x7f		/* Set rounding constant */
+	bld	\mant, 8		
+	subeq	\scratch, -1		/* For odd numbers use rounding constant 0x80 */
+	add	\mant, \scratch		/* Add rounding constant to mantissa */
+	/* We can't overflow because mantissa is at least shifted one position
+	   to the right so the implicit bit is zero. We can however get the implicit
+	   bit set after rounding which means that we have the lowest normal number
+	   but this is ok since this bit has the same position as the LSB of the
+	   exponent */
+	lsr	\sf, \mant, 7
+	/* Rotate in sign */
+	lsl	\sign, 1
+	ror	\sf
+90:	
+.endm
+
+
+/* Round the unpacked df number with exponent [exp] and
+   mantissa [mant_hi, mant_lo]. Uses scratch register
+   [scratch] */
+.macro	round_df	exp, mant_lo, mant_hi, scratch
+        mov     \scratch, 0x3ff		/* Rounding constant */
+        bld     \mant_lo,11		/* Check if lsb in the final result is	
+					   set */
+	subeq	\scratch, -1		/* Adjust rounding constant to 0x400
+					   if rounding 0.5 upwards */	
+	add	\mant_lo, \scratch	/* Round */
+        acr	\mant_hi		/* If overflowing we know that
+					   we have all zeros in the bits not
+					   scaled out so we can leave them
+					   but we must increase the exponent with
+					   two since we had an implicit bit
+				           which is lost + the extra overflow bit */
+	subcs	\exp, -2		/* Update exponent */
+.endm		
+
+/* Round single float number stored in [mant] and [exp] */
+.macro	round_sf	exp, mant, scratch
+	/* Round:	
+		For 0.5 we round to nearest even integer
+		for all other cases we round to nearest integer.
+		This means that if the digit left of the "point" (.)
+		is 1 we can add 0x80 to the mantissa since the
+		corner case 0x180 will round up to 0x200. If the
+		digit left of the "point" is 0 we will have to
+		add 0x7f since this will give 0xff and hence a
+		truncation/rounding downwards for the corner
+		case when the 9 lowest bits are 0x080 */
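+	/* Illustrative corner case: with the nine low bits equal to 0x080
+	   (exactly half an ULP) and bit 8 clear, adding 0x7f gives 0x0ff, no
+	   carry reaches bit 8 and the value truncates (round down to even);
+	   with bit 8 set the nine low bits are 0x180, and adding 0x80 gives
+	   0x200, carrying into bit 9 and rounding the odd mantissa up to even. */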
+	mov	\scratch, 0x7f	/* Set rounding constant */
+	/* Check if the mantissa is even or odd */
+	bld	\mant, 8
+	subeq	\scratch, -1	/* Rounding constant should be 0x80 */
+	add	\mant, \scratch
+	subcs	\exp, -2	/* Adjust exponent if we overflowed */		
+.endm
+
+/* Scale mantissa [mant_hi, mant_lo] with amount [shift_count].
+   Uses scratch registers [scratch1] and [scratch2] */
+.macro	scale_df	shift_count, mant_lo, mant_hi, scratch1, scratch2	
+        /* Scale [mant_hi, mant_lo] with shift_amount.
+	   Must not forget the sticky bits we intend to shift out. */
+
+        rsub    \scratch1,\shift_count,32/* get (32 - shift count)
+                                           (if shift count > 32 we get a
+                                           negative value, but that will
+                                           work as well in the code below.) */
+
+        cp.w    \shift_count,32          /* handle shifts >= 32 separately */
+        brhs    70f
+
+        /* small (<32) shift amount, both words are part of the shift
+           first remember whether part that is lost contains any 1 bits ... */
+        lsl     \scratch2,\mant_lo,\scratch1  /*shift away bits that are part of
+						final mantissa. only part that goes
+						to scratch2 are bits that will be lost */
+
+        /* ... and now to the actual shift */
+        lsl     \scratch1,\mant_hi,\scratch1  /* get bits from msw destined for lsw*/
+        lsr     \mant_lo,\mant_lo,\shift_count   /* shift down lsw of mantissa */
+        lsr     \mant_hi,\mant_hi,\shift_count   /* shift down msw of mantissa */
+        or      \mant_lo,\scratch1               /* combine these bits with prepared lsw*/
+        rjmp    71f
+
+        /* large (>=32) shift amount, only lsw will have bits left after shift.
+           note that shift operations will use ((shift count) mod 32) so
+           we do not need to subtract 32 from shift count. */
+70:
+        /* first remember whether part that is lost contains any 1 bits ... */
+        lsl     \scratch2,\mant_hi,\scratch1   /* save all lost bits from msw */
+        or      \scratch2,\mant_lo             /* also save lost bits (all) from lsw
+                                                  now scratch2<>0 if we lose any bits */
+
+        /* ... and now to the actual shift */
+        mov     \mant_lo,\mant_hi             /* msw -> lsw (i.e. "shift 32 first")*/
+        mov     \mant_hi,0                    /* clear msw */
+        lsr     \mant_lo,\mant_lo,\shift_count /* make rest of shift inside lsw*/
+
+71:
+        cp.w    \scratch2,0                    /* if any '1' bit in part we lost ...*/
+        breq    70f
+
+        sbr     \mant_lo,0                    /* ... we need to set sticky bit*/
+70:		
+.endm
+		
+/* Unpack exponent and mantissa from the double number
+   stored in [df_hi,df_lo]. The exponent is stored in [exp]
+   while the mantissa is stored in [df_hi,df_lo]. */
+	
+.macro	unpack_df		exp, df_lo, df_hi
+	lsr     \exp, \df_hi,21               /* Extract exponent */
+        lsl     \df_hi,10                     /* Get mantissa */
+        or      \df_hi,\df_hi,\df_lo>>21
+        lsl     \df_lo,11
+
+        neg     \exp			      /* Fix implicit bit */
+        bst     \df_hi,31
+        subeq   \exp,1
+	neg     \exp			      /* negate back exponent */
+	.endm
+	
+/* Unpack exponent and mantissa from the single float number
+   stored in [sf]. The exponent is stored in [exp]
+   while the mantissa is stored in [sf]. */ 	
+.macro	unpack_sf		exp, sf
+	lsr	\exp, \sf, 24
+	brne	80f
+       	/* Fix subnormal number */
+        lsl     \sf,7
+        clz     \exp,\sf
+        lsl     \sf,\sf,\exp
+        rsub	\exp,\exp,1
+	rjmp	81f
+80:	
+        lsl     \sf,7
+	sbr	\sf, 31 /*Implicit bit*/
+81:	
+.endm
+
+
+
+/* Pack a single float number stored in [mant] and [exp]
+   into a single float number in [sf]  */
+.macro	pack_sf	sf, exp, mant
+        bld     \mant,31                  /* implicit bit to z */
+        subne   \exp,1                   /* if subnormal (implicit bit 0)
+                                          adjust exponent to storage format */
+	
+	lsr	\sf, \mant, 7
+	bfins	\sf, \exp, 24, 8
+.endm	
+
+/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
+   into [df_hi, df_lo].  [df_hi] is shifted
+   one bit up so the sign bit can be shifted into it */
+	
+.macro	pack_df		exp, mant_lo, mant_hi, df_lo, df_hi
+        bld     \mant_hi,31                  /* implicit bit to z */
+        subne   \exp,1                   /* if subnormal (implicit bit 0)
+                                          adjust exponent to storage format */
+
+        lsr     \mant_lo,11                  /* shift back lsw */
+        or      \df_lo,\mant_lo,\mant_hi<<21          /* combine with low bits from msw */
+        lsl     \mant_hi,1                   /* get rid of implicit bit */
+        lsr     \mant_hi,11                  /* shift back msw except for one step*/
+        or      \df_hi,\mant_hi,\exp<<21          /* combine msw with exponent */
+.endm
+
+/* Normalize single float number stored in [mant] and [exp]
+   using scratch register [scratch] */
+.macro	normalize_sf	exp, mant, scratch
+	/* Adjust exponent and mantissa */
+	clz	\scratch, \mant
+	sub	\exp, \scratch
+	lsl	\mant, \mant, \scratch
+.endm
+
+/* Normalize the exponent and mantissa pair stored
+   in [mant_hi,mant_lo] and [exp]. Needs two scratch
+   registers [scratch1] and [scratch2]. */
+.macro	normalize_df		exp, mant_lo, mant_hi, scratch1, scratch2
+        clz     \scratch1,\mant_hi     /* Check if we have zeros in high bits */
+        breq    80f                     /* No need for scaling if no zeros in high bits */
+        cp.w    \scratch1,32           /* Check for all zeros */
+        breq    81f		
+
+        /* shift amount is smaller than 32, and involves both msw and lsw*/
+        rsub    \scratch2,\scratch1,32  /* shift mantissa */
+        lsl     \mant_hi,\mant_hi,\scratch1
+        lsr     \scratch2,\mant_lo,\scratch2
+        or      \mant_hi,\scratch2
+        lsl     \mant_lo,\mant_lo,\scratch1
+        sub     \exp,\scratch1          /* adjust exponent */
+	rjmp	80f			/* Finished */	
+81:
+        /* shift amount is greater than 32 */
+        clz     \scratch1,\mant_lo      /* shift mantissa */
+        sub     \scratch1,-32
+        mov     \mant_hi,\mant_lo
+        lsl     \mant_hi,\mant_hi,\scratch1
+        mov     \mant_lo,0
+        sub     \exp,\scratch1          /* adjust exponent */
+80:	
+.endm
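
The normalization step above is, in effect, a count-leading-zeros followed by a left shift and a matching exponent correction. A minimal C model, illustrative only, using the GCC builtin __builtin_clzll for the zero count instead of the two-register clz sequence:

#include <stdint.h>

static void normalize64(int32_t *exp, uint64_t *mant)
{
    int shift;

    if (*mant == 0)
        return;                          /* nothing to normalize            */
    shift  = __builtin_clzll(*mant);     /* leading zeros, 0..63            */
    *mant <<= shift;                     /* bring the top set bit to bit 63 */
    *exp  -= shift;                      /* keep the value unchanged        */
}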
+	
+
+#ifdef L_avr32_f64_mul
+	.align	2
+	.global __avr32_f64_mul
+	.type  __avr32_f64_mul,@function
+
+__avr32_f64_mul:
+	pushm	r0-r3,r4-r7,lr
+	
+        /* Unpack */
+	eor	r12, r11, r9		/* Sign op1 ^ Sign op2 is MSB of r12*/
+        lsl     r11,1                   /* Unpack op1 */
+        lsl     r9,1                    /* Unpack op2 */
+
+        /* Sort operands op1 >= op2 */
+        lddpc   r5, .Linf
+        cp.w    r10,r8
+        cpc     r11,r9
+        brhs    0f
+
+        mov     r7,r11                  /* swap operands if op2 was larger */
+        mov     r6,r10
+        mov     r11,r9
+        mov     r10,r8
+        mov     r9,r7
+        mov     r8,r6
+
+0:
+	/* Check against infinity */
+        cp.w    r11,r5
+        brlo    1f
+	/* infinity or nan */
+        /* we have to check the low word as well, since a nan mantissa may be 0 in the msw*/
+        cpc     r10
+        /* we know that op1 is inf or nan. if z != 1 then we have nan.
+            in this case, also return nan. */
+        breq    0f
+	/* Return NaN */
+	mov	r11, -1
+	rjmp	__dfmul_return_op1	
+0:	
+
+        /* op1 is infinity. op2 is smaller or same so it cannot be nan.
+           it can be infinity or a (sub-)normal number.
+           we should return op1 (infinity), except when op2 is zero, in which
+           case the result should be nan. */
+        or      r5,r9,r8
+        brne    __dfmul_return_op1       /* op2 is not zero. return op1.*/
+	/* Return NaN */
+	mov	r11, -1
+	rjmp	__dfmul_return_op1	
+	
+1:	
+        /* no operand is inf/nan, and operands have been arranged in order
+           with op1 >= op2, implying that if we have a zero, it is found in
+           op2. in this case, result should be zero (with sign from both ops). */
+
+        or      r5,r9,r8                /* check the smaller value for zero */
+        brne    0f
+	mov	r10, 0
+	mov	r11, 0
+	rjmp	__dfmul_return_op1			/* Early exit */
+0:		
+
+        /* we have two "normal" (can be subnormal) nonzero numbers in r11:r10
+           and r9:r8. sign of result is already calculated in r12.
+           perform a normal multiplication. */
+
+        /* Unpack and normalize*/
+	unpack_df	r7 /*exp*/, r10, r11 /* mantissa */
+	normalize_df	r7 /*exp*/, r10, r11 /* mantissa */, r4, r5 /* scratch */
+	
+
+        /* Unpack and normalize*/
+	unpack_df	r6 /*exp*/, r8, r9 /* mantissa */
+	normalize_df	r6 /*exp*/, r8, r9 /* mantissa */, r4, r5 /* scratch */
+
+        /* Multiply */
+
+        mulu.d  r0,r10,r8
+        add     lr,r7,r6                   /* calculate new exponent after mul */
+        mulu.d  r2,r11,r8
+        sub     lr,(1023-1)             /* remove exponent bias as we have
+                                           included bias from both op1 and op2
+                                           sub one less, or in other words
+                                           add one to exponent. see below why. */
+        mulu.d  r6,r11,r9
+        add     r2,r1
+        mulu.d  r4,r10,r9
+
+
+        adc     r6,r6,r3
+        acr     r7
+
+        add     r4,r2
+        adc     r6,r6,r5
+        acr     r7
+
+        // r7:r6 is now in range [0x4000...0000 - 0xffff...fffe]
+        // remaining bits in r0 and r4 are of no interest, except that we have
+        // to add a sticky bit to r10 in case we had a 1 bit in r4 or r0.
+
+        or      r4,r0
+	movne	r0, 1			/* If we have bits in r4 or r0 */
+        or      r6,r0                   /*   set lsb of result to 1 */
+
+
+        // if msb is set, it was because multiplication gave an "overflow"
+        // of one bit so exponent should be incremented.
+        // we already did that above so we are done.
+        // if msb is *not* set it will be normalized and exponent will be
+        // decremented (which will compensate the one we added above).
+
+	normalize_df	lr /*exp*/, r6, r7 /* mantissa */, r8, r9 /* scratch */
+
+	/* Check if a subnormal result was created */
+	cp.w	lr, 0
+	brgt	0f
+	
+	adjust_subnormal_df	lr, r6, r7, r12, r8, r9			
+	mov	r10, r6
+	mov	r11, r7
+	popm	r0-r3,r4-r7, pc
+0:
+	
+        /* Round result */
+        round_df	lr /*exp*/, r6, r7 /* Mantissa */, r4 /*scratch*/
+        cp.w    lr,0x7ff
+        brlt    0f
+	/*Return infinity */
+	lddpc	r11, .Linf
+	mov	r10, 0
+	rjmp	__dfmul_return_op1
+	
+0:
+
+	/* Pack */
+	pack_df	lr /*exp*/, r6, r7 /* mantissa */, r10, r11 /* Output df number*/
+__dfmul_return_op1:	
+        lsl     r12,1		       /* shift in sign bit */
+        ror     r11
+	
+	popm	r0-r3,r4-r7, pc
+	
+#endif
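
The core of the multiplication above is a 64x64 -> 128-bit product built from four 32x32 partial products; only the upper 64 bits are kept, and any nonzero lower bits are folded into a sticky bit for rounding. A C sketch of that scheme (function name and types are illustrative, not part of the patch):

#include <stdint.h>

static uint64_t mul_mantissa(uint64_t a, uint64_t b, int *sticky)
{
    uint64_t a_hi = a >> 32, a_lo = (uint32_t)a;
    uint64_t b_hi = b >> 32, b_lo = (uint32_t)b;

    uint64_t lo_lo = a_lo * b_lo;                  /* bits   0..63           */
    uint64_t hi_lo = a_hi * b_lo;                  /* bits  32..95           */
    uint64_t lo_hi = a_lo * b_hi;                  /* bits  32..95           */
    uint64_t hi_hi = a_hi * b_hi;                  /* bits  64..127          */

    uint64_t mid  = (lo_lo >> 32) + (uint32_t)hi_lo + (uint32_t)lo_hi;
    uint64_t high = hi_hi + (hi_lo >> 32) + (lo_hi >> 32) + (mid >> 32);
    uint64_t low  = (mid << 32) | (uint32_t)lo_lo;

    *sticky = (low != 0);    /* any discarded bit must still affect rounding */
    return high;             /* upper 64 bits of the 128-bit product         */
}

With both inputs normalized (bit 63 set), the returned value lies in [2^62, 2^64), which is why the exponent correction above subtracts one less than the full bias and lets the later normalization step compensate.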
+		
+
+#ifdef	L_avr32_f64_addsub	
+	.align	2
+	.global __avr32_f64_sub
+	.type  __avr32_f64_sub,@function
+
+__avr32_f64_sub:
+	pushm	r4-r7,lr	
+
+        eor     r12,r11,r9               // compare signs of operands
+        bld     r12,31
+        brcc    __dfsub                 // same sign => subtract
+
+        eorh    r9,0x8000
+        rjmp    __dfadd               // different signs => op1 + (-op2)
+__dfsub:
+
+        lsl     r11,1                   // unpack op1 msw and get sign in c
+        or      r4,r11,r10              // check if all bits zero
+        brne    1f
+
+        // op1 is zero, negate op2 and handle as add
+        eorh    r9,0x8000
+        // op1 is +/-0, and is unpacked with sign in c. add to op2.
+        // also used by sub, but op2 has been negated in this case
+        ror     r12                     // save sign of op1 in msb of r12
+        lsl     r9,1                    // unpack msw and get sign of op2
+        or      r4,r9,r8                // check all bits in op2
+        breq    0f
+
+        // if op2 != 0, then return op2 unchanged.
+        ror     r9                      // pack op2 msw again with sign from c
+        mov     r11,r9
+        mov     r10,r8
+        popm    r4-r7,pc
+
+0:
+        // both op1 and op2 are zero, but signs unknown. the result sign is the and of the signs.
+        ror     r9                      // pack op2 msw again with sign from c
+        lsl     r12,1                   // get back sign of op1 into c ...
+        ror     r11                     // and back in original op1
+        and     r11,r9                  // and sign bits. as op1 is zero, the
+                                        // only bit which can be 1 is sign bit
+	popm	r4-r7,pc
+
+1:
+        ror     r12                     // save op1 sign in msb of r12
+
+        lsl     r9,1                    // unpack op2 msw
+        or      r4,r8,r9
+        brne    0f
+					   // op2 is zero, return op1
+                                        // whatever it is. the only case
+                                        // requiring special handling is if
+                                        // op1 is zero, but that was handled
+                                        // above.
+	lsl	r12, 1
+	ror	r11
+	popm	r4-r7,pc
+	
+0:	
+        // make sure that op1 >= op2, flip sign if we swap ops
+        cp.w    r10,r8
+        cpc     r11,r9
+        brhs    0f
+
+        com     r12                     // sign of op1 and result in lsb(r12)
+        mov     r7,r11                  // swap operands if op2 was larger
+        mov     r6,r10
+        mov     r11,r9
+        mov     r10,r8
+        mov     r9,r7
+        mov     r8,r6
+
+0:
+        // check if op1 is nan or inf.
+        lddpc   r5,.Linf
+        cp.w    r11,r5
+        brlo    1f
+	/* Op 1 is nan or inf */
+        // we have to check the low word as well, since a nan mantissa may be 0 in the msw
+        cpc     r10
+        // we know that op1 is inf or nan. if z != 1 then we have nan.
+        // if we have nan, return nan.
+        breq  0f
+	mov	r11, -1
+	rjmp	__dfsub_return_op1			
+0:
+
+        // op1 is infinity. check if op2 is nan, infinity or a normal number.
+        cp.w    r9,r5
+	movhs	r11, -1     // op2 is inf or nan => result is nan
+
+        // op2 can be infinity (of the same sign as op1) or nan.
+        // in both cases we should return nan.
+        rjmp    __dfsub_return_op1
+1:	
+        // if op1 is not inf or nan, then op2 cannot be since op1 >= op2
+
+        // now prepare the operands by expanding them and shifting op2
+        // to the correct position for the subtract. note! if op2 is
+        // insignificant compared to op1, the function will take care of
+        // this and return op1 directly to the application.
+	
+	/* Unpack operands */
+	unpack_df	r7 /* exp op1*/, r10, r11 /* Mantissa op1 */	
+	unpack_df	r6 /* exp op2*/, r8, r9 /* Mantissa op2 */	
+
+	/* Get shift amount required for aligning op1 and op2 */
+	rsub	r6, r7
+	breq	__perform_dfsub	/* No shift needed */
+
+	cp.w	r6, 63
+	brhs	__dfsub_pack_result	/* Op 2 insignificant compared to op1 */
+
+	/* Shift mantissa of op2 so that op1 and op2 are aligned */
+	scale_df	r6 /* shift_count*/, r8, r9 /* Mantissa */, r4, r5 /*Scratch*/
+	
+__perform_dfsub:			
+        sub     r10,r8                  /* subtract mantissa of op2 from op1 */
+        sbc     r11,r11,r9
+        or      r4,r11,r10              /* check if result is all zeroes */
+	brne	0f
+	popm	r4-r7,pc		/* Early return */
+0:	
+	
+	normalize_df	r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */
+
+	/* Check if a subnormal result was created */
+	cp.w	r7, 0
+	brgt	0f
+	
+	adjust_subnormal_df	r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/			
+	popm	r4-r7,pc
+0:
+	
+        /* Round result */
+        round_df	r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/
+        cp.w    r7,0x7ff
+        brlt    __dfsub_pack_result
+	/*Return infinity */
+	lddpc	r11, .Linf
+	mov	r10, 0
+	rjmp	__dfsub_return_op1
+	
+__dfsub_pack_result:
+	/* Pack */
+	pack_df	r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
+
+__dfsub_return_op1:
+	lsl	r12,1
+	ror	r11
+	popm	r4-r7,pc
+	
+	.align	2
+	.global __avr32_f64_add
+	.type  __avr32_f64_add,@function
+__avr32_f64_add:	
+	pushm	r4-r7,lr
+        eor     r12,r11,r9               // compare signs of operands
+        lsl     r12,1
+        brcc    __dfadd                 // same sign => add
+
+        eorh    r9,0x8000
+        rjmp    __dfsub               // different signs => op1 - (-op2)
+__dfadd:
+
+        lsl     r11,1                   // unpack op1 msw and get sign in c
+        or      r4,r11,r10              // check if all bits zero
+        brne    1f
+
+        // op1 is +/-0, and is unpacked with sign in c. add to op2.
+        // also used by sub, but op2 has been negated in this case
+        ror     r12                     // save sign of op1 in msb of r12
+        lsl     r9,1                    // unpack msw and get sign of op2
+        or      r4,r9,r8                // check all bits in op2
+        breq    0f
+
+        // if op2 != 0, then return op2 unchanged.
+        ror     r9                      // pack op2 msw again with sign from c
+        mov     r11,r9
+        mov     r10,r8
+        popm    r4-r7,pc
+
+0:
+        // both op1 and op2 are zero, but signs unknown. the result sign is the and of the signs.
+        ror     r9                      // pack op2 msw again with sign from c
+        lsl     r12,1                   // get back sign of op1 into c ...
+        ror     r11                     // and back in original op1
+        and     r11,r9                  // and sign bits. as op1 is zero, the
+                                        // only bit which can be 1 is sign bit
+	popm	r4-r7,pc
+1:	
+        ror     r12                     // save op1 sign in msb of r12
+
+        lsl     r9,1                    // unpack op2 msw
+        or      r4,r8,r9
+        brne    0f
+					   // op2 is zero, return op1
+                                        // whatever it is. the only case
+                                        // requiring special handling is if
+                                        // op1 is zero, but that was handled
+                                        // above.
+	lsl	r12, 1
+	ror	r11
+	popm	r4-r7,pc
+0:	
+        // make sure that exp[op1] >= exp[op2]
+        cp.w    r11,r9
+        brhs    0f
+
+        mov     r7,r11                  // swap operands if op2 was larger
+        mov     r6,r10
+        mov     r11,r9
+        mov     r10,r8
+        mov     r9,r7
+        mov     r8,r6
+
+0:
+        // check if op1 is nan or inf.
+        lddpc   r5,.Linf
+        cp.w    r11,r5
+        brlo    1f
+	/* Op 1 is nan or inf */
+        // we have to check the low word as well, since a nan mantissa may be 0 in the msw
+        cpc     r10
+        // we know that op1 is inf or nan. if z != 1 then we have nan.
+        // if we have nan, return nan.
+        breq  0f
+	mov	r11, -1
+	rjmp	__dfadd_return_op1			
+0:
+
+        // op1 is infinity. check if op2 is nan, infinity or a normal number.
+        cp.w    r9,r5
+        // Op2 is NaN or Inf. Return op2, but with the sign of the result.
+        // If Op2 is NaN the sign doesn't matter, so there is no need to treat NaN separately.
+	movhs	r11, r9
+	movhs	r10, r8
+
+        // otherwise op2 is an ordinary number and op1 (infinity) is
+        // returned unchanged.
+        rjmp    __dfadd_return_op1
+1:	
+        // if op1 is not inf or nan, then op2 cannot be since exp[op1] >=
+        // exp[op2]
+
+        // now prepare the operands by expanding them and shifting op2
+        // to the correct position for the add. note! if op2 is
+        // insignificant compared to op1, the function will take care of
+        // this and return op1 directly to the application.
+
+	/* Unpack operands */
+	unpack_df	r7 /* exp op1*/, r10, r11 /* Mantissa op1 */	
+	unpack_df	r6 /* exp op2*/, r8, r9 /* Mantissa op2 */	
+
+	/* Get shift amount required for aligning op1 and op2 */
+	rsub	r6, r7
+	breq	__perform_dfadd	/* No shift needed */
+
+	cp.w	r6, 63
+	brhs	__dfadd_pack_result	/* Op 2 insignificant compared to op1 */
+
+	/* Shift mantissa of op2 so that op1 and op2 are aligned */
+	scale_df	r6 /* shift_count*/, r8, r9 /* Mantissa */, r4, r5 /*Scratch*/
+
+__perform_dfadd:	
+        add     r10,r8                  // add mantissas
+        adc     r11,r11,r9
+	brcc	0f		
+        ror     r11                     // overflow => shift down mantissa
+        ror     r10
+        brcc    1f                 // sticky bit shifted out?
+        sbr     r10,0                   // if so, merge it into result again
+1:
+        sub     r7,-1                   // increase exponent with 1
+0:
+	normalize_df	r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */
+
+	/* Check if a subnormal result was created */
+	cp.w	r7, 0
+	brgt	0f
+	
+	adjust_subnormal_df	r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/			
+	popm	r4-r7,pc
+0:
+	
+        /* Round result */
+        round_df	r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/
+        cp.w    r7,0x7ff
+        brlt    __dfadd_pack_result
+	/*Return infinity */
+	lddpc	r11, .Linf
+	mov	r10, 0
+	rjmp	__dfadd_return_op1
+	
+__dfadd_pack_result:
+	/* Pack */
+	pack_df	r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
+
+__dfadd_return_op1:
+	lsl	r12,1
+	ror	r11
+	popm	r4-r7,pc
+#endif
+
+#ifdef L_avr32_f64_to_u32
+	/* This goes into L_fixdfsi */
+#endif
+	
+
+#ifdef L_avr32_f64_to_s32
+	.global __avr32_f64_to_u32
+	.type  __avr32_f64_to_u32,@function
+__avr32_f64_to_u32:
+	cp.w	r11, 0
+	retmi	0	/* Negative returns 0 */
+#ifdef __LARGE_FLOATS__
+        lsl     r12,r11,1
+        lsr     r12,21                  /* extract exponent*/
+        sub     r12,1023                /* convert to unbiased exponent.*/
+        retlo   0                       /* too small exponent implies zero. */
+        cp.w    r12,32
+        brcc    0f	
+	rjmp	1f
+#endif
+
+	/* Fallthrough to df to signed si conversion */	
+	.global __avr32_f64_to_s32
+	.type  __avr32_f64_to_s32,@function
+__avr32_f64_to_s32:
+        lsl     r12,r11,1
+        lsr     r12,21                  /* extract exponent*/
+        sub     r12,1023                /* convert to unbiased exponent.*/
+        retlo   0                       /* too small exponent implies zero. */
+
+#ifdef __LARGE_FLOATS__
+        cp.w    r12,31
+        brcc    0f
+#endif
+1:	
+        rsub    r12,r12,31              /* shift count = 31 - exponent */
+        mov     r9,r11                  /* save sign for later...*/
+        lsl     r11,11                  /* remove exponent and sign*/
+        sbr     r11,31                  /* add implicit bit*/
+        or      r11,r11,r10>>21         /* get rest of bits from lsw of double */
+        lsr     r11,r11,r12             /* shift down mantissa to final place */
+        lsl     r9,1                    /* sign -> carry */
+        retcc   r11                     /* if positive, we are done */
+        neg     r11                     /* if negative float, negate result */
+        ret     r11
+
+#ifdef __LARGE_FLOATS__
+0:	
+        mov     r12,-1                  /* r11 = 0xffffffff */
+        lsr     r12,1                   /* r11 = 0x7fffffff */
+        lsl     r11,1                   /* sign -> carry */
+        acr     r12                     /* r11 = signed ? 0x80000000
+                                                        : 0x7fffffff */
+        ret     r12
+#endif
+#endif	/* L_fixdfsi*/
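
The conversion above boils down to: reject negative values and too-small exponents, rebuild the top 32 bits of the significand with the implicit bit set, then shift it down by 31 minus the unbiased exponent and apply the sign. A rough C equivalent of the default (non-__LARGE_FLOATS__) signed path, for illustration only and assuming the value fits in a 32-bit integer:

#include <stdint.h>

static int32_t double_bits_to_s32(uint64_t bits)
{
    int exp = (int)((bits >> 52) & 0x7ff) - 1023;   /* unbiased exponent    */
    uint32_t mant, mag;

    if (exp < 0)
        return 0;                                   /* |value| < 1 -> 0     */

    /* top 32 bits of the significand, implicit bit included */
    mant = 0x80000000u | (((uint32_t)(bits >> 21)) & 0x7fffffffu);
    mag  = mant >> (31 - exp);                      /* assumes exp <= 31    */

    return (bits >> 63) ? (int32_t)(0u - mag)       /* apply the sign       */
                        : (int32_t)mag;
}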
+
+#ifdef L_avr32_f64_to_u64
+	/* Actual function is in L_fixdfdi */
+#endif
+	
+#ifdef L_avr32_f64_to_s64
+	.global __avr32_f64_to_u64
+	.type  __avr32_f64_to_u64,@function
+__avr32_f64_to_u64:
+	cp.w	r11,0
+	/* Negative numbers return zero */
+	movmi	r10, 0
+	movmi	r11, 0
+	retmi	r11
+#ifdef __LARGE_FLOATS__
+        lsl     r9,r11,1
+        lsr     r9,21                   /* get exponent*/
+        sub     r9,1023                 /* convert to correct range*/
+	/* Return zero if exponent too small */
+	movlo	r10, 0
+	movlo	r11, 0
+	retlo	r11
+        cp.w    r9,64
+        mov     r8,r11                  /* save sign for later...*/
+        brcs    1f
+	rjmp	2f			/* Number too large */
+
+#endif
+
+	
+
+	/* Fallthrough */
+	.global __avr32_f64_to_s64
+	.type  __avr32_f64_to_s64,@function
+__avr32_f64_to_s64:
+        lsl     r9,r11,1
+        lsr     r9,21                   /* get exponent*/
+        sub     r9,1023                 /* convert to correct range*/
+	/* Return zero if exponent too small */
+	movlo	r10, 0
+	movlo	r11, 0
+	retlo	r11
+
+#ifdef __LARGE_FLOATS__
+        cp.w    r9,63
+        mov     r8,r11                  /* save sign for later...*/
+        brcc    2f
+#else
+        mov     r8,r11                  /* save sign for later...*/
+#endif
+1:	
+        lsl     r11,11                  /* remove exponent */
+        sbr     r11,31                  /* add implicit bit*/
+        or      r11,r11,r10>>21         /* get rest of bits from lsw of double*/
+        lsl     r10,11                  /* align lsw correctly as well */
+        rsub    r9,r9,63                /* shift count = 63 - exponent */
+        breq    1f
+
+        cp.w    r9,32                   /* is shift count more than one reg? */
+        brhs    0f
+
+        mov     r12,r11                 /* save msw */
+        lsr     r10,r10,r9              /* small shift count, shift down lsw */
+        lsr     r11,r11,r9              /* small shift count, shift down msw */
+        rsub    r9,r9,32                /* get 32-size of shifted out tail */
+        lsl     r12,r12,r9              /* align part to move from msw to lsw */
+        or      r10,r12                 /* combine to get new lsw */
+        rjmp    1f
+
+0:
+        lsr     r10,r11,r9              /* large shift count, only the lsw gets bits;
+                                           note that the shift count is modulo 32*/
+        mov     r11,0                   /* msw will be 0 */
+
+1:
+        lsl     r8,1                    /* sign -> carry */
+        retcc   r11                     /* if positive, we are done */
+
+        neg     r11                     /* if negative float, negate result */
+        neg     r10
+        scr     r11
+        ret     r11
+
+
+#ifdef __LARGE_FLOATS__
+2:
+        mov     r11,-1                  /* r11 = 0xffffffff */
+        lsr     r11,1                   /* r11 = 0x7fffffff */
+        lsl     r8,1                    /* sign -> carry */
+        acr     r11                     /* r11 = signed ? 0x80000000 */
+                                        /*              : 0x7fffffff */
+        lsl     r10,r11,31              /* extend last bit of msw*/
+        asr     r10,31
+        ret     r11
+#endif
+#endif
+
+#ifdef L_avr32_u32_to_f64
+	/* Code located in L_floatsidf */
+#endif
+	
+#ifdef L_avr32_s32_to_f64
+	.global __avr32_u32_to_f64
+	.type  __avr32_u32_to_f64,@function
+__avr32_u32_to_f64:
+	sub	r11, r12, 0 /* Move to r11 and force Z flag to be updated */
+	mov	r12, 0	    /* always positive */
+	rjmp	0f	    /* Jump to common code for floatsidf */
+	
+	.global __avr32_s32_to_f64
+	.type  __avr32_s32_to_f64,@function
+__avr32_s32_to_f64:
+	mov	r11, r12	/* Keep original value in r12 for sign */
+	abs	r11		/* Absolute value of r12 */
+0:	
+        mov     r10,0           /* let remaining bits be zero */
+        reteq   r11		/* zero long will return zero float */
+
+	pushm	lr
+        mov     r9,31+1023              /* set exponent */
+		
+	normalize_df	r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
+
+	/* Check if a subnormal result was created */
+	cp.w	r9, 0
+	brgt	0f
+	
+	adjust_subnormal_df	r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
+	popm	pc
+0:
+	
+        /* Round result */
+        round_df	r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
+        cp.w    r9,0x7ff
+        brlt    0f
+	/*Return infinity */
+	lddpc	r11, .Linf
+	mov	r10, 0
+	rjmp	__floatsidf_return_op1
+	
+0:
+
+	/* Pack */
+	pack_df	r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
+__floatsidf_return_op1:	
+        lsl     r12,1		       /* shift in sign bit */
+        ror     r11
+
+	popm	pc
+#endif
+
+
+#ifdef L_avr32_f32_cmp_eq
+	.global __avr32_f32_cmp_eq
+	.type  __avr32_f32_cmp_eq,@function
+__avr32_f32_cmp_eq:	
+	cp.w	r12, r11
+	brne	0f	/* If not equal check for +/-0 */
+	
+	/* Check for NaN or Inf */
+        lddpc   r11,.Linf_sf
+	lsl	r12, 1
+	cp.w	r12, r11
+	srls	r12	/* 0 if NaN, 1 otherwise */
+	ret	r12	
+0:	
+	/* Or together the two values and shift out the sign bit.
+	   If the result is zero, then the two values are both zero. */
+	or	r12, r11
+	lsl	r12, 1
+	sreq	r12
+	ret	r12
+#endif
+	
+#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
+#ifdef L_avr32_f32_cmp_ge
+	.global __avr32_f32_cmp_ge
+	.type  __avr32_f32_cmp_ge,@function
+__avr32_f32_cmp_ge:
+#endif	
+#ifdef L_avr32_f32_cmp_lt
+	.global __avr32_f32_cmp_lt
+	.type  __avr32_f32_cmp_lt,@function
+__avr32_f32_cmp_lt:
+#endif	
+	lsl	r10, r12, 1	/* Remove sign bits */
+	lsl	r9, r11, 1
+	lddpc	r8, .Linf_sf
+	cp.w	r10, r8
+	rethi	0		/* Op0 is NaN */		
+	cp.w	r9, r8
+	rethi	0		/* Op1 is NaN */
+
+	eor	r8, r11, r12
+	bld	r12, 31
+#ifdef L_avr32_f32_cmp_ge
+	srcc	r8	/* Set result to true if op0 is positive*/
+#endif
+#ifdef L_avr32_f32_cmp_lt
+	srcs	r8	/* Set result to true if op0 is negative*/
+#endif
+	retmi	r8	/* Return if signs are different */
+	brcs	0f	/* Both signs negative? */
+
+	/* Both signs positive */
+	cp.w	r12, r11
+#ifdef L_avr32_f32_cmp_ge
+	srhs	r12
+#endif
+#ifdef L_avr32_f32_cmp_lt
+	srlo	r12
+#endif
+	retal	r12
+0:
+	/* Both signs negative */
+	cp.w	r11, r12
+#ifdef L_avr32_f32_cmp_ge
+	srhs	r12
+#endif
+#ifdef L_avr32_f32_cmp_lt
+	srlo	r12
+#endif
+	retal	r12
+#endif
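
The comparison scheme above treats the floats as sign-magnitude integers: NaN always compares false, operands with different signs are ordered by the sign of op0, and otherwise the raw magnitudes are compared, with the order reversed when both operands are negative. A small C model of the "less than" variant (illustrative only; the -0 versus +0 corner case is not treated specially here):

#include <stdint.h>

/* Nonzero iff a < b, operating on raw IEEE-754 single bits. */
static int f32_bits_lt(uint32_t a, uint32_t b)
{
    uint32_t mag_a = a << 1, mag_b = b << 1;     /* drop the sign bits      */
    uint32_t sign_a, sign_b;

    if (mag_a > 0xff000000u || mag_b > 0xff000000u)
        return 0;                                /* any NaN compares false  */

    sign_a = a >> 31;
    sign_b = b >> 31;
    if (sign_a != sign_b)
        return sign_a;             /* different signs: a < b iff a negative */
    if (!sign_a)
        return mag_a < mag_b;      /* both positive: compare magnitudes     */
    return mag_b < mag_a;          /* both negative: the order reverses     */
}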
+	
+
+#ifdef L_avr32_f64_cmp_eq
+	.global __avr32_f64_cmp_eq
+	.type  __avr32_f64_cmp_eq,@function
+__avr32_f64_cmp_eq:	
+        cp.w    r10,r8
+        cpc     r11,r9
+        brne    0f	/* Both args could be zero with different sign bits */
+
+        /* check for NaN */
+        lsl     r11,1
+        lddpc   r12,.Linf
+        cp.w    r10,0
+        cpc     r11,r12                 /* check if nan or inf */
+	srls	r12			/* If Arg is NaN return 0 else 1*/
+	ret	r12			/* Return  */
+
+0:
+        lsl     r11,1                   /* get rid of sign bits */
+        lsl     r9,1
+        or      r11,r10                 /* Check if all bits are zero */
+        or      r11,r9
+        or      r11,r8
+	sreq	r12			/* If all zeros the arguments are equal
+					   so return 1 else return 0 */
+	ret	r12
+#endif
+
+
+#if   defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
+
+#ifdef L_avr32_f64_cmp_ge
+	.global __avr32_f64_cmp_ge
+	.type  __avr32_f64_cmp_ge,@function
+__avr32_f64_cmp_ge:
+#endif	
+#ifdef L_avr32_f64_cmp_lt
+	.global __avr32_f64_cmp_lt
+	.type  __avr32_f64_cmp_lt,@function
+__avr32_f64_cmp_lt:
+#endif	
+
+        /* compare magnitude of op1 and op2 */
+	pushm	lr
+	
+        lsl     r11,1                   /* Remove sign bit of op1 */
+	srcs	lr			/* Sign op1 to lsb of lr*/
+        lsl     r9,1                    /* Remove sign bit of op2 */
+	rol	lr			/* Sign of op2 to lsb of lr, sign of op1 to bit 1 of lr*/
+
+	/* Check for Nan */
+        lddpc   r12,.Linf
+        cp.w    r10,0
+        cpc     r11,r12
+	movhi	r12, 0	/* Return false for NaN */
+        brhi    0f	/* We have NaN */
+        cp.w    r8,0
+        cpc     r9,r12
+	movhi	r12, 0	/* Return false for NaN */
+        brhi    0f	/* We have NaN */
+
+        cp.w    lr,3                   /* both operands negative ?*/	
+        breq    1f
+
+        cp.w    lr,1                   /* both operands positive? */
+        brlo    2f
+
+        /* Different signs. If sign of op1 is negative the difference
+	   between op1 and op2 will always be negative, and if op1 is
+	   positive the difference will always be positive */		
+#ifdef L_avr32_f64_cmp_ge
+	sreq	r12
+#endif
+#ifdef L_avr32_f64_cmp_lt
+	srne	r12
+#endif
+	popm	pc
+
+
+2:
+        /* Both operands positive. Just compute the difference */
+        cp.w    r10,r8
+        cpc     r11,r9
+#ifdef L_avr32_f64_cmp_ge
+	srhs	r12
+#endif
+#ifdef L_avr32_f64_cmp_lt
+	srlo	r12
+#endif
+	popm	pc
+		
+1:
+        /* Both operands negative. Compute the difference with operands switched */
+        cp     r8,r10
+        cpc    r9,r11
+#ifdef L_avr32_f64_cmp_ge
+	srhs	r12
+#endif
+#ifdef L_avr32_f64_cmp_lt
+	srlo	r12
+#endif
+0:	
+	popm	pc
+#endif
+
+	
+
+#ifdef L_avr32_f64_div
+	.global __avr32_f64_div
+	.type  __avr32_f64_div,@function
+__avr32_f64_div:
+	stm	--sp, r2-r7,lr
+	eor	r12, r11, r9		/* Sign(op1) ^ Sign(op2) to msb of r12*/
+        lsl     r11,1                   /* unpack op1*/
+        lddpc   lr,.Linf
+        lsl     r9,1                    /* unpack op2*/
+
+	cp.w    r11,lr
+        brhs	0f			/* op1 is NaN or infinity */
+        cp.w    r9,lr
+        brhs    1f			/* op2 is NaN or infinity */
+        or      r5,r9,r8
+        breq    2f			/* op2 is zero */
+        or      r5,r11,r10
+        breq    __dfdiv_return_op1	/* op1 is zero, return zero*/
+
+	/* Unpack and normalize */
+	/* op1 */
+	unpack_df	r7 /*exp*/, r10, r11 /*df number*/
+	normalize_df	r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
+
+	/* op2 */
+	unpack_df	r6 /*exp*/, r8, r9 /*df number*/
+	normalize_df	r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
+
+	/* Compute new exponent */
+        sub     r7,r6
+        sub     r7,-1023
+
+	/* Do fixed point division of mantissas*/
+        mov     r6,55
+        lsr     r11,1
+        ror     r10
+        lsr     r9,1
+        ror     r8
+
+3:
+	/* Check if dividend is higher or same than divisor */
+        sub     r2,r10,r8
+        sbc     r3,r11,r9
+	/* If so move the difference back into the dividend */
+	movhs	r10, r2
+	movhs	r11, r3
+	/* Update the Quotient */
+        rol     r4
+        rol     r5
+        eorl    r4,1
+
+	/* Shift the dividend */
+        lsl     r10,1
+        rol     r11
+
+        sub     r6,1
+	brne    3b
+
+	/* Check if we have a remainder which will propagate into
+	   the last bit */
+
+        or      r11,r11,r10
+        neg     r11
+        rol     r4
+        rol     r5
+
+	/* Adjust mantissa into correct alignment */
+        lsl     r11, r5,(64-56)
+        or      r11,r11,r4>>(32-64+56)
+        lsl     r10,r4, (64-56)
+
+	/* Normalize result */
+	normalize_df	r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */
+
+	/* Check if a subnormal result was created */
+	cp.w	r7, 0
+	brgt	3f
+	
+	adjust_subnormal_df	r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/			
+	ldm	sp++, r2-r7,pc
+3:
+	
+        /* Round result */
+        round_df	r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/
+        cp.w    r7,0x7ff
+        brlt    __dfdiv_pack_result
+	/*Return infinity */
+	lddpc	r11, .Linf
+	mov	r10, 0
+	rjmp	__dfdiv_return_op1
+	
+__dfdiv_pack_result:
+	/* Pack */
+	pack_df	r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
+
+__dfdiv_return_op1:
+	lsl	r12,1
+	ror	r11
+	ldm	sp++, r2-r7,pc
+
+0:
+	/* Op1 is NaN or Inf */
+        cpc     r10
+	/* If op1 is a NaN then we should return a NaN */
+        brne    __dfdiv_return_op1
+
+	/* Op1 is infinity, check op2*/
+        cp.w    r9,lr
+        brlo    __dfdiv_return_op1    /* Op2 is a normal number return inf */
+	/* Other combinations:	 return NaN */
+	mov	r11, -1
+	ldm	sp++, r2-r7,pc
+
+1:	
+	/* Op2 is NaN or Inf */
+        cpc     r8
+	/* If inf return zero else return NaN*/
+	mov	r10, 0
+	moveq	r11, 0
+	movne	r11, -1
+	ldm	sp++, r2-r7,pc
+
+2:
+	/* Op2 is zero */
+        or      r6,r11,r10              /* 0.0/0.0 yields NaN */
+	mov	r10, 0
+	moveq	r11, -1			/* Return NaN */
+	movne	r11, lr			/* Return inf */
+        rjmp    __dfdiv_return_op1
+
+#endif	
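
The 55-iteration loop above is a classic restoring division of the two mantissas: each round produces one quotient bit, and whatever remainder is left at the end is folded into a sticky bit so rounding still sees it. A C sketch of the same idea (names are illustrative; it assumes the inputs are pre-shifted so that num < 2*den, as the routine does by shifting both mantissas right by one first):

#include <stdint.h>

static uint64_t divide_mantissa(uint64_t num, uint64_t den, int bits, int *sticky)
{
    uint64_t quot = 0;
    int i;

    for (i = 0; i < bits; i++) {
        quot <<= 1;
        if (num >= den) {          /* divisor fits: subtract, emit a 1 bit  */
            num -= den;
            quot |= 1;
        }
        num <<= 1;                 /* move on to the next quotient bit      */
    }
    *sticky = (num != 0);          /* leftover remainder -> sticky bit      */
    return quot;
}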
+		
+
+#ifdef L_avr32_f32_div
+	.global __avr32_f32_div
+	.type  __avr32_f32_div,@function
+__avr32_f32_div:
+	eor	r8, r11, r12		/* MSB(r8) = Sign(op1) ^ Sign(op2) */
+	/* Unpack */
+        lsl     r12,1
+	reteq	0			/* Return zero if op1 is zero */
+        lddpc   r9, .Linf_sf
+        lsl     r11,1
+
+	/* Check op1 for NaN or Inf */
+        cp      r12,r9	
+        brhs    2f
+
+	/* Check op2 for NaN or Inf */
+        cp      r11,r9
+        brhs    3f
+	/* Check op2 for zero */
+        tst     r11,r11
+        breq    4f
+
+	/* If op1 is zero return zero */
+	tst	r12, r12
+	reteq	0
+	
+	/* Unpack op1*/	
+	unpack_sf	r9 /*exp*/, r12 /*sf*/
+
+	/* Unpack op2*/	
+	unpack_sf	r10 /*exp*/, r11 /*sf*/
+
+	/* Calculate new exponent */
+	stm	--sp,r7,lr
+	sub	r9, r10
+        sub     r9,-127
+
+	/* Divide */
+        mov     r7,26
+
+        lsr     r12,1                   /* Make room for one more bit in mantissas */
+        lsr     r11,1
+
+0:
+        sub     r10,r12,r11
+	movcc	r12, r10		/* update dividend if divisor smaller */
+        rol     lr                      /* shift result into lr */
+        eorl    lr,1                    /* flip bit. */
+        lsl     r12,1			/* Shift dividend */
+        sub     r7,1
+        brne    0b
+
+        /* round and scale*/
+        neg	r12                     /* c = 1 iff r12 != 0 */
+        rol     lr
+        lsl     r10,lr,(32-27)		/* Adjust mantissa */
+	ldm	sp++, r7, lr
+	
+				
+	normalize_sf	r9 /*exp*/, r10 /*mant*/, r11 /*scratch*/	
+
+	/* Check for subnormal result */
+	cp.w	r9, 0
+	brgt	0f
+
+	/* Adjust a subnormal result */
+	adjust_subnormal_sf	r12 /*sf*/, r9 /*exp*/, r10 /*mant*/, r8 /*sign*/,r11 /*scratch*/
+	ret	r12
+0:
+	round_sf	r9 /*exp*/, r10 /*mant*/, r11 /*scratch*/	
+	pack_sf		r12 /*sf*/, r9 /*exp*/, r10 /*mant*/
+__divsf_return_op1:	
+	lsl	r8, 1
+	ror	r12
+	ret	r12
+
+2:
+	/* Op1 is NaN or inf */
+	retne	-1	/* Return NaN if op1 is NaN */
+	/* Op1 is inf check op2 */
+	cp	r11, r9
+	brlo	__divsf_return_op1 /* inf/number gives inf */
+	ret	-1	/* The rest gives NaN*/
+3:	
+	/* Op2 is NaN or inf */
+	reteq	0	/* Return zero if number/inf*/
+	ret	-1	/* Return NaN*/
+4:
+	/* Op2 is zero ? */
+	tst	r12,r12
+	reteq	-1	/* 0.0/0.0 is NaN */
+	lddpc	r12, .Linf_sf
+	rjmp	__divsf_return_op1
+				
+#endif
+
+#ifdef L_avr32_f32_mul
+	.global __avr32_f32_mul
+	.type  __avr32_f32_mul,@function
+__avr32_f32_mul:
+	eor	r8, r11, r12		/* MSB(r8) = Sign(op1) ^ Sign(op2) */
+        lsl     r12,1                   /* unpack op1 */
+        lsl     r11,1                   /* unpack op2 */
+
+        /* arrange operands so that op1 >= op2 */
+        sub     r9,r12,r11
+        brcc    0f
+
+        sub     r12,r9                  /* swap operands if op2 was larger */
+        add     r11,r9
+
+0:
+        lddpc   r9,.Linf_sf
+        cp      r12,r9
+        brhs    2f
+
+	/* Check op2 for zero */
+        tst     r11,r11
+	reteq	0	/* Return zero */
+
+	/* Unpack op1 */	
+	unpack_sf	r9 /*exp*/, r12 /*sf*/
+	/* Unpack op2 */	
+	unpack_sf	r10 /*exp*/, r11 /*sf*/
+
+	/* Calculate new exponent */
+        add     r9,r10
+
+	/* Do the multiplication */
+        mulu.d  r10,r12,r11
+
+        sub     r9,(127-1)              /* remove extra exponent bias */
+
+	/* Check if we have any bits in r10 which
+	   means a rounding bit should be inserted in LSB of result */
+        tst     r10,r10
+        srne    r10
+        or      r12,r11,r10
+
+	/* Normalize */
+	normalize_sf	r9 /*exp*/, r12 /*mant*/, r11 /*scratch*/	
+
+	/* Check for subnormal result */
+	cp.w	r9, 0
+	brgt	0f
+
+	/* Adjust a subnormal result */
+	adjust_subnormal_sf	r12/*sf*/, r9 /*exp*/, r12 /*mant*/, r8 /*sign*/, r11 /*scratch */
+	ret	r12
+0:
+	round_sf	r9 /*exp*/, r12 /*mant*/, r11 /*scratch*/	
+	cp.w		r9, 0xff
+	brlo		1f
+        lddpc		r12,.Linf_sf 	
+	rjmp		__mulsf_return_op1
+1:			
+	pack_sf		r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
+__mulsf_return_op1:	
+	lsl	r8, 1
+	ror	r12
+	ret	r12
+
+2:
+	/* Op1 is inf or NaN */
+        retne   -1             /* Op1 is NaN return NaN */
+
+	/* Op1 is inf and op2 is smaller, so op2 is either infinity,
+	   a (sub)normal number or zero */
+        cp      r11,0
+        brne    __mulsf_return_op1  /* op2 is not zero. return op1.*/
+        ret     -1		    /* inf * 0  return NaN */		
+#endif	
+
+	
+#ifdef L_avr32_s32_to_f32
+	.global __avr32_s32_to_f32
+	.type  __avr32_s32_to_f32,@function
+__avr32_s32_to_f32:
+	cp	r12, 0
+	reteq	r12	/* If zero then return zero float */
+	mov	r11, r12 /* Keep the sign */
+	abs	r12	/* Compute the absolute value */
+	mov	r10, 31 + 127	/* Set the correct exponent */
+	
+	/* Normalize */
+	normalize_sf	r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/	
+
+	/* Check for subnormal result */
+	cp.w	r10, 0
+	brgt	0f
+
+	/* Adjust a subnormal result */
+	adjust_subnormal_sf	r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
+	ret	r12
+0:
+	round_sf	r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/	
+	pack_sf		r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
+__floatsisf_return_op1:	
+	lsl	r11, 1
+	ror	r12
+	ret	r12		
+#endif
+
+#ifdef L_avr32_u32_to_f32
+	.global __avr32_u32_to_f32
+	.type  __avr32_u32_to_f32,@function
+__avr32_u32_to_f32:
+	cp	r12, 0
+	reteq	r12	/* If zero then return zero float */
+	mov	r10, 31 + 127	/* Set the correct exponent */
+	
+	/* Normalize */
+	normalize_sf	r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/	
+
+	/* Check for subnormal result */
+	cp.w	r10, 0
+	brgt	0f
+
+	/* Adjust a subnormal result */
+	mov	r8, 0
+	adjust_subnormal_sf	r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
+	ret	r12
+0:
+	round_sf	r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/	
+	pack_sf		r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
+__floatunsisf_return_op1:	
+	lsr	r12,1	/* Sign bit is 0 for unsigned int */
+	ret	r12		
+#endif
+	
+
+#ifdef L_avr32_f32_to_s32
+	.global __avr32_f32_to_s32
+	.type  __avr32_f32_to_s32,@function
+__avr32_f32_to_s32:
+        lsr     r11,r12,23              /* Extract exponent */
+        castu.b r11
+        sub     r11,127                 /* Fix bias */
+        retlo   0			/* Negative exponent yields zero integer */
+
+#ifdef __IEEE_LARGE_FLOATS__
+        cp      r11,31
+        brcc    0f
+#endif
+	/* Shift mantissa into correct position */
+        rsub    r11,r11,31	/* Shift amount */
+        lsl     r10,r12,8       /* Get mantissa */
+        sbr     r10,31          /* Add implicit bit */
+        lsr     r10,r10,r11     /* Perform shift */
+        lsl     r12,1           /* Check sign */
+        retcc   r10             /* if positive, we are done */
+        neg     r10             /* if negative float, negate result */
+        ret     r10
+
+#ifdef __IEEE_LARGE_FLOATS__
+0:	
+        mov     r11,-1
+        lsr     r11,1
+        lsl     r12,1
+        acr     r11
+
+        ret     r11
+#endif
+#endif	
+	
+#ifdef L_avr32_f32_to_u32
+	.global __avr32_f32_to_u32
+	.type  __avr32_f32_to_u32,@function
+__avr32_f32_to_u32:
+	cp	r12,0
+	retmi	0			/* Negative numbers give 0 */
+        bfextu  r11, r12, 23, 8		/* Extract exponent */
+        sub     r11,127                 /* Fix bias */
+        retlo   0			/* Negative exponent yields zero integer */
+
+#ifdef __IEEE_LARGE_FLOATS__
+        cp      r11,32
+        brcc    0f
+#endif
+	/* Shift mantissa into correct position */
+        rsub    r11,r11,31	/* Shift amount */
+        lsl     r12,8           /* Get mantissa */
+        sbr     r12,31          /* Add implicit bit */
+        lsr     r12,r12,r11     /* Perform shift */
+        ret	r12
+
+#ifdef __IEEE_LARGE_FLOATS__
+0:	
+        mov     r11,-1
+        lsr     r11,1
+        lsl     r12,1
+        acr     r11
+
+        ret     r11
+#endif
+#endif	
+
+#ifdef L_avr32_f32_to_f64
+	.global __avr32_f32_to_f64
+	.type  __avr32_f32_to_f64,@function
+
+__avr32_f32_to_f64:
+        lsl     r11,r12,1               /* Remove sign bit, keep original value in r12*/
+	moveq	r10, 0
+	reteq	r11			/* Return zero if input is zero */
+
+	bfextu  r9,r11,24,8              /* Get exponent */
+        cp.w    r9,0xff                 /* check for NaN or inf */
+        breq    0f
+
+        lsl     r11,7                   /* Convert sf mantissa to df format */
+        mov     r10,0
+
+	/* Check if implicit bit should be set */
+        cp.w    r9, 0
+        subeq   r9,-1                    /* Adjust exponent if it was 0 */
+	srne	r8
+	or	r11, r11, r8 << 31	/* Set implicit bit if needed */
+        sub     r9,(127-0x3ff)          /* Convert exponent to df format exponent */
+
+	pushm	lr
+	normalize_df	r9 /*exp*/, r10, r11 /*mantissa*/, r8, lr /*scratch*/
+	popm	lr
+	pack_df		r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
+
+__extendsfdf_return_op1:	
+	/* Rotate in sign bit */
+	lsl	r12, 1
+	ror	r11
+	ret	r11
+			
+0:
+	/* Inf or NaN*/
+	lddpc	r10, .Linf
+        lsl     r11,8                   /* check mantissa */
+	movne	r11, -1			/* Return NaN */
+	moveq	r11, r10		/* Return inf */
+	rjmp	__extendsfdf_return_op1
+#endif			
+
+
+#ifdef L_avr32_f64_to_f32
+	.global __avr32_f64_to_f32
+	.type  __avr32_f64_to_f32,@function
+
+__avr32_f64_to_f32:
+	/* Unpack */
+        lsl     r9,r11,1                /* Unpack exponent */
+        lsr     r9,21
+
+	reteq	0			/* If exponent is 0 the number is so small
+					   that the conversion to single float gives
+					   zero */
+
+        lsl     r8,r11,10                  /* Adjust mantissa */
+        or      r12,r8,r10>>22
+
+        lsl     r10,10                  /* Check if there are any remaining bits
+					   in the low part of the mantissa.*/
+        neg     r10
+        rol     r12                     /* If there were remaining bits then set lsb
+					   of mantissa to 1 */
+
+        cp      r9,0x7ff
+        breq    2f			/* Check for NaN or inf */
+
+        sub     r9,(0x3ff-127)          /* Adjust bias of exponent */
+        sbr     r12,31                  /* set the implicit bit.*/
+
+	cp.w	r9, 0			/* Check for subnormal number */
+	brgt	0f
+
+	/* Adjust a subnormal result */
+	adjust_subnormal_sf	r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
+	ret	r12
+0:
+	round_sf	r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/	
+	pack_sf		r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
+__truncdfsf_return_op1:	
+	/* Rotate in sign bit */
+	lsl	r11, 1
+	ror	r12
+	ret	r12		
+	
+	
+2:
+	/* NaN or inf */
+        cbr     r12,31                  /* clear implicit bit */
+        retne   -1                      /* Return NaN if mantissa not zero */
+	lddpc	r12,.Linf_sf
+	ret	r12			/* Return inf */
+#endif
+
+	
+	.align 2
+.Linf:	
+	.long	0xffe00000
+
+	.align 2
+.Linf_sf:	
+	.long	0xff000000
+
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/lib2funcs.S gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/lib2funcs.S
--- gcc-4.0.2/gcc/config/avr32/lib2funcs.S	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/lib2funcs.S	2006-10-10 12:36:34.000000000 +0200
@@ -0,0 +1,21 @@
+	.align	4
+	.global __nonlocal_goto
+	.type  __nonlocal_goto,@function
+
+/* __nonlocal_goto:	This function handles nonlocal_goto's in gcc.
+
+	parameter 0 (r12) = New Frame Pointer
+	parameter 1 (r11) = Address to goto
+	parameter 2 (r10) = New Stack Pointer
+
+	This function invalidates the return stack, since it returns from a
+	function without using a return instruction.
+*/
+__nonlocal_goto:
+	mov	r7, r12
+	mov	sp, r10
+	frs			# Flush return stack
+	mov	pc, r11
+
+
+		
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/linux-elf.h gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/linux-elf.h
--- gcc-4.0.2/gcc/config/avr32/linux-elf.h	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/linux-elf.h	2006-11-10 15:14:06.000000000 +0100
@@ -0,0 +1,154 @@
+/*
+   Linux/Elf specific definitions.
+   Copyright 2003-2006 Atmel Corporation.
+
+   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+   and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
+
+   This file is part of GCC.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+
+/* elfos.h should have already been included.  Now just override
+   any conflicting definitions and add any extras.  */
+
+/* Run-time Target Specification.  */
+#undef  TARGET_VERSION
+#define TARGET_VERSION  fputs (" (AVR32 GNU/Linux with ELF)", stderr);
+
+/* Do not assume anything about header files.  */
+#define NO_IMPLICIT_EXTERN_C
+
+/* The GNU C++ standard library requires that these macros be defined.  */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+/* Now we define the strings used to build the spec file.  */
+#undef  LIB_SPEC
+#define LIB_SPEC \
+  "%{pthread:-lpthread} \
+   %{shared:-lc} \
+   %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux.  Here we add
+   the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+   provides part of the support for getting C++ file-scope static
+   object constructed before entering `main'.  */
+
+#undef  STARTFILE_SPEC
+#define STARTFILE_SPEC \
+  "%{!shared: \
+     %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
+		       %{!p:%{profile:gcrt1.o%s} \
+			 %{!profile:crt1.o%s}}}} \
+   crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide an ENDFILE_SPEC appropriate for GNU/Linux.  Here we tack on
+   the GNU/Linux magical crtend.o file (see crtstuff.c) which
+   provides part of the support for getting C++ file-scope static
+   object constructed before entering `main', followed by a normal
+   GNU/Linux "finalizer" file, `crtn.o'.  */
+
+#undef  ENDFILE_SPEC
+#define ENDFILE_SPEC \
+  "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{!mno-pic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
+
+#undef  LINK_SPEC
+#define LINK_SPEC "%{version:-v} \
+   %{static:-Bstatic} \
+   %{shared:-shared} \
+   %{symbolic:-Bsymbolic} \
+   %{rdynamic:-export-dynamic} \
+   %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
+   %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
+
+#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
+
+/* This is how we tell the assembler that two symbols have the same value.  */
+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
+  do					   \
+    {					   \
+      assemble_name (FILE, NAME1); 	   \
+      fputs (" = ", FILE);		   \
+      assemble_name (FILE, NAME2);	   \
+      fputc ('\n', FILE);		   \
+    }					   \
+  while (0)
+
+
+
+#undef  CC1_SPEC
+#define CC1_SPEC "%{profile:-p}"
+
+/* Target CPU builtins.  */
+#define TARGET_CPU_CPP_BUILTINS()				\
+  do								\
+    {								\
+      builtin_define ("__avr32__");				\
+      builtin_define ("__AVR32__");				\
+      builtin_define ("__AVR32_LINUX__");			\
+      builtin_define (avr32_part->macro);			\
+      builtin_define (avr32_arch->macro);			\
+      if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)		\
+	builtin_define ("__AVR32_AVR32A__");			\
+      else							\
+	builtin_define ("__AVR32_AVR32B__");			\
+      if (TARGET_UNALIGNED_WORD)				\
+	builtin_define ("__AVR32_HAS_UNALIGNED_WORD__");	\
+      if (TARGET_SIMD)						\
+	builtin_define ("__AVR32_HAS_SIMD__");			\
+      if (TARGET_DSP)						\
+	builtin_define ("__AVR32_HAS_DSP__");			\
+      if (TARGET_RMW)						\
+	builtin_define ("__AVR32_HAS_RMW__");			\
+      if (TARGET_BRANCH_PRED)					\
+	builtin_define ("__AVR32_HAS_BRANCH_PRED__");		\
+      if (flag_pic)						\
+	{							\
+	  builtin_define ("__PIC__");				\
+	  builtin_define ("__pic__");				\
+	}							\
+    }								\
+  while (0)
+
+
+
+/* Call the function profiler with a given profile label.  */
+#undef  FUNCTION_PROFILER
+#define FUNCTION_PROFILER(STREAM, LABELNO)				\
+  do									\
+    {									\
+      fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
+      fprintf (STREAM, "\ticall lr\n");					\
+    }									\
+  while (0)
+
+#define NO_PROFILE_COUNTERS 1
+
+/* For dynamic libraries to work */
+/* #define PLT_REG_CALL_CLOBBERED 1 */
+#define AVR32_ALWAYS_PIC 1
+
+/* uclibc does not implement sinf, cosf etc. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 0
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+  "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/predicates.md gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/predicates.md
--- gcc-4.0.2/gcc/config/avr32/predicates.md	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/predicates.md	2006-11-09 15:01:19.000000000 +0100
@@ -0,0 +1,303 @@
+;;   AVR32 predicates file.
+;;   Copyright 2003-2006 Atmel Corporation.
+;;
+;;   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+;;
+;;   This file is part of GCC.
+;;
+;;   This program is free software; you can redistribute it and/or modify
+;;   it under the terms of the GNU General Public License as published by
+;;   the Free Software Foundation; either version 2 of the License, or
+;;   (at your option) any later version.
+;;
+;;   This program is distributed in the hope that it will be useful,
+;;   but WITHOUT ANY WARRANTY; without even the implied warranty of
+;;   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;;   GNU General Public License for more details.
+;;
+;;   You should have received a copy of the GNU General Public License
+;;   along with this program; if not, write to the Free Software
+;;   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+
+;; True if the operand is a memory reference which contains an
+;; Address consisting of a single pointer register
+(define_predicate "avr32_indirect_register_operand"
+  (and (match_code "mem")
+       (match_test "register_operand(XEXP(op, 0), SImode)")))
+
+
+
+;; Address expression with a base pointer offset with
+;; a register displacement
+(define_predicate "avr32_indexed_memory_operand"
+  (and (match_code "mem")
+       (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
+  {
+
+   rtx op0 = XEXP(XEXP(op, 0), 0);
+   rtx op1 = XEXP(XEXP(op, 0), 1);
+
+   return ((avr32_address_register_rtx_p (op0, 0)
+            && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
+	   || (avr32_address_register_rtx_p (op1, 0)
+            && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
+
+ })
+
+;; Operand suitable for the ld.sb instruction
+(define_predicate "load_sb_memory_operand"
+  (ior (match_operand 0 "avr32_indirect_register_operand")
+       (match_operand 0 "avr32_indexed_memory_operand")))
+
+
+;; Operand suitable as operand to insns sign extending QI values
+(define_predicate "extendqi_operand"
+  (ior (match_operand 0 "load_sb_memory_operand")
+       (match_operand 0 "register_operand")))
+
+(define_predicate "post_inc_memory_operand"
+  (and (match_code "mem")
+       (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
+                     && REG_P(XEXP(XEXP(op, 0), 0))")))
+
+;; Operand suitable for loading TImode values
+(define_predicate "loadti_operand"
+  (ior (ior (match_operand 0 "register_operand")
+            (match_operand 0 "avr32_indirect_register_operand"))
+       (match_operand 0 "post_inc_memory_operand")))
+
+;; Operand suitable for add instructions
+(define_predicate "avr32_add_operand"
+  (ior (match_operand 0 "register_operand")
+       (and (match_operand 0 "immediate_operand")
+            (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
+
+;; Operand is a power of two immediate
+(define_predicate "power_of_two_operand"
+  (match_code "const_int")
+{
+  HOST_WIDE_INT value = INTVAL (op);
+
+  return value != 0 && (value & (value - 1)) == 0;
+})
+
+;; Operand is a multiple of 8 immediate
+(define_predicate "multiple_of_8_operand"
+  (match_code "const_int")
+{
+  HOST_WIDE_INT value = INTVAL (op);
+
+  return (value & 0x7) == 0 ;
+})
+
+;; Operand is a multiple of 16 immediate
+(define_predicate "multiple_of_16_operand"
+  (match_code "const_int")
+{
+  HOST_WIDE_INT value = INTVAL (op);
+
+  return (value & 0xf) == 0 ;
+})
+
+;; Operand is a mask used for masking away upper bits of a reg
+(define_predicate "avr32_mask_upper_bits_operand"
+  (match_code "const_int")
+{
+  HOST_WIDE_INT value = INTVAL (op) + 1;
+
+  return value != 1 && value != 0 && (value & (value - 1)) == 0;
+})
+
+
+;; Operand suitable for mul instructions
+(define_predicate "avr32_mul_operand"
+  (ior (match_operand 0 "register_operand")
+       (and (match_operand 0 "immediate_operand")
+            (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
+
+;; True for logical binary operators.
+(define_predicate "logical_binary_operator"
+  (match_code "ior,xor,and"))
+
+;; True for logical shift operators
+(define_predicate "logical_shift_operator"
+  (match_code "ashift,lshiftrt"))
+
+;; True for shift operand for logical and, or and eor insns
+(define_predicate "avr32_logical_shift_operand"
+  (and (match_code "ashift,lshiftrt")
+       (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
+                 (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
+            (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
+                 (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
+  {
+   return 1;
+  }
+  )
+
+
+;; Predicate for second operand to and, ior and xor insn patterns
+(define_predicate "avr32_logical_insn_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "avr32_logical_shift_operand"))
+  {
+   return 1;
+  }
+)
+
+
+;; True for avr32 comparison operators
+(define_predicate "avr32_comparison_operator"
+  (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
+       (and (match_code "unspec")
+            (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
+                         || (XINT(op, 1) == UNSPEC_COND_PL)"))))
+
+;; True if this is a const_int with one bit set
+(define_predicate "one_bit_set_operand"
+  (match_code "const_int")
+  {
+   int i;
+   int value;
+   int ones = 0;
+
+   value = INTVAL(op);
+   for ( i = 0 ; i < 32; i++ ){
+     if ( value & ( 1 << i ) ){
+        ones++;
+      }
+   }
+
+   return ( ones == 1 );
+  })
+
+
+;; True if this is a const_int with one bit cleared
+(define_predicate "one_bit_cleared_operand"
+  (match_code "const_int")
+  {
+   int i;
+   int value;
+   int zeroes = 0;
+
+   value = INTVAL(op);
+   for ( i = 0 ; i < 32; i++ ){
+     if ( !(value & ( 1 << i )) ){
+        zeroes++;
+      }
+   }
+
+   return ( zeroes == 1 );
+  })
+
+
+;; True if this is a register or immediate operand
+(define_predicate "register_immediate_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "immediate_operand")))
+
+
+;; True if this is an operand containing a label_ref
+(define_predicate "avr32_label_ref_operand"
+  (and (match_code "mem")
+       (match_test "avr32_find_symbol(op)
+                    && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
+
+;; True if this is a valid symbol pointing to the constant pool
+(define_predicate "avr32_const_pool_operand"
+  (and (match_code "symbol_ref")
+       (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
+  {
+        return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
+                        || label_mentioned_p (get_pool_constant (op)))
+                       || avr32_got_mentioned_p(get_pool_constant (op)))
+                    : true);
+  }
+)
+
+;; True if this is a memory reference to the constant or mini pool
+(define_predicate "avr32_const_pool_ref_operand"
+  (ior (match_operand 0 "avr32_label_ref_operand")
+       (and (match_code "mem")
+            (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
+
+
+
+;; True if this is a k12 offset memory operand
+(define_predicate "avr32_k12_memory_operand"
+  (and (match_code "mem")
+       (ior (match_test "REG_P(XEXP(op, 0))")
+            (match_test "GET_CODE(XEXP(op, 0)) == PLUS
+                         && REG_P(XEXP(XEXP(op, 0), 0))
+                         && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
+                         && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)),
+                                'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
+
+;; True if this is a memory operand with an immediate displacement
+(define_predicate "avr32_imm_disp_memory_operand"
+  (and (match_code "mem")
+       (match_test "GET_CODE(XEXP(op, 0)) == PLUS
+                    && REG_P(XEXP(XEXP(op, 0), 0))
+                    && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
+
+;; True if this is a bswap operand
+(define_predicate "avr32_bswap_operand"
+  (ior (match_operand 0 "avr32_k12_memory_operand")
+       (match_operand 0 "register_operand")))
+
+;; True if this is a valid coprocessor insn memory operand
+(define_predicate "avr32_cop_memory_operand"
+  (and (match_operand 0 "memory_operand")
+       (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
+                         && REG_P(XEXP(XEXP(op, 0), 0))
+                         && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
+                         && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)), 'K', \"Ku10\"))"))))
+
+;; True if this is a valid source/destination operand
+;; for moving values to/from a coprocessor
+(define_predicate "avr32_cop_move_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "avr32_cop_memory_operand")))
+
+
+;; True if this is a valid extract byte offset for use in
+;; load extracted index insns
+(define_predicate "avr32_extract_shift_operand"
+  (and (match_operand 0 "const_int_operand")
+       (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
+                    || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
+
+;; True if this is a floating-point register
+(define_predicate "avr32_fp_register_operand"
+  (and (match_operand 0 "register_operand")
+       (match_test "REGNO_REG_CLASS(REGNO(op)) == FP_REGS")))
+
+;; True if this is a valid avr32 symbol operand
+(define_predicate "avr32_symbol_operand"
+  (ior (match_code "label_ref, symbol_ref")
+       (and (match_code "const")
+            (match_test "avr32_find_symbol(op)"))))
+
+;; True if this is a valid operand for the lda.w and call pseudo insns
+(define_predicate "avr32_address_operand"
+  (and (match_code "label_ref, symbol_ref")
+       (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
+            (match_test "flag_pic")) ))
+
+;; True if this is an avr32 call operand
+(define_predicate "avr32_call_operand"
+  (ior (ior (match_operand 0 "register_operand")
+            (ior (match_operand 0 "avr32_const_pool_ref_operand")
+                 (match_operand 0 "avr32_address_operand")))
+       (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
+
+;; Return true for operators performing ALU operations
+
+(define_predicate "alu_operator"
+  (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
+
+(define_predicate "avr32_add_shift_immediate_operand"
+  (and (match_operand 0 "immediate_operand")
+       (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
+
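For reference, the one_bit_cleared_operand predicate above accepts a CONST_INT in which exactly one of the 32 bits is zero. A standalone C sketch of the same test (illustration only, not part of the patch): a value has exactly one bit cleared iff its complement is a non-zero power of two.

#include <stdbool.h>
#include <stdint.h>

/* Equivalent of the counting loop in one_bit_cleared_operand:
   exactly one zero bit <=> the complement has exactly one set bit. */
static bool one_bit_cleared(uint32_t value)
{
    uint32_t inv = ~value;
    return inv != 0 && (inv & (inv - 1)) == 0;
}
/* one_bit_cleared(0xFFFFFFFE) is true, one_bit_cleared(0xFFFFFFFF) is false. */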
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/simd.md gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/simd.md
--- gcc-4.0.2/gcc/config/avr32/simd.md	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/simd.md	2006-10-10 12:36:34.000000000 +0200
@@ -0,0 +1,145 @@
+;;   AVR32 machine description file for SIMD instructions.
+;;   Copyright 2003-2006 Atmel Corporation.
+;;
+;;   Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
+;;
+;;   This file is part of GCC.
+;;
+;;   This program is free software; you can redistribute it and/or modify
+;;   it under the terms of the GNU General Public License as published by
+;;   the Free Software Foundation; either version 2 of the License, or
+;;   (at your option) any later version.
+;;
+;;   This program is distributed in the hope that it will be useful,
+;;   but WITHOUT ANY WARRANTY; without even the implied warranty of
+;;   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;;   GNU General Public License for more details.
+;;
+;;   You should have received a copy of the GNU General Public License
+;;   along with this program; if not, write to the Free Software
+;;   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; -*- Mode: Scheme -*-
+
+
+;; Vector modes
+(define_mode_macro VECM [V2HI V4QI])
+(define_mode_attr  size [(V2HI "h") (V4QI "b")])
+
+(define_insn "add<mode>3"
+  [(set (match_operand:VECM 0 "register_operand" "=r")
+	(plus:VECM (match_operand:VECM 1 "register_operand" "r")
+                   (match_operand:VECM 2 "register_operand" "r")))]
+  "TARGET_SIMD"
+  "padd.<size>\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+
+(define_insn "sub<mode>3"
+  [(set (match_operand:VECM 0 "register_operand" "=r")
+	(minus:VECM (match_operand:VECM 1 "register_operand" "r")
+                    (match_operand:VECM 2 "register_operand" "r")))]
+  "TARGET_SIMD"
+  "psub.<size>\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+
+(define_insn "abs<mode>2"
+  [(set (match_operand:VECM 0 "register_operand" "=r")
+	(abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
+  "TARGET_SIMD"
+  "pabs.s<size>\t%0, %1"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "ashl<mode>3"
+  [(set (match_operand:VECM 0 "register_operand"           "=r")
+	(ashift:VECM (match_operand:VECM 1 "register_operand" "r")
+                     (match_operand:SI 2 "immediate_operand" "Ku04")))]
+  "TARGET_SIMD"
+  "plsl.<size>\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "ashr<mode>3"
+  [(set (match_operand:VECM 0 "register_operand"           "=r")
+	(ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
+                       (match_operand:SI 2 "immediate_operand" "Ku04")))]
+  "TARGET_SIMD"
+  "pasr.<size>\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "lshr<mode>3"
+  [(set (match_operand:VECM 0 "register_operand"           "=r")
+	(lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
+                       (match_operand:SI 2 "immediate_operand" "Ku04")))]
+  "TARGET_SIMD"
+  "plsr.<size>\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "smaxv2hi3"
+  [(set (match_operand:V2HI 0 "register_operand" "=r")
+	(smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
+                        (match_operand:V2HI 2 "register_operand" "r")))]
+
+  "TARGET_SIMD"
+  "pmax.sh\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "sminv2hi3"
+  [(set (match_operand:V2HI 0 "register_operand" "=r")
+	(smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
+                        (match_operand:V2HI 2 "register_operand" "r")))]
+
+  "TARGET_SIMD"
+  "pmin.sh\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "umaxv4qi3"
+  [(set (match_operand:V4QI 0 "register_operand" "=r")
+	(umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
+                   (match_operand:V4QI 2 "register_operand" "r")))]
+
+  "TARGET_SIMD"
+  "pmax.ub\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "uminv4qi3"
+  [(set (match_operand:V4QI 0 "register_operand" "=r")
+	(umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
+                   (match_operand:V4QI 2 "register_operand" "r")))]
+
+  "TARGET_SIMD"
+  "pmin.ub\t%0, %1, %2"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+
+(define_insn "addsubv2hi"
+  [(set (match_operand:V2HI 0 "register_operand" "=r")
+        (vec_concat:V2HI
+         (plus:HI (match_operand:HI 1 "register_operand" "r")
+                  (match_operand:HI 2 "register_operand" "r"))
+         (minus:HI (match_dup 1) (match_dup 2))))]
+  "TARGET_SIMD"
+  "paddsub.h\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
+
+(define_insn "subaddv2hi"
+  [(set (match_operand:V2HI 0 "register_operand" "=r")
+        (vec_concat:V2HI
+         (minus:HI (match_operand:HI 1 "register_operand" "r")
+                  (match_operand:HI 2 "register_operand" "r"))
+         (plus:HI (match_dup 1) (match_dup 2))))]
+  "TARGET_SIMD"
+  "psubadd.h\t%0, %1:b, %2:b"
+  [(set_attr "length" "4")
+   (set_attr "type" "alu")])
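The patterns above expose the AVR32 packed halfword/byte instructions (padd.h, psub.h, pmax.sh, ...) through GCC's standard vector operation names for the V2HI and V4QI modes. A hedged C illustration using GCC's generic vector extension (not part of the patch): on an AVR32 core where TARGET_SIMD holds, an addition like this can be emitted via the add<mode>3 pattern as a single padd.h, while on other targets it simply falls back to open-coded arithmetic.

/* Illustration only.  V2HI corresponds to a vector of two 16-bit halfwords. */
typedef short v2hi __attribute__ ((vector_size (4)));

v2hi add_pairs (v2hi a, v2hi b)
{
  return a + b;   /* candidate for padd.h via "add<mode>3" above */
}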
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/t-avr32 gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/t-avr32
--- gcc-4.0.2/gcc/config/avr32/t-avr32	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/t-avr32	2006-11-24 17:10:48.000000000 +0100
@@ -0,0 +1,63 @@
+
+MD_INCLUDES= 	$(srcdir)/config/avr32/avr32.md \
+		$(srcdir)/config/avr32/fpcp.md \
+		$(srcdir)/config/avr32/simd.md \
+                $(srcdir)/config/avr32/predicates.md
+
+s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
+	s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
+
+# We want fine grained libraries, so use the new code
+# to build the floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+LIB1ASMSRC = avr32/lib1funcs.S
+LIB1ASMFUNCS =  _avr32_f64_mul _avr32_f64_addsub  _avr32_f64_to_u32 _avr32_f64_to_s32 \
+                _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 _avr32_s32_to_f64 \
+                _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
+                _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt \
+                _avr32_f64_div _avr32_f32_div\
+                _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
+                _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32
+
+LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
+
+MULTILIB_OPTIONS     = march=ap/march=uc
+MULTILIB_DIRNAMES    = ap uc
+MULTILIB_EXCEPTIONS  =
+MULTILIB_MATCHES     = march?ap=mcpu?ap7000
+MULTILIB_MATCHES     += march?ap=mcpu?ap7010
+MULTILIB_MATCHES     += march?ap=mcpu?ap7020
+MULTILIB_MATCHES     += march?uc=mcpu?uc3a0256
+MULTILIB_MATCHES     += march?uc=mcpu?uc3a0512
+MULTILIB_MATCHES     += march?uc=mcpu?uc3a1128
+MULTILIB_MATCHES     += march?uc=mcpu?uc3a1256
+MULTILIB_MATCHES     += march?uc=mcpu?uc3a1512
+MULTILIB_MATCHES     += march?ap=mpart?ap7000
+MULTILIB_MATCHES     += march?ap=mpart?ap7010
+MULTILIB_MATCHES     += march?ap=mpart?ap7020
+MULTILIB_MATCHES     += march?uc=mpart?uc3a0256
+MULTILIB_MATCHES     += march?uc=mpart?uc3a0512
+MULTILIB_MATCHES     += march?uc=mpart?uc3a1128
+MULTILIB_MATCHES     += march?uc=mpart?uc3a1256
+MULTILIB_MATCHES     += march?uc=mpart?uc3a1512
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
+
+CRTSTUFF_T_CFLAGS = -mrelax
+CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
+TARGET_LIBGCC2_CFLAGS += -mrelax
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+	echo '#define FLOAT' > fp-bit.c
+	cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+	cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+
+
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/t-elf gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/t-elf
--- gcc-4.0.2/gcc/config/avr32/t-elf	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/t-elf	2006-03-20 13:59:57.000000000 +0100
@@ -0,0 +1,16 @@
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
+	$(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
+	-c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
+
+$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
+	$(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
+	-c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
+
+
+# Build the libraries for both hard and soft floating point
+EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/avr32/uclinux-elf.h gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/uclinux-elf.h
--- gcc-4.0.2/gcc/config/avr32/uclinux-elf.h	1970-01-01 01:00:00.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/avr32/uclinux-elf.h	2006-01-04 10:57:09.000000000 +0100
@@ -0,0 +1,20 @@
+
+/* Run-time Target Specification.  */
+#undef  TARGET_VERSION
+#define TARGET_VERSION  fputs (" (AVR32 uClinux with ELF)", stderr)
+
+/* We don't want a .jcr section on uClinux. As if this makes a difference... */
+#define TARGET_USE_JCR_SECTION 0
+
+/* Here we go. Drop the crtbegin/crtend stuff completely. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC							\
+  "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}"			\
+  " %{!p:%{profile:gcrt1.o%s}"						\
+  " %{!profile:crt1.o%s}}}} crti.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtn.o%s"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config/host-linux.c gcc-4.0.2-atmel.0.99.2/gcc/config/host-linux.c
--- gcc-4.0.2/gcc/config/host-linux.c	2005-08-01 20:00:10.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/config/host-linux.c	2006-03-23 13:33:12.000000000 +0100
@@ -26,6 +26,9 @@
 #include "hosthooks.h"
 #include "hosthooks-def.h"
 
+#ifndef SSIZE_MAX
+#define SSIZE_MAX LONG_MAX
+#endif
 
 /* Linux has a feature called exec-shield-randomize that perturbs the
    address of non-fixed mapped segments by a (relatively) small amount.
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/config.gcc gcc-4.0.2-atmel.0.99.2/gcc/config.gcc
--- gcc-4.0.2/gcc/config.gcc	2005-08-09 12:57:04.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/config.gcc	2006-02-08 17:33:56.000000000 +0100
@@ -189,9 +189,6 @@ case ${target} in
  | ip2k-*-elf		\
  | ns32k-*-netbsdelf*	\
  | ns32k-*-netbsd*	\
- | c4x-*		\
- | tic4x-*		\
- | hppa1.1-*-rtems*	\
  )
     if test "x$enable_obsolete" != xyes; then
       echo "*** Configuration ${target} is obsolete." >&2
@@ -251,9 +248,6 @@ arm*-*-*)
 	cpu_type=arm
 	extra_headers="mmintrin.h"
 	;;
-bfin*-*)
-	cpu_type=bfin
-	;;
 ep9312*-*-*)
 	cpu_type=arm
 	;;
@@ -735,6 +729,24 @@ avr-*-*)
 	tm_file="avr/avr.h dbxelf.h"
 	use_fixproto=yes
 	;;
+avr32*-*-linux*)
+        tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
+        tmake_file="t-linux avr32/t-avr32 avr32/t-elf"
+	extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
+	extra_modes=avr32/avr32-modes.def
+	gnu_ld=yes
+	;;
+avr32*-*-uclinux*)
+	tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
+	tmake_file="t-linux avr32/t-avr32 avr32/t-elf"
+	extra_modes=avr32/avr32-modes.def
+	gnu_ld=yes
+	;;
+avr32-*-*)
+        tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
+        tmake_file="avr32/t-avr32 avr32/t-elf"
+	extra_modes=avr32/avr32-modes.def
+	;;
 bfin*-elf*)
 	tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
         tmake_file=bfin/t-bfin-elf
@@ -792,14 +804,10 @@ frv-*-*linux*)
 	         linux.h frv/linux.h frv/frv-abi.h"
 	tmake_file="${tmake_file} frv/t-frv frv/t-linux"
 	;;
-h8300-*-rtemscoff*)
+h8300-*-rtems*)
 	tmake_file="h8300/t-h8300 t-rtems h8300/t-rtems"
 	tm_file="h8300/h8300.h dbxcoff.h h8300/coff.h h8300/rtems.h rtems.h"
 	;;
-h8300-*-rtems*)
-	tmake_file="h8300/t-h8300 h8300/t-elf t-rtems h8300/t-rtems"
-	tm_file="h8300/h8300.h dbxelf.h elfos.h h8300/elf.h h8300/rtems.h rtems.h"
-	;;
 h8300-*-elf*)
 	tmake_file="h8300/t-h8300 h8300/t-elf"
 	tm_file="h8300/h8300.h dbxelf.h elfos.h h8300/elf.h"
@@ -1560,6 +1568,9 @@ pdp11-*-bsd)
 pdp11-*-*)
 	use_fixproto=yes
 	;;
+avr-*-*)
+	use_fixproto=yes
+	;;
 # port not yet contributed
 #powerpc-*-openbsd*)
 #	tmake_file="${tmake_file} rs6000/t-fprules "
@@ -2471,6 +2482,21 @@ case "${target}" in
 		fi
 		;;
 
+	avr32*-*-*)
+		supported_defaults="cpu"
+
+		case "$with_cpu" in
+		"" \
+		| morgan | ap7000 )
+			# OK
+			;;
+		*)
+			echo "Unknown cpu used in --with-cpu=$with_cpu" 1>&2
+			exit 1
+			;;
+		esac
+                ;;
+
 	fr*-*-*linux*)
 		supported_defaults=cpu
 		case "$with_cpu" in
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/cp/decl.c gcc-4.0.2-atmel.0.99.2/gcc/cp/decl.c
--- gcc-4.0.2/gcc/cp/decl.c	2005-09-09 02:51:56.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/cp/decl.c	2006-10-03 15:32:50.000000000 +0200
@@ -6128,6 +6128,7 @@ compute_array_index_type (tree name, tre
                name);
       else
 	error ("size of array is not an integral constant-expression");
+      size = integer_one_node;
     }
   else if (pedantic)
     {
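The added line gives compute_array_index_type a recovery value: after diagnosing a non-constant array bound, the C++ front end now continues with a dummy bound of 1 instead of an erroneous size, avoiding follow-on errors. A hypothetical, intentionally ill-formed input that exercises this path:

// Illustration only: the bound is not an integral constant-expression,
// so the front end reports the error above and, with this patch, then
// carries on as if the array had a single element.
extern int runtime_size ();

struct buffer
{
  char data[runtime_size ()];   // error: size of array is not an
                                // integral constant-expression
};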
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/c-typeck.c gcc-4.0.2-atmel.0.99.2/gcc/c-typeck.c
--- gcc-4.0.2/gcc/c-typeck.c	2005-09-06 22:10:50.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/c-typeck.c	2006-10-03 15:32:50.000000000 +0200
@@ -2742,8 +2742,13 @@ build_unary_op (enum tree_code code, tre
 	 when we have proper support for integer constant expressions.  */
       val = get_base_address (arg);
       if (val && TREE_CODE (val) == INDIRECT_REF
-	  && integer_zerop (TREE_OPERAND (val, 0)))
-	return fold_convert (argtype, fold_offsetof (arg));
+          && TREE_CONSTANT (TREE_OPERAND (val, 0)))
+	{
+	  tree op0 = fold_convert (argtype, fold_offsetof (arg)), op1;
+          
+	  op1 = fold_convert (argtype, TREE_OPERAND (val, 0));
+	  return fold (build2 (PLUS_EXPR, argtype, op0, op1));
+	}
 
       val = build1 (ADDR_EXPR, argtype, arg);
 
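The change above generalizes the existing &((struct S *)0)->member folding: whenever the dereferenced base address is constant, the address-of expression is folded to base plus offsetof instead of being built as a run-time ADDR_EXPR. A hedged sketch of the embedded-style construct this affects (the register layout and address are invented for the example; casting an integer constant to a pointer like this is a GCC extension, not an ISO address constant):

#include <stddef.h>

struct uart_regs
{
  unsigned int ctrl;
  unsigned int status;   /* offsetof (struct uart_regs, status) == 4 */
  unsigned int data;
};

#define UART ((struct uart_regs *) 0xFFFE0000u)   /* made-up MMIO base */

/* With the patch, this initializer folds to the constant
   0xFFFE0000 + offsetof (struct uart_regs, status).  */
static volatile unsigned int *const uart_status = &UART->status;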
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/doc/extend.texi gcc-4.0.2-atmel.0.99.2/gcc/doc/extend.texi
--- gcc-4.0.2/gcc/doc/extend.texi	2005-07-20 12:36:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/doc/extend.texi	2006-06-19 13:14:00.000000000 +0200
@@ -1723,13 +1723,6 @@ on data in the eight bit data area.  Not
 You must use GAS and GLD from GNU binutils version 2.7 or later for
 this attribute to work correctly.
 
-@item exception_handler
-@cindex exception handler functions on the Blackfin processor
-Use this attribute on the Blackfin to indicate that the specified function
-is an exception handler.  The compiler will generate function entry and
-exit sequences suitable for use in an exception handler when this
-attribute is present.
-
 @item far
 @cindex functions which handle memory bank switching
 On 68HC11 and 68HC12 the @code{far} attribute causes the compiler to
@@ -1873,13 +1866,13 @@ this attribute to work correctly.
 
 @item interrupt
 @cindex interrupt handler functions
-Use this attribute on the ARM, AVR, C4x, M32R/D and Xstormy16 ports to indicate
+Use this attribute on the ARM, AVR, AVR32, C4x, M32R/D and Xstormy16 ports to indicate
 that the specified function is an interrupt handler.  The compiler will
 generate function entry and exit sequences suitable for use in an
 interrupt handler when this attribute is present.
 
-Note, interrupt handlers for the Blackfin, m68k, H8/300, H8/300H, H8S, and
-SH processors can be specified via the @code{interrupt_handler} attribute.
+Note, interrupt handlers for the m68k, H8/300, H8/300H, H8S, and SH processors
+can be specified via the @code{interrupt_handler} attribute.
 
 Note, on the AVR, interrupts will be enabled inside the function.
 
@@ -1892,18 +1885,21 @@ void f () __attribute__ ((interrupt ("IR
 
 Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
 
-@item interrupt_handler
-@cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors
-Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to
-indicate that the specified function is an interrupt handler.  The compiler
-will generate function entry and exit sequences suitable for use in an
-interrupt handler when this attribute is present.
+Note, for the AVR32, you can specify which register banking scheme is
+used for the interrupt mode this interrupt handler serves, like this:
+
+@smallexample
+void f () __attribute__ ((interrupt ("FULL")));
+@end smallexample
 
-@item kspisusp
-@cindex User stack pointer in interrupts on the Blackfin
-When used together with @code{interrupt_handler}, @code{exception_handler}
-or @code{nmi_handler}, code will be generated to load the stack pointer
-from the USP register in the function prologue.
+Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
+
+@item interrupt_handler
+@cindex interrupt handler functions on the m68k, H8/300 and SH processors
+Use this attribute on the m68k, H8/300, H8/300H, H8S, and SH to indicate that
+the specified function is an interrupt handler.  The compiler will generate
+function entry and exit sequences suitable for use in an interrupt
+handler when this attribute is present.
 
 @item long_call/short_call
 @cindex indirect calls on ARM
@@ -1983,19 +1979,6 @@ use the normal calling convention based 
 This attribute can be used to cancel the effect of the @option{-mlong-calls}
 option.
 
-@item nesting
-@cindex Allow nesting in an interrupt handler on the Blackfin processor.
-Use this attribute together with @code{interrupt_handler},
-@code{exception_handler} or @code{nmi_handler} to indicate that the function
-entry code should enable nested interrupts or exceptions.
-
-@item nmi_handler
-@cindex NMI handler functions on the Blackfin processor
-Use this attribute on the Blackfin to indicate that the specified function
-is an NMI handler.  The compiler will generate function entry and
-exit sequences suitable for use in an NMI handler when this
-attribute is present.
-
 @item no_instrument_function
 @cindex @code{no_instrument_function} function attribute
 @opindex finstrument-functions
@@ -2140,8 +2123,8 @@ disabled with the linker or the loader i
 problem.)
 
 @item saveall
-@cindex save all registers on the Blackfin, H8/300, H8/300H, and H8S
-Use this attribute on the Blackfin, H8/300, H8/300H, and H8S to indicate that
+@cindex save all registers on the H8/300, H8/300H, and H8S
+Use this attribute on the H8/300, H8/300H, and H8S to indicate that
 all registers except the stack pointer should be saved in the prologue
 regardless of whether they are used or not.
 
@@ -3221,7 +3204,7 @@ struct my_unpacked_struct
     int i;
  @};
 
-struct __attribute__ ((__packed__)) my_packed_struct
+struct my_packed_struct __attribute__ ((__packed__))
   @{
      char c;
      int  i;
@@ -5449,6 +5432,7 @@ instructions, but allow the compiler to 
 @menu
 * Alpha Built-in Functions::
 * ARM Built-in Functions::
+* AVR32 Built-in Functions::
 * Blackfin Built-in Functions::
 * FR-V Built-in Functions::
 * X86 Built-in Functions::
@@ -5686,6 +5670,54 @@ long long __builtin_arm_wxor (long long,
 long long __builtin_arm_wzero ()
 @end smallexample
 
+@node AVR32 Built-in Functions
+@subsection AVR32 Built-in Functions
+
+
+@smallexample
+
+int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
+int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
+int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
+int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
+short __builtin_mulsathh_h (short, short)
+int __builtin_mulsathh_w (short, short)
+short __builtin_mulsatrndhh_h (short, short)
+int __builtin_mulsatrndwh_w (int, short)
+int __builtin_mulsatwh_w (int, short)
+int __builtin_macsathh_w (int, short, short)
+short __builtin_satadd_h (short, short)
+short __builtin_satsub_h (short, short)
+int __builtin_satadd_w (int, int)
+int __builtin_satsub_w (int, int)
+long long __builtin_mulwh_d(int, short)
+long long __builtin_mulnwh_d(int, short)
+long long __builtin_macwh_d(long long, int, short)
+long long __builtin_machh_d(long long, short, short)
+
+void __builtin_musfr(int);
+int __builtin_mustr(void);
+int __builtin_mfsr(int /*Status Register Address*/)
+void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
+int __builtin_mfdr(int /*Debug Register Address*/)
+void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
+void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
+void __builtin_sync(int /*Sync Operation*/)
+void __builtin_tlbr(void)
+void __builtin_tlbs(void)
+void __builtin_tlbw(void)
+void __builtin_breakpoint(void)
+int __builtin_xchg(void * /*Address*/, int /*Value*/ )
+short __builtin_bswap_16(short)
+int __builtin_bswap_32(int)
+void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
+int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
+void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
+long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
+void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
+
+@end smallexample
+
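A hedged usage sketch of a few of the built-ins listed above (the system-register address and values are placeholders; this only compiles with the AVR32 port added by this patch):

/* Illustration only.  0x00 is a placeholder system-register address. */
int read_system_register (void)
{
  return __builtin_mfsr (0x00);        /* mfsr: read a system register */
}

int swap_word (int w)
{
  return __builtin_bswap_32 (w);       /* byte-swap a 32-bit value */
}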
 @node Blackfin Built-in Functions
 @subsection Blackfin Built-in Functions
 
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/doc/invoke.texi gcc-4.0.2-atmel.0.99.2/gcc/doc/invoke.texi
--- gcc-4.0.2/gcc/doc/invoke.texi	2005-09-02 10:12:30.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/doc/invoke.texi	2006-02-08 17:33:56.000000000 +0100
@@ -185,7 +185,7 @@ in the following sections.
 -fno-default-inline  -fvisibility-inlines-hidden @gol
 -Wabi  -Wctor-dtor-privacy @gol
 -Wnon-virtual-dtor  -Wreorder @gol
--Weffc++  -Wno-deprecated  -Wstrict-null-sentinel @gol
+-Weffc++  -Wno-deprecated @gol
 -Wno-non-template-friend  -Wold-style-cast @gol
 -Woverloaded-virtual  -Wno-pmf-conversions @gol
 -Wsign-promo}
@@ -542,6 +542,10 @@ Objective-C and Objective-C++ Dialects}.
 -mauto-incdec  -minmax  -mlong-calls  -mshort @gol
 -msoft-reg-count=@var{count}}
 
+@emph{AVR32 Options}
+@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
+-muse-oscall -mforce-double-align -mno-init-got -mcpu=@var{cpu}}
+
 @emph{MCore Options}
 @gccoptlist{-mhardlit  -mno-hardlit  -mdiv  -mno-div  -mrelax-immediates @gol
 -mno-relax-immediates  -mwide-bitfields  -mno-wide-bitfields @gol
@@ -1738,14 +1742,6 @@ to filter out those warnings.
 @opindex Wno-deprecated
 Do not warn about usage of deprecated features.  @xref{Deprecated Features}.
 
-@item -Wstrict-null-sentinel @r{(C++ only)}
-@opindex Wstrict-null-sentinel
-Warn also about the use of an uncasted @code{NULL} as sentinel.  When
-compiling only with GCC this is a valid sentinel, as @code{NULL} is defined
-to @code{__null}.  Although it is a null pointer constant not a null pointer,
-it is guaranteed to of the same size as a pointer.  But this use is
-not portable across different compilers.
-
 @item -Wno-non-template-friend @r{(C++ only)}
 @opindex Wno-non-template-friend
 Disable warnings when non-templatized friend functions are declared
@@ -2549,13 +2545,11 @@ get these warnings.
 If you want to warn about code which uses the uninitialized value of the
 variable in its own initializer, use the @option{-Winit-self} option.
 
-These warnings occur for individual uninitialized or clobbered
-elements of structure, union or array variables as well as for
-variables which are uninitialized or clobbered as a whole.  They do
-not occur for variables or elements declared @code{volatile}.  Because
-these warnings depend on optimization, the exact variables or elements
-for which there are warnings will depend on the precise optimization
-options and version of GCC used.
+These warnings occur only for variables that are candidates for
+register allocation.  Therefore, they do not occur for a variable that
+is declared @code{volatile}, or whose address is taken, or whose size
+is other than 1, 2, 4 or 8 bytes.  Also, they do not occur for
+structures, unions or arrays, even when they are in registers.
 
 Note that there may be no warning about a variable that is used only
 to compute a value that itself is never used, because such
@@ -5584,10 +5578,6 @@ If number of candidates in the set is sm
 we always try to remove unnecessary ivs from the set during its
 optimization when a new iv is added to the set.
 
-@item scev-max-expr-size
-Bound on size of expressions used in the scalar evolutions analyzer.
-Large expressions slow the analyzer.
-
 @item max-iterations-to-track
 
 The maximum number of iterations of a loop the brute force algorithm
@@ -6695,7 +6685,7 @@ that macro, which enables you to change 
 * ARC Options::
 * ARM Options::
 * AVR Options::
-* Blackfin Options::
+* AVR32 Options::
 * CRIS Options::
 * Darwin Options::
 * DEC Alpha Options::
@@ -7147,81 +7137,55 @@ comply to the C standards, but it will p
 size.
 @end table
 
-@node Blackfin Options
-@subsection Blackfin Options
-@cindex Blackfin Options
+@node AVR32 Options
+@subsection AVR32 Options
+@cindex AVR32 Options
 
-@table @gcctabopt
-@item -momit-leaf-frame-pointer
-@opindex momit-leaf-frame-pointer
-Don't keep the frame pointer in a register for leaf functions.  This
-avoids the instructions to save, set up and restore frame pointers and
-makes an extra register available in leaf functions.  The option
-@option{-fomit-frame-pointer} removes the frame pointer for all functions
-which might make debugging harder.
+These options are defined for AVR32 implementations:
 
-@item -mspecld-anomaly
-@opindex mspecld-anomaly
-When enabled, the compiler will ensure that the generated code does not
-contain speculative loads after jump instructions.  This option is enabled
-by default.
-
-@item -mno-specld-anomaly
-@opindex mno-specld-anomaly
-Don't generate extra code to prevent speculative loads from occurring.
-
-@item -mcsync-anomaly
-@opindex mcsync-anomaly
-When enabled, the compiler will ensure that the generated code does not
-contain CSYNC or SSYNC instructions too soon after conditional branches.
-This option is enabled by default.
-
-@item -mno-csync-anomaly
-@opindex mno-csync-anomaly
-Don't generate extra code to prevent CSYNC or SSYNC instructions from
-occurring too soon after a conditional branch.
-
-@item -mlow-64k
-@opindex mlow-64k
-When enabled, the compiler is free to take advantage of the knowledge that
-the entire program fits into the low 64k of memory.
-
-@item -mno-low-64k
-@opindex mno-low-64k
-Assume that the program is arbitrarily large.  This is the default.
+@table @gcctabopt
+@item -muse-rodata-section
+@opindex muse-rodata-section
+Use section @samp{.rodata} for read-only data instead of @samp{.text}.
 
-@item -mid-shared-library
-@opindex mid-shared-library
-Generate code that supports shared libraries via the library ID method.
-This allows for execute in place and shared libraries in an environment
-without virtual memory management.  This option implies @option{-fPIC}.
+@item -mhard-float
+@opindex mhard-float
+Use floating-point coprocessor instructions. 
 
-@item -mno-id-shared-library
-@opindex mno-id-shared-library
-Generate code that doesn't assume ID based shared libraries are being used.
-This is the default.
+@item -msoft-float
+@opindex msoft-float
+Use software floating-point library. 
 
-@item -mshared-library-id=n
-@opindex mshared-library-id
-Specified the identification number of the ID based shared library being
-compiled.  Specifying a value of 0 will generate more compact code, specifying
-other values will force the allocation of that number to the current
-library but is no more space or time efficient than omitting this option.
+@item -mrelax
+@opindex mrelax
+Enable linker relaxation. This means that when the addresses of symbols
+are known at link time, the linker can optimize @samp{icall} and @samp{mcall}
+instructions into @samp{rcall} instructions where possible. Loading the
+address of a symbol can also be optimized.
+
+@item -muse-oscall
+@opindex muse-oscall
+When using gcc as a frontend for linking, this switch forces the use of
+@samp{fake} system calls in the newlib C library. These fake system
+calls are handled by some AVR32 simulators, which redirect them to the
+OS the simulator is running on. This makes it possible to perform
+file I/O when running programs in a simulator.
+
+@item -mforce-double-align
+@opindex mforce-double-align
+Force double-word alignment for double-word memory accesses.
+
+@item -mno-init-got
+@opindex mno-init-got
+Do not initialize the GOT register before using it when compiling PIC
+code.
 
-@item -mlong-calls
-@itemx -mno-long-calls
-@opindex mlong-calls
-@opindex mno-long-calls
-Tells the compiler to perform function calls by first loading the
-address of the function into a register and then performing a subroutine
-call on this register.  This switch is needed if the target function
-will lie outside of the 24 bit addressing range of the offset based
-version of subroutine call instruction.
+@item -mcpu=@var{cpu-type}
+@opindex mcpu
+Generate code for the specified cpu. Permissible names are: @samp{morgan},
+@samp{ap7000} and @samp{default}. @samp{default} is a dummy cpu which
+allows all avr32 instructions. 
 
-This feature is not enabled by default.  Specifying
-@option{-mno-long-calls} will restore the default behavior.  Note these
-switches have no effect on how the compiler generates code to handle
-function calls via function pointers.
 @end table
 
 @node CRIS Options
@@ -10853,6 +10817,7 @@ conventions that adheres to the March 19
 Application Binary Interface, PowerPC processor supplement.  This is the
 default unless you configured GCC using @samp{powerpc-*-eabiaix}.
 
+
 @item -mcall-sysv-eabi
 @opindex mcall-sysv-eabi
 Specify both @option{-mcall-sysv} and @option{-meabi} options.
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/doc/md.texi gcc-4.0.2-atmel.0.99.2/gcc/doc/md.texi
--- gcc-4.0.2/gcc/doc/md.texi	2005-08-14 01:56:45.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/doc/md.texi	2006-07-13 09:53:31.000000000 +0200
@@ -3,6 +3,7 @@
 @c This is part of the GCC manual.
 @c For copying conditions, see the file gcc.texi.
 
+
 @ifset INTERNALS
 @node Machine Desc
 @chapter Machine Descriptions
@@ -1683,6 +1684,59 @@ A memory reference suitable for iWMMXt l
 A memory reference suitable for the ARMv4 ldrsb instruction.
 @end table
 
+@item AVR32 family---@file{avr32.h}
+@table @code
+@item f
+Floating-point registers (f0 to f15)
+
+@item Ku@var{bits}
+An unsigned constant representable with @var{bits} bits (@var{bits} must be
+two digits); e.g.@: an unsigned 8-bit constant is written as @samp{Ku08}.
+ 
+@item Ks@var{bits}
+A signed constant representable with @var{bits} bits (@var{bits} must be
+two digits); e.g.@: a signed 12-bit constant is written as @samp{Ks12}.
+
+@item Is@var{bits}
+The negated range of @samp{Ks@var{bits}}: a constant whose negation fits
+in a signed @var{bits}-bit constant. This means that the constant must be
+in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}.
+
+@item G
+A single/double-precision floating-point immediate or 64-bit integer
+immediate where both the least and most significant words can be
+loaded with a move instruction. That is, the integer forms of both the
+least and most significant words are in the range
+@math{-2^{20}} to @math{2^{20}-1}.
+         
+@item RKs@var{bits}
+A memory reference where the address consists of a base register
+plus a signed immediate displacement with range given by @samp{Ks@var{bits}}
+which has the same format as for the signed immediate integer constraint
+given above.  
+
+@item RKu@var{bits}
+A memory reference where the address consists of a base register
+plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}}
+which has the same format as for the unsigned immediate integer constraint
+given above.  
+
+@item S
+A memory reference with an immediate or register offset
+
+@item T
+A memory reference to a constant pool entry
+
+@item W
+A valid operand for use in the @samp{lda.w} instruction macro when
+relaxing is enabled
+
+@item Z
+A memory reference valid for coprocessor memory instructions
+
+@end table
+
+
 @item AVR family---@file{avr.h}
 @table @code
 @item l
@@ -2069,102 +2123,6 @@ range of 1 to 2047.
 
 @end table
 
-@item Blackfin family---@file{bfin.h}
-@table @code
-@item a
-P register
-
-@item d
-D register
-
-@item z
-A call clobbered P register.
-
-@item D
-Even-numbered D register
-
-@item W
-Odd-numbered D register
-
-@item e
-Accumulator register.
-
-@item A
-Even-numbered accumulator register.
-
-@item B
-Odd-numbered accumulator register.
-
-@item b
-I register
-
-@item B
-B register
-
-@item f
-M register
-
-@item c
-Registers used for circular buffering, i.e. I, B, or L registers.
-
-@item C
-The CC register.
-
-@item x
-Any D, P, B, M, I or L register.
-
-@item y
-Additional registers typically used only in prologues and epilogues: RETS,
-RETN, RETI, RETX, RETE, ASTAT, SEQSTAT and USP.
-
-@item w
-Any register except accumulators or CC.
-
-@item Ksh
-Signed 16 bit integer (in the range -32768 to 32767)
-
-@item Kuh
-Unsigned 16 bit integer (in the range 0 to 65535)
-
-@item Ks7
-Signed 7 bit integer (in the range -64 to 63)
-
-@item Ku7
-Unsigned 7 bit integer (in the range 0 to 127)
-
-@item Ku5
-Unsigned 5 bit integer (in the range 0 to 31)
-
-@item Ks4
-Signed 4 bit integer (in the range -8 to 7)
-
-@item Ks3
-Signed 3 bit integer (in the range -3 to 4)
-
-@item Ku3
-Unsigned 3 bit integer (in the range 0 to 7)
-
-@item P@var{n}
-Constant @var{n}, where @var{n} is a single-digit constant in the range 0 to 4.
-
-@item M1
-Constant 255.
-
-@item M2
-Constant 65535.
-
-@item J
-An integer constant with exactly a single bit set.
-
-@item L
-An integer constant with all bits set except exactly one.
-
-@item H
-
-@item Q
-Any SYMBOL_REF.
-@end table
-
 @item IP2K---@file{ip2k.h}
 @table @code
 @item a
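As a cross-check of the Ks@var{bits}/Is@var{bits} descriptions above, a standalone C sketch of the two value ranges (illustration only, not part of the compiler); Is@var{bits} accepts exactly those constants whose negation fits Ks@var{bits}:

#include <stdbool.h>
#include <stdint.h>

/* Value-range checks matching the constraint descriptions above. */
static bool fits_ks (int64_t v, int bits)      /* Ks<bits> */
{
  int64_t lo = -((int64_t) 1 << (bits - 1));
  int64_t hi = ((int64_t) 1 << (bits - 1)) - 1;
  return v >= lo && v <= hi;
}

static bool fits_is (int64_t v, int bits)      /* Is<bits>: negated range */
{
  return fits_ks (-v, bits);   /* e.g. fits_is (1 << 20, 21) is true */
}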
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/expr.c gcc-4.0.2-atmel.0.99.2/gcc/expr.c
--- gcc-4.0.2/gcc/expr.c	2005-09-10 03:03:28.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/expr.c	2006-10-10 12:40:42.000000000 +0200
@@ -3314,16 +3314,17 @@ emit_single_push_insn (enum machine_mode
     }
   else
     {
+      emit_move_insn (stack_pointer_rtx,
+		      expand_binop (Pmode,
 #ifdef STACK_GROWS_DOWNWARD
-      /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC.  */
-      dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
-				GEN_INT (-(HOST_WIDE_INT) rounded_size));
+				    sub_optab,
 #else
-      /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC.  */
-      dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
-				GEN_INT (rounded_size));
+				    add_optab,
 #endif
-      dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
+				    stack_pointer_rtx,
+				    GEN_INT (rounded_size),
+				    NULL_RTX, 0, OPTAB_LIB_WIDEN));
+      dest_addr = stack_pointer_rtx;
     }
 
   dest = gen_rtx_MEM (mode, dest_addr);
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/genemit.c gcc-4.0.2-atmel.0.99.2/gcc/genemit.c
--- gcc-4.0.2/gcc/genemit.c	2004-09-09 15:22:33.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/genemit.c	2006-03-23 13:26:49.000000000 +0100
@@ -122,6 +122,24 @@ max_operand_vec (rtx insn, int arg)
 }
 
 static void
+gen_vararg_prologue(int operands)
+{
+  int i;
+
+  if (operands > 1)
+    {
+      for (i = 1; i < operands; i++)
+	printf("  rtx operand%d ATTRIBUTE_UNUSED;\n", i);
+
+      printf("  va_list args;\n\n");
+      printf("  va_start(args, operand0);\n");
+      for (i = 1; i < operands; i++)
+	printf("  operand%d = va_arg(args, rtx);\n", i);
+      printf("  va_end(args);\n\n");
+    }
+}
+
+static void
 print_code (RTX_CODE code)
 {
   const char *p1;
@@ -406,18 +424,16 @@ gen_insn (rtx insn, int lineno)
     fatal ("match_dup operand number has no match_operand");
 
   /* Output the function name and argument declarations.  */
-  printf ("rtx\ngen_%s (", XSTR (insn, 0));
+  printf ("rtx\ngen_%s ", XSTR (insn, 0));
+
   if (operands)
-    for (i = 0; i < operands; i++)
-      if (i)
-	printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
+    printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
       else
-	printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
-  else
-    printf ("void");
-  printf (")\n");
+    printf("(void)\n");
   printf ("{\n");
 
+  gen_vararg_prologue(operands);
+
   /* Output code to construct and return the rtl for the instruction body.  */
 
   if (XVECLEN (insn, 1) == 1)
@@ -457,16 +473,12 @@ gen_expand (rtx expand)
   operands = max_operand_vec (expand, 1);
 
   /* Output the function name and argument declarations.  */
-  printf ("rtx\ngen_%s (", XSTR (expand, 0));
+  printf ("rtx\ngen_%s ", XSTR (expand, 0));
   if (operands)
-    for (i = 0; i < operands; i++)
-      if (i)
-	printf (",\n\trtx operand%d", i);
-      else
-	printf ("rtx operand%d", i);
+    printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
   else
-    printf ("void");
-  printf (")\n");
+    printf("(void)\n");
+
   printf ("{\n");
 
   /* If we don't have any C code to write, only one insn is being written,
@@ -476,6 +488,8 @@ gen_expand (rtx expand)
       && operands > max_dup_opno
       && XVECLEN (expand, 1) == 1)
     {
+      gen_vararg_prologue(operands);
+
       printf ("  return ");
       gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
       printf (";\n}\n\n");
@@ -489,6 +503,7 @@ gen_expand (rtx expand)
   for (; i <= max_scratch_opno; i++)
     printf ("  rtx operand%d ATTRIBUTE_UNUSED;\n", i);
   printf ("  rtx _val = 0;\n");
+  gen_vararg_prologue(operands);
   printf ("  start_sequence ();\n");
 
   /* The fourth operand of DEFINE_EXPAND is some code to be executed
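With this change the generated gen_* functions take operand0 plus a variable argument list, and gen_vararg_prologue() emits the code that pulls the remaining operands out with va_arg. A small standalone model of that calling convention (rtx is stubbed as void * purely for illustration; the real insn-emit.c uses GCC's rtx type and is generated, not hand-written):

#include <stdarg.h>
#include <stdio.h>

typedef void *rtx;   /* stub, for illustration only */

/* Shape of the prologue genemit now writes for a three-operand pattern. */
static rtx
gen_addsi3_model (rtx operand0, ...)
{
  rtx operand1, operand2;
  va_list args;

  va_start (args, operand0);
  operand1 = va_arg (args, rtx);
  operand2 = va_arg (args, rtx);
  va_end (args);

  printf ("operands: %p %p %p\n", operand0, operand1, operand2);
  return operand0;   /* a real gen_* function returns the built insn rtl */
}

int main (void)
{
  int a, b, c;
  return gen_addsi3_model (&a, &b, &c) == (rtx) &a ? 0 : 1;
}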
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/genflags.c gcc-4.0.2-atmel.0.99.2/gcc/genflags.c
--- gcc-4.0.2/gcc/genflags.c	2004-09-09 15:22:33.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/genflags.c	2006-10-10 12:40:42.000000000 +0200
@@ -128,7 +128,6 @@ static void
 gen_proto (rtx insn)
 {
   int num = num_operands (insn);
-  int i;
   const char *name = XSTR (insn, 0);
   int truth = maybe_eval_c_test (XSTR (insn, 2));
 
@@ -159,12 +158,7 @@ gen_proto (rtx insn)
   if (num == 0)
     fputs ("void", stdout);
   else
-    {
-      for (i = 1; i < num; i++)
-	fputs ("rtx, ", stdout);
-
-      fputs ("rtx", stdout);
-    }
+    fputs("rtx, ...", stdout);
 
   puts (");");
 
@@ -174,12 +168,7 @@ gen_proto (rtx insn)
     {
       printf ("static inline rtx\ngen_%s", name);
       if (num > 0)
-	{
-	  putchar ('(');
-	  for (i = 0; i < num-1; i++)
-	    printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
-	  printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
-	}
+	puts("(rtx ARG_UNUSED(a), ...)");
       else
 	puts ("(void)");
       puts ("{\n  return 0;\n}");
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/genoutput.c gcc-4.0.2-atmel.0.99.2/gcc/genoutput.c
--- gcc-4.0.2/gcc/genoutput.c	2004-09-09 15:22:34.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/genoutput.c	2006-03-23 13:26:49.000000000 +0100
@@ -383,7 +383,7 @@ output_insn_data (void)
 	}
 
       if (d->name && d->name[0] != '*')
-	printf ("    (insn_gen_fn) gen_%s,\n", d->name);
+	printf ("    gen_%s,\n", d->name);
       else
 	printf ("    0,\n");
 
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/longlong.h gcc-4.0.2-atmel.0.99.2/gcc/longlong.h
--- gcc-4.0.2/gcc/longlong.h	2004-06-15 12:40:44.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/longlong.h	2005-06-07 14:59:22.000000000 +0200
@@ -227,6 +227,39 @@ UDItype __umulsidi3 (USItype, USItype);
 #define UDIV_TIME 100
 #endif /* __arm__ */
 
+#if defined (__avr32__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3"		\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "r" ((USItype) (ah)),					\
+	     "r" ((USItype) (bh)),					\
+	     "r" ((USItype) (al)),					\
+	     "r" ((USItype) (bl)) __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3"		\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "r" ((USItype) (ah)),					\
+	     "r" ((USItype) (bh)),					\
+	     "r" ((USItype) (al)),					\
+	     "r" ((USItype) (bl)) __CLOBBER_CC)
+
+#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
+
+#define umul_ppmm(w1, w0, u, v) \
+{									\
+  DWunion __w;								\
+  __w.ll = __umulsidi3 (u, v);						\
+  w1 = __w.s.high;							\
+  w0 = __w.s.low;							\
+}
+
+#define count_leading_zeros(COUNT,X)	((COUNT) = __builtin_clz (X))
+#define count_trailing_zeros(COUNT,X)	((COUNT) = __builtin_ctz (X))
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+
 #if defined (__hppa) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0"				\
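A standalone sketch of what the umul_ppmm macro added above computes on the 32-bit AVR32 target: the high and low words of a 32 x 32 -> 64-bit product (the real macro uses longlong.h's USItype/UDItype/DWunion types; <stdint.h> types are used here for illustration):

#include <stdint.h>
#include <stdio.h>

static void
umul_ppmm_model (uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
  uint64_t p = (uint64_t) u * (uint64_t) v;   /* __umulsidi3 (u, v) */
  *w1 = (uint32_t) (p >> 32);                 /* high word */
  *w0 = (uint32_t) p;                         /* low word  */
}

int main (void)
{
  uint32_t hi, lo;
  umul_ppmm_model (&hi, &lo, 0x89ABCDEFu, 0x12345678u);
  printf ("%08x%08x\n", (unsigned) hi, (unsigned) lo);
  return 0;
}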
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/optabs.h gcc-4.0.2-atmel.0.99.2/gcc/optabs.h
--- gcc-4.0.2/gcc/optabs.h	2005-02-12 12:34:21.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/optabs.h	2006-10-10 12:40:42.000000000 +0200
@@ -389,7 +389,7 @@ extern enum insn_code reload_out_optab[N
 extern GTY(()) optab code_to_optab[NUM_RTX_CODE + 1];
 
 
-typedef rtx (*rtxfun) (rtx);
+typedef rtx (*rtxfun) (rtx, ...);
 
 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
    gives the gen_function to make a branch to test that condition.  */
Binary files gcc-4.0.2/gcc/po/be.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/be.gmo differ
Binary files gcc-4.0.2/gcc/po/ca.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/ca.gmo differ
Binary files gcc-4.0.2/gcc/po/da.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/da.gmo differ
Binary files gcc-4.0.2/gcc/po/de.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/de.gmo differ
Binary files gcc-4.0.2/gcc/po/el.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/el.gmo differ
Binary files gcc-4.0.2/gcc/po/es.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/es.gmo differ
Binary files gcc-4.0.2/gcc/po/fr.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/fr.gmo differ
Binary files gcc-4.0.2/gcc/po/ja.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/ja.gmo differ
Binary files gcc-4.0.2/gcc/po/nl.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/nl.gmo differ
Binary files gcc-4.0.2/gcc/po/sv.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/sv.gmo differ
Binary files gcc-4.0.2/gcc/po/tr.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/tr.gmo differ
Binary files gcc-4.0.2/gcc/po/zh_CN.gmo and gcc-4.0.2-atmel.0.99.2/gcc/po/zh_CN.gmo differ
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c gcc-4.0.2-atmel.0.99.2/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c
--- gcc-4.0.2/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c	2005-02-19 20:48:02.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c	2005-06-07 14:06:28.000000000 +0200
@@ -1 +1,3 @@
-/* Test no newline at eof warning when Mac line ending is used*/
/* { dg-do compile } */
int main() { return 0; } 
\ No newline at end of file
+/* Test no newline at eof warning when Mac line ending is used*/
+/* { dg-do compile } */
+int main() { return 0; } 
diff -Nrup --ignore-space-change gcc-4.0.2/gcc/tree-ssa-operands.c gcc-4.0.2-atmel.0.99.2/gcc/tree-ssa-operands.c
--- gcc-4.0.2/gcc/tree-ssa-operands.c	2005-06-24 15:23:42.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/gcc/tree-ssa-operands.c	2005-07-15 16:06:29.000000000 +0200
@@ -1135,7 +1135,7 @@ get_expr_operands (tree stmt, tree *expr
 
       if (code == COMPONENT_REF) 
 	{
-	  if (s_ann && TREE_THIS_VOLATILE (TREE_OPERAND (expr, 1)))
+          if (TREE_THIS_VOLATILE (TREE_OPERAND (expr, 1)))
 	    s_ann->has_volatile_ops = true; 
 	  get_expr_operands (stmt, &TREE_OPERAND (expr, 2), opf_none);
 	}
diff -Nrup --ignore-space-change gcc-4.0.2/libcpp/po/sv.po gcc-4.0.2-atmel.0.99.2/libcpp/po/sv.po
--- gcc-4.0.2/libcpp/po/sv.po	2005-09-04 14:30:22.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libcpp/po/sv.po	2006-01-20 10:54:31.000000000 +0100
@@ -2,7 +2,7 @@
 # Copyright © 2000, 2005 Free Software Foundation, Inc.
 # Dennis Björklund <db@zigo.dhs.org>, 2000, 2001, 2002.
 # Göran Uddeborg <goeran@uddeborg.se>, 2005.
-# $Revision: 1.2 $
+# $Revision: 3550 $
 #
 msgid ""
 msgstr ""
Binary files gcc-4.0.2/libcpp/po/tr.gmo and gcc-4.0.2-atmel.0.99.2/libcpp/po/tr.gmo differ
Binary files gcc-4.0.2/libcpp/po/vi.gmo and gcc-4.0.2-atmel.0.99.2/libcpp/po/vi.gmo differ
diff -Nrup --ignore-space-change gcc-4.0.2/libffi/src/frv/eabi.S gcc-4.0.2-atmel.0.99.2/libffi/src/frv/eabi.S
--- gcc-4.0.2/libffi/src/frv/eabi.S	2004-08-30 17:43:03.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libffi/src/frv/eabi.S	2005-07-15 16:26:03.000000000 +0200
@@ -3,7 +3,7 @@
    
    FR-V Assembly glue.
 
-   $Id: sysv.S,v 1.1.1.1 1998/11/29 16:48:16 green Exp $
+   $Id: eabi.S 3484 2005-07-15 14:26:03Z rpedersen $
 
    Permission is hereby granted, free of charge, to any person obtaining
    a copy of this software and associated documentation files (the
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/AttributeList.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/AttributeList.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/AttributeList.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/AttributeList.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX Attribute List Interface.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: AttributeList.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: AttributeList.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/Attributes.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/Attributes.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/Attributes.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/Attributes.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the public domain.
-// $Id: Attributes.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Attributes.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ContentHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ContentHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ContentHandler.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ContentHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the public domain.
-// $Id: ContentHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: ContentHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/DocumentHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/DocumentHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/DocumentHandler.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/DocumentHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX document handler.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: DocumentHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: DocumentHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/DTDHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/DTDHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/DTDHandler.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/DTDHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX DTD handler.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: DTDHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: DTDHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/EntityResolver.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/EntityResolver.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/EntityResolver.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/EntityResolver.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX entity resolver.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: EntityResolver.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: EntityResolver.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ErrorHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ErrorHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ErrorHandler.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ErrorHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX error handler.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: ErrorHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: ErrorHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Attributes2Impl.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Attributes2Impl.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Attributes2Impl.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Attributes2Impl.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // Attributes2Impl.java - extended AttributesImpl
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: Attributes2Impl.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Attributes2Impl.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Attributes2.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Attributes2.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Attributes2.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Attributes2.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // Attributes2.java - extended Attributes
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: Attributes2.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Attributes2.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/DeclHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/DeclHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/DeclHandler.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/DeclHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // DeclHandler.java - Optional handler for DTD declaration events.
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: DeclHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: DeclHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/DefaultHandler2.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/DefaultHandler2.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/DefaultHandler2.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/DefaultHandler2.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // DefaultHandler2.java - extended DefaultHandler
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: DefaultHandler2.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: DefaultHandler2.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/EntityResolver2.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/EntityResolver2.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/EntityResolver2.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/EntityResolver2.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // EntityResolver2.java - Extended SAX entity resolver.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: EntityResolver2.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: EntityResolver2.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/LexicalHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/LexicalHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/LexicalHandler.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/LexicalHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // LexicalHandler.java - optional handler for lexical parse events.
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: LexicalHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: LexicalHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Locator2Impl.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Locator2Impl.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Locator2Impl.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Locator2Impl.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // Locator2Impl.java - extended LocatorImpl
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: Locator2Impl.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Locator2Impl.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Locator2.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Locator2.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/Locator2.java	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/Locator2.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // Locator2.java - extended Locator
 // http://www.saxproject.org
 // Public Domain: no warranty.
-// $Id: Locator2.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Locator2.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.ext;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/package.html gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/package.html
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/ext/package.html	2005-02-02 01:41:52.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/ext/package.html	2005-07-15 16:26:03.000000000 +0200
@@ -1,5 +1,5 @@
 <HTML><HEAD>
-<!-- $Id: package.html,v 1.1 2004/12/23 22:38:42 mark Exp $ -->
+<!-- $Id: package.html 3484 2005-07-15 14:26:03Z rpedersen $ -->
 </HEAD><BODY>
 
 <p>
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/HandlerBase.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/HandlerBase.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/HandlerBase.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/HandlerBase.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX default handler base class.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: HandlerBase.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: HandlerBase.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/AttributeListImpl.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/AttributeListImpl.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/AttributeListImpl.java	2005-02-02 01:41:53.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/AttributeListImpl.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX default implementation for AttributeList.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: AttributeListImpl.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: AttributeListImpl.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/AttributesImpl.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/AttributesImpl.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/AttributesImpl.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/AttributesImpl.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the public domain.
-// $Id: AttributesImpl.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: AttributesImpl.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/DefaultHandler.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/DefaultHandler.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/DefaultHandler.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/DefaultHandler.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the public domain.
-// $Id: DefaultHandler.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: DefaultHandler.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/LocatorImpl.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/LocatorImpl.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/LocatorImpl.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/LocatorImpl.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX default implementation for Locator.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: LocatorImpl.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: LocatorImpl.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/NamespaceSupport.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/NamespaceSupport.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/NamespaceSupport.java	2005-04-06 23:38:28.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/NamespaceSupport.java	2005-06-07 14:06:28.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // This class is in the Public Domain.  NO WARRANTY!
-// $Id: NamespaceSupport.java,v 1.2 2005/03/24 00:04:07 tromey Exp $
+// $Id: NamespaceSupport.java 3462 2005-06-07 12:06:28Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/NewInstance.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/NewInstance.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/NewInstance.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/NewInstance.java	2005-07-15 16:26:03.000000000 +0200
@@ -3,7 +3,7 @@
 // Written by Edwin Goei, edwingo@apache.org
 // and by David Brownell, dbrownell@users.sourceforge.net
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: NewInstance.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: NewInstance.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/package.html gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/package.html
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/package.html	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/package.html	2005-07-15 16:26:03.000000000 +0200
@@ -1,5 +1,5 @@
 <HTML><HEAD>
-<!-- $Id: package.html,v 1.1 2004/12/23 22:38:42 mark Exp $ -->
+<!-- $Id: package.html 3484 2005-07-15 14:26:03Z rpedersen $ -->
 </HEAD><BODY>
 
 <p>This package contains "helper" classes, including
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/ParserAdapter.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/ParserAdapter.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/ParserAdapter.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/ParserAdapter.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the public domain.
-// $Id: ParserAdapter.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: ParserAdapter.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/ParserFactory.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/ParserFactory.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/ParserFactory.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/ParserFactory.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX parser factory.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: ParserFactory.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: ParserFactory.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/XMLFilterImpl.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/XMLFilterImpl.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/XMLFilterImpl.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/XMLFilterImpl.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: XMLFilterImpl.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: XMLFilterImpl.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderAdapter.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderAdapter.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderAdapter.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderAdapter.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the public domain.
-// $Id: XMLReaderAdapter.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: XMLReaderAdapter.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderFactory.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderFactory.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderFactory.java	2005-02-02 01:41:54.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/helpers/XMLReaderFactory.java	2005-07-15 16:26:03.000000000 +0200
@@ -3,7 +3,7 @@
 // Written by David Megginson
 // and by David Brownell
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: XMLReaderFactory.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: XMLReaderFactory.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax.helpers;
 import java.io.BufferedReader;
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/InputSource.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/InputSource.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/InputSource.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/InputSource.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX input source.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: InputSource.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: InputSource.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/Locator.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/Locator.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/Locator.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/Locator.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX locator interface for document events.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: Locator.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Locator.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/package.html gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/package.html
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/package.html	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/package.html	2005-07-15 16:26:03.000000000 +0200
@@ -1,5 +1,5 @@
 <html><head>
-<!-- $Id: package.html,v 1.1 2004/12/23 22:38:42 mark Exp $ -->
+<!-- $Id: package.html 3484 2005-07-15 14:26:03Z rpedersen $ -->
 </head><body>
 
 <p> This package provides the core SAX APIs.
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/Parser.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/Parser.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/Parser.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/Parser.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX parser interface.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: Parser.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: Parser.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXException.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXException.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXException.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXException.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX exception class.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: SAXException.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: SAXException.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXNotRecognizedException.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXNotRecognizedException.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXNotRecognizedException.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXNotRecognizedException.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: SAXNotRecognizedException.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: SAXNotRecognizedException.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXNotSupportedException.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXNotSupportedException.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXNotSupportedException.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXNotSupportedException.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: SAXNotSupportedException.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: SAXNotSupportedException.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXParseException.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXParseException.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/SAXParseException.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/SAXParseException.java	2005-07-15 16:26:03.000000000 +0200
@@ -1,7 +1,7 @@
 // SAX exception class.
 // http://www.saxproject.org
 // No warranty; no copyright -- use this as you will.
-// $Id: SAXParseException.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: SAXParseException.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/XMLFilter.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/XMLFilter.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/XMLFilter.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/XMLFilter.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: XMLFilter.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: XMLFilter.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
diff -Nrup --ignore-space-change gcc-4.0.2/libjava/external/sax/org/xml/sax/XMLReader.java gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/XMLReader.java
--- gcc-4.0.2/libjava/external/sax/org/xml/sax/XMLReader.java	2005-02-02 01:41:51.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libjava/external/sax/org/xml/sax/XMLReader.java	2005-07-15 16:26:03.000000000 +0200
@@ -2,7 +2,7 @@
 // http://www.saxproject.org
 // Written by David Megginson
 // NO WARRANTY!  This class is in the Public Domain.
-// $Id: XMLReader.java,v 1.1 2004/12/23 22:38:42 mark Exp $
+// $Id: XMLReader.java 3484 2005-07-15 14:26:03Z rpedersen $
 
 package org.xml.sax;
 
Binary files gcc-4.0.2/libjava/gnu/java/awt/doc-files/BitwiseXORComposite-1.png and gcc-4.0.2-atmel.0.99.2/libjava/gnu/java/awt/doc-files/BitwiseXORComposite-1.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/Area-1.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/Area-1.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/CubicCurve2D-1.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/CubicCurve2D-1.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/CubicCurve2D-2.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/CubicCurve2D-2.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/CubicCurve2D-3.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/CubicCurve2D-3.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/CubicCurve2D-4.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/CubicCurve2D-4.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/CubicCurve2D-5.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/CubicCurve2D-5.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/Ellipse-1.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/Ellipse-1.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/GeneralPath-1.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/GeneralPath-1.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/QuadCurve2D-1.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/QuadCurve2D-1.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/QuadCurve2D-2.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/QuadCurve2D-2.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/QuadCurve2D-3.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/QuadCurve2D-3.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/QuadCurve2D-4.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/QuadCurve2D-4.png differ
Binary files gcc-4.0.2/libjava/java/awt/geom/doc-files/QuadCurve2D-5.png and gcc-4.0.2-atmel.0.99.2/libjava/java/awt/geom/doc-files/QuadCurve2D-5.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/BevelBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/BevelBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/BevelBorder-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/BevelBorder-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/BevelBorder-3.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/BevelBorder-3.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/EmptyBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/EmptyBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/EtchedBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/EtchedBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/EtchedBorder-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/EtchedBorder-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/LineBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/LineBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/MatteBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/MatteBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/MatteBorder-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/MatteBorder-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/MatteBorder-3.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/MatteBorder-3.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/MatteBorder-4.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/MatteBorder-4.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/MatteBorder-5.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/MatteBorder-5.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/MatteBorder-6.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/MatteBorder-6.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/SoftBevelBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/SoftBevelBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/SoftBevelBorder-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/SoftBevelBorder-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/border/doc-files/SoftBevelBorder-3.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/border/doc-files/SoftBevelBorder-3.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.ButtonBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.ButtonBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.FieldBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.FieldBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.MarginBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.MarginBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.MenuBarBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.MenuBarBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.RadioButtonBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.RadioButtonBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.SplitPaneBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.SplitPaneBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.SplitPaneBorder-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.SplitPaneBorder-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.SplitPaneDividerBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.SplitPaneDividerBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.ToggleButtonBorder-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicBorders.ToggleButtonBorder-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-2.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-2.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-3.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-3.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-4.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-4.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-5.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-5.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-6.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-6.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-7.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/basic/doc-files/BasicGraphicsUtils-7.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/doc-files/ComponentUI-1.dia and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/doc-files/ComponentUI-1.dia differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/doc-files/ComponentUI-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/doc-files/ComponentUI-1.png differ
Binary files gcc-4.0.2/libjava/javax/swing/plaf/doc-files/TreeUI-1.png and gcc-4.0.2-atmel.0.99.2/libjava/javax/swing/plaf/doc-files/TreeUI-1.png differ
Binary files gcc-4.0.2/libjava/testsuite/libjava.jar/simple.jar and gcc-4.0.2-atmel.0.99.2/libjava/testsuite/libjava.jar/simple.jar differ
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/acinclude.m4 gcc-4.0.2-atmel.0.99.2/libstdc++-v3/acinclude.m4
--- gcc-4.0.2/libstdc++-v3/acinclude.m4	2005-06-23 11:23:59.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/acinclude.m4	2006-03-28 10:54:09.000000000 +0200
@@ -139,6 +139,15 @@ AC_DEFUN([GLIBCXX_CONFIGURE], [
   #enable_symvers=no
   #enable_hosted_libstdcxx=yes
 
+  # Check for uClibc since Linux platforms use different configuration
+  # directories depending on the C library in use.
+  AC_EGREP_CPP([_using_uclibc], [
+  #include <stdio.h>
+  #if __UCLIBC__
+    _using_uclibc
+  #endif
+  ], uclibc=yes, uclibc=no)
+
   # Find platform-specific directories containing configuration info.
   # Also possibly modify flags used elsewhere, as needed by the platform.
   GLIBCXX_CHECK_HOST
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/configure.host gcc-4.0.2-atmel.0.99.2/libstdc++-v3/configure.host
--- gcc-4.0.2/libstdc++-v3/configure.host	2005-06-17 02:22:20.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/configure.host	2006-03-28 10:54:09.000000000 +0200
@@ -193,8 +193,15 @@ case "${host_os}" in
   freebsd*)
     os_include_dir="os/bsd/freebsd"
     ;;
+  linux-uclibc*)
+    os_include_dir="os/uclibc-linux"
+    ;;
   gnu* | linux* | kfreebsd*-gnu | knetbsd*-gnu)
+    if [ "$uclibc" = "yes" ]; then
+     os_include_dir="os/uclibc"
+    else
     os_include_dir="os/gnu-linux"
+    fi
     ;;
   hpux*)
     os_include_dir="os/hpux"
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/crossconfig.m4 gcc-4.0.2-atmel.0.99.2/libstdc++-v3/crossconfig.m4
--- gcc-4.0.2/libstdc++-v3/crossconfig.m4	2005-02-01 07:56:19.000000000 +0100
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/crossconfig.m4	2006-03-28 10:54:09.000000000 +0200
@@ -148,9 +148,13 @@ case "${host}" in
       fp.h float.h endian.h inttypes.h locale.h float.h stdint.h])
     SECTION_FLAGS='-ffunction-sections -fdata-sections'
     AC_SUBST(SECTION_FLAGS)
+    GLIBCXX_CHECK_COMPILER_FEATURES
     GLIBCXX_CHECK_LINKER_FEATURES
+    GLIBCXX_CHECK_MATH_SUPPORT
+    GLIBCXX_CHECK_BUILTIN_MATH_SUPPORT
     GLIBCXX_CHECK_COMPLEX_MATH_SUPPORT
-    GLIBCXX_CHECK_WCHAR_T_SUPPORT
+    GLIBCXX_CHECK_ICONV_SUPPORT
+    GLIBCXX_CHECK_STDLIB_SUPPORT
 
     # For LFS.
     AC_DEFINE(HAVE_INT64_T)
Binary files gcc-4.0.2/libstdc++-v3/docs/html/17_intro/confdeps.png and gcc-4.0.2-atmel.0.99.2/libstdc++-v3/docs/html/17_intro/confdeps.png differ
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/include/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/include/Makefile.in
--- gcc-4.0.2/libstdc++-v3/include/Makefile.in	2005-06-22 22:39:09.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/include/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -36,6 +36,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
 	$(top_srcdir)/fragment.am
 subdir = include
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/libmath/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/libmath/Makefile.in
--- gcc-4.0.2/libstdc++-v3/libmath/Makefile.in	2005-06-22 22:39:18.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/libmath/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -14,8 +14,6 @@
 
 @SET_MAKE@
 
-SOURCES = $(libmath_la_SOURCES)
-
 srcdir = @srcdir@
 top_srcdir = @top_srcdir@
 VPATH = @srcdir@
@@ -39,6 +37,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 subdir = libmath
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/libsupc++/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/libsupc++/Makefile.in
--- gcc-4.0.2/libstdc++-v3/libsupc++/Makefile.in	2005-06-22 22:39:21.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/libsupc++/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -15,8 +15,6 @@
 @SET_MAKE@
 
 
-SOURCES = $(libsupc___la_SOURCES) $(libsupc__convenience_la_SOURCES)
-
 srcdir = @srcdir@
 top_srcdir = @top_srcdir@
 VPATH = @srcdir@
@@ -40,6 +38,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
 	$(srcdir)/Makefile.in $(top_srcdir)/fragment.am
 subdir = libsupc++
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/Makefile.in
--- gcc-4.0.2/libstdc++-v3/Makefile.in	2005-06-22 22:37:12.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -36,6 +36,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 DIST_COMMON = README $(am__configure_deps) $(srcdir)/../config.guess \
 	$(srcdir)/../config.sub $(srcdir)/../install-sh \
 	$(srcdir)/../ltmain.sh $(srcdir)/../missing \
@@ -408,7 +409,13 @@ uninstall-info-am:
 #     (which will cause the Makefiles to be regenerated when you run `make');
 # (2) otherwise, pass the desired values on the `make' command line.
 $(RECURSIVE_TARGETS):
-	@set fnord $$MAKEFLAGS; amf=$$2; \
+	@failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
 	dot_seen=no; \
 	target=`echo $@ | sed s/-recursive//`; \
 	list='$(SUBDIRS)'; for subdir in $$list; do \
@@ -420,7 +427,7 @@ $(RECURSIVE_TARGETS):
 	    local_target="$$target"; \
 	  fi; \
 	  (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-	   || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+	  || eval $$failcom; \
 	done; \
 	if test "$$dot_seen" = "no"; then \
 	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
@@ -428,7 +435,13 @@ $(RECURSIVE_TARGETS):
 
 mostlyclean-recursive clean-recursive distclean-recursive \
 maintainer-clean-recursive:
-	@set fnord $$MAKEFLAGS; amf=$$2; \
+	@failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
 	dot_seen=no; \
 	case "$@" in \
 	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
@@ -449,7 +462,7 @@ maintainer-clean-recursive:
 	    local_target="$$target"; \
 	  fi; \
 	  (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-	   || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+	  || eval $$failcom; \
 	done && test -z "$$fail"
 tags-recursive:
 	list='$(SUBDIRS)'; for subdir in $$list; do \
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/po/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/po/Makefile.in
--- gcc-4.0.2/libstdc++-v3/po/Makefile.in	2005-06-22 22:39:24.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/po/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -36,6 +36,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
 	$(top_srcdir)/fragment.am
 subdir = po
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/src/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/src/Makefile.in
--- gcc-4.0.2/libstdc++-v3/src/Makefile.in	2005-06-22 22:39:26.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/src/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -14,8 +14,6 @@
 
 @SET_MAKE@
 
-SOURCES = $(libstdc___la_SOURCES)
-
 srcdir = @srcdir@
 top_srcdir = @top_srcdir@
 pkgdatadir = $(datadir)/@PACKAGE@
@@ -38,6 +36,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
 	$(top_srcdir)/fragment.am
 subdir = src
diff -Nrup --ignore-space-change gcc-4.0.2/libstdc++-v3/testsuite/Makefile.in gcc-4.0.2-atmel.0.99.2/libstdc++-v3/testsuite/Makefile.in
--- gcc-4.0.2/libstdc++-v3/testsuite/Makefile.in	2005-06-22 22:39:30.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/libstdc++-v3/testsuite/Makefile.in	2006-03-28 10:54:09.000000000 +0200
@@ -1,8 +1,8 @@
-# Makefile.in generated by automake 1.9.3 from Makefile.am.
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
 # @configure_input@
 
 # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004  Free Software Foundation, Inc.
+# 2003, 2004, 2005  Free Software Foundation, Inc.
 # This Makefile.in is free software; the Free Software Foundation
 # gives unlimited permission to copy and/or distribute it,
 # with or without modifications, as long as this notice is preserved.
@@ -14,8 +14,6 @@
 
 @SET_MAKE@
 
-SOURCES = $(libv3test_a_SOURCES)
-
 srcdir = @srcdir@
 top_srcdir = @top_srcdir@
 VPATH = @srcdir@
@@ -39,6 +37,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+LIBOBJDIR =
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
 	$(top_srcdir)/fragment.am
 subdir = testsuite
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/mtest.adb gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/mtest.adb
--- gcc-4.0.2/zlib/contrib/ada/mtest.adb	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/mtest.adb	2005-07-15 16:26:03.000000000 +0200
@@ -8,7 +8,7 @@
 --  Continuous test for ZLib multithreading. If the test is fail
 --  Wou should provide thread safe allocation routines for the Z_Stream.
 --
---  $Id: mtest.adb,v 1.2 2003/08/12 12:11:05 vagul Exp $
+--  $Id: mtest.adb 3484 2005-07-15 14:26:03Z rpedersen $
 
 with ZLib;
 with Ada.Streams;
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/read.adb gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/read.adb
--- gcc-4.0.2/zlib/contrib/ada/read.adb	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/read.adb	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: read.adb,v 1.7 2003/08/12 12:12:35 vagul Exp $
+--  $Id: read.adb 3484 2005-07-15 14:26:03Z rpedersen $
 
 --  Test/demo program for the generic read interface.
 
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/test.adb gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/test.adb
--- gcc-4.0.2/zlib/contrib/ada/test.adb	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/test.adb	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: test.adb,v 1.17 2003/08/12 12:13:30 vagul Exp $
+--  $Id: test.adb 3484 2005-07-15 14:26:03Z rpedersen $
 
 --  The program has a few aims.
 --  1. Test ZLib.Ada95 thick binding functionality.
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/zlib.adb gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib.adb
--- gcc-4.0.2/zlib/contrib/ada/zlib.adb	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib.adb	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: zlib.adb,v 1.19 2003/07/13 16:02:19 vagul Exp $
+--  $Id: zlib.adb 3484 2005-07-15 14:26:03Z rpedersen $
 
 with Ada.Exceptions;
 with Ada.Unchecked_Conversion;
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/zlib.ads gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib.ads
--- gcc-4.0.2/zlib/contrib/ada/zlib.ads	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib.ads	2005-07-15 16:26:03.000000000 +0200
@@ -25,7 +25,7 @@
 --  covered by the  GNU Public License.                                     --
 ------------------------------------------------------------------------------
 
---  $Id: zlib.ads,v 1.17 2003/08/12 13:19:07 vagul Exp $
+--  $Id: zlib.ads 3484 2005-07-15 14:26:03Z rpedersen $
 
 with Ada.Streams;
 
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/zlib-streams.adb gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-streams.adb
--- gcc-4.0.2/zlib/contrib/ada/zlib-streams.adb	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-streams.adb	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: zlib-streams.adb,v 1.9 2003/08/12 13:15:31 vagul Exp $
+--  $Id: zlib-streams.adb 3484 2005-07-15 14:26:03Z rpedersen $
 
 with Ada.Unchecked_Deallocation;
 
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/zlib-streams.ads gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-streams.ads
--- gcc-4.0.2/zlib/contrib/ada/zlib-streams.ads	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-streams.ads	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: zlib-streams.ads,v 1.11 2003/08/12 13:15:31 vagul Exp $
+--  $Id: zlib-streams.ads 3484 2005-07-15 14:26:03Z rpedersen $
 
 package ZLib.Streams is
 
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/zlib-thin.adb gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-thin.adb
--- gcc-4.0.2/zlib/contrib/ada/zlib-thin.adb	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-thin.adb	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: zlib-thin.adb,v 1.6 2003/01/21 15:26:37 vagul Exp $
+--  $Id: zlib-thin.adb 3484 2005-07-15 14:26:03Z rpedersen $
 
 package body ZLib.Thin is
 
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/contrib/ada/zlib-thin.ads gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-thin.ads
--- gcc-4.0.2/zlib/contrib/ada/zlib-thin.ads	2004-10-11 19:44:23.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/contrib/ada/zlib-thin.ads	2005-07-15 16:26:03.000000000 +0200
@@ -6,7 +6,7 @@
 --  Open source license information is in the zlib.ads file.  --
 ----------------------------------------------------------------
 
---  $Id: zlib-thin.ads,v 1.8 2003/08/12 13:16:51 vagul Exp $
+--  $Id: zlib-thin.ads 3484 2005-07-15 14:26:03Z rpedersen $
 
 with Interfaces.C.Strings;
 with System.Address_To_Access_Conversions;
diff -Nrup --ignore-space-change gcc-4.0.2/zlib/zconf.in.h gcc-4.0.2-atmel.0.99.2/zlib/zconf.in.h
--- gcc-4.0.2/zlib/zconf.in.h	2004-10-11 19:44:14.000000000 +0200
+++ gcc-4.0.2-atmel.0.99.2/zlib/zconf.in.h	2005-07-15 16:26:03.000000000 +0200
@@ -3,7 +3,7 @@
  * For conditions of distribution and use, see copyright notice in zlib.h
  */
 
-/* @(#) $Id$ */
+/* @(#) $Id: zconf.in.h 3484 2005-07-15 14:26:03Z rpedersen $ */
 
 #ifndef ZCONF_H
 #define ZCONF_H