path: root/recipes/gcc/gcc-4.1.2
Diffstat (limited to 'recipes/gcc/gcc-4.1.2')
-rw-r--r--  recipes/gcc/gcc-4.1.2/gcc-config-nios2.patch |    41
-rw-r--r--  recipes/gcc/gcc-4.1.2/gcc-nios2.patch        | 11017
-rw-r--r--  recipes/gcc/gcc-4.1.2/nios2-lib-flags.patch  |    10
3 files changed, 11068 insertions, 0 deletions
diff --git a/recipes/gcc/gcc-4.1.2/gcc-config-nios2.patch b/recipes/gcc/gcc-4.1.2/gcc-config-nios2.patch
new file mode 100644
index 0000000000..1cd5a955d3
--- /dev/null
+++ b/recipes/gcc/gcc-4.1.2/gcc-config-nios2.patch
@@ -0,0 +1,41 @@
+Index: gcc-4.1.2/config.sub
+===================================================================
+--- gcc-4.1.2.orig/config.sub 2010-05-27 15:30:45.069455792 +0200
++++ gcc-4.1.2/config.sub 2010-05-27 15:36:32.868205558 +0200
+@@ -267,6 +267,7 @@
+ | mn10200 | mn10300 \
+ | mt \
+ | msp430 \
++ | nios2 \
+ | ns16k | ns32k \
+ | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+@@ -349,6 +350,7 @@
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
++ | nios2-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+Index: gcc-4.1.2/gcc/config.gcc
+===================================================================
+--- gcc-4.1.2.orig/gcc/config.gcc 2010-05-27 15:30:45.039455207 +0200
++++ gcc-4.1.2/gcc/config.gcc 2010-05-27 15:33:26.249455379 +0200
+@@ -1597,6 +1597,16 @@
+ tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
+ tmake_file="${tmake_file} mt/t-mt"
+ ;;
++nios2-*-*)
++ tm_file="elfos.h ${tm_file}"
++ tmake_file="${tmake_file} nios2/t-nios2"
++ case ${target} in
++ nios2-*-linux*)
++ tm_file="${tm_file} linux.h nios2/linux.h"
++ tmake_file="${tmake_file} nios2/t-linux"
++ ;;
++ esac
++ ;;
+ ns32k-*-netbsdelf*)
+ echo "GCC does not yet support the ${target} target"; exit 1
+ ;;
diff --git a/recipes/gcc/gcc-4.1.2/gcc-nios2.patch b/recipes/gcc/gcc-4.1.2/gcc-nios2.patch
new file mode 100644
index 0000000000..80a46b7751
--- /dev/null
+++ b/recipes/gcc/gcc-4.1.2/gcc-nios2.patch
@@ -0,0 +1,11017 @@
+Index: gcc-4.1.2/gcc/config/nios2/crti.asm
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/crti.asm 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,97 @@
++/* NOT ASSIGNED TO FSF. COPYRIGHT ALTERA. */
++/*
++ Copyright (C) 2003
++ by Jonah Graham (jgraham@altera.com)
++
++This file is free software; you can redistribute it and/or modify it
++under the terms of the GNU General Public License as published by the
++Free Software Foundation; either version 2, or (at your option) any
++later version.
++
++In addition to the permissions in the GNU General Public License, the
++Free Software Foundation gives you unlimited permission to link the
++compiled version of this file with other programs, and to distribute
++those programs without any restriction coming from the use of this
++file. (The General Public License restrictions do apply in other
++respects; for example, they cover modification of the file, and
++distribution when not linked into another program.)
++
++This file is distributed in the hope that it will be useful, but
++WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; see the file COPYING. If not, write to
++the Free Software Foundation, 59 Temple Place - Suite 330,
++Boston, MA 02111-1307, USA.
++
++ As a special exception, if you link this library with files
++ compiled with GCC to produce an executable, this does not cause
++ the resulting executable to be covered by the GNU General Public License.
++ This exception does not however invalidate any other reasons why
++ the executable file might be covered by the GNU General Public License.
++
++
++This file just makes a stack frame for the contents of the .fini and
++.init sections. Users may put any desired instructions in those
++sections.
++
++
++While technically any code can be put in the init and fini sections,
++most stuff will not work other than code which obeys the call frame
++and ABI. All the call-preserved registers are saved; the call-clobbered
++registers should have been saved by the code calling init and fini.
++
++See crtstuff.c for an example of code that inserts itself in the
++init and fini sections.
++
++See crt0.s for the code that calls init and fini.
++*/
++
++ .file "crti.asm"
++
++ .section ".init"
++ .align 2
++ .global _init
++_init:
++ addi sp, sp, -48
++ stw ra, 44(sp)
++ stw r23, 40(sp)
++ stw r22, 36(sp)
++ stw r21, 32(sp)
++ stw r20, 28(sp)
++ stw r19, 24(sp)
++ stw r18, 20(sp)
++ stw r17, 16(sp)
++ stw r16, 12(sp)
++ stw fp, 8(sp)
++ addi fp, sp, 8
++ nextpc r22
++1: movhi r2, %hiadj(_GLOBAL_OFFSET_TABLE_ - 1b)
++ addi r2, r2, %lo(_GLOBAL_OFFSET_TABLE_ - 1b)
++ add r22, r22, r2
++
++
++ .section ".fini"
++ .align 2
++ .global _fini
++_fini:
++ addi sp, sp, -48
++ stw ra, 44(sp)
++ stw r23, 40(sp)
++ stw r22, 36(sp)
++ stw r21, 32(sp)
++ stw r20, 28(sp)
++ stw r19, 24(sp)
++ stw r18, 20(sp)
++ stw r17, 16(sp)
++ stw r16, 12(sp)
++ stw fp, 8(sp)
++ addi fp, sp, 8
++ nextpc r22
++1: movhi r2, %hiadj(_GLOBAL_OFFSET_TABLE_ - 1b)
++ addi r2, r2, %lo(_GLOBAL_OFFSET_TABLE_ - 1b)
++ add r22, r22, r2
++
++
+Index: gcc-4.1.2/gcc/config/nios2/crtn.asm
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/crtn.asm 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,71 @@
++/* NOT ASSIGNED TO FSF. COPYRIGHT ALTERA. */
++/*
++ Copyright (C) 2003
++ by Jonah Graham (jgraham@altera.com)
++
++This file is free software; you can redistribute it and/or modify it
++under the terms of the GNU General Public License as published by the
++Free Software Foundation; either version 2, or (at your option) any
++later version.
++
++In addition to the permissions in the GNU General Public License, the
++Free Software Foundation gives you unlimited permission to link the
++compiled version of this file with other programs, and to distribute
++those programs without any restriction coming from the use of this
++file. (The General Public License restrictions do apply in other
++respects; for example, they cover modification of the file, and
++distribution when not linked into another program.)
++
++This file is distributed in the hope that it will be useful, but
++WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; see the file COPYING. If not, write to
++the Free Software Foundation, 59 Temple Place - Suite 330,
++Boston, MA 02111-1307, USA.
++
++ As a special exception, if you link this library with files
++ compiled with GCC to produce an executable, this does not cause
++ the resulting executable to be covered by the GNU General Public License.
++ This exception does not however invalidate any other reasons why
++ the executable file might be covered by the GNU General Public License.
++
++
++This file just makes sure that the .fini and .init sections do in
++fact return. Users may put any desired instructions in those sections.
++This file is the last thing linked into any executable.
++*/
++ .file "crtn.asm"
++
++
++
++ .section ".init"
++ ldw ra, 44(sp)
++ ldw r23, 40(sp)
++ ldw r22, 36(sp)
++ ldw r21, 32(sp)
++ ldw r20, 28(sp)
++ ldw r19, 24(sp)
++ ldw r18, 20(sp)
++ ldw r17, 16(sp)
++ ldw r16, 12(sp)
++ ldw fp, 8(sp)
++ addi sp, sp, 48
++ ret
++
++ .section ".fini"
++ ldw ra, 44(sp)
++ ldw r23, 40(sp)
++ ldw r22, 36(sp)
++ ldw r21, 32(sp)
++ ldw r20, 28(sp)
++ ldw r19, 24(sp)
++ ldw r18, 20(sp)
++ ldw r17, 16(sp)
++ ldw r16, 12(sp)
++ ldw fp, 8(sp)
++ addi sp, sp, 48
++ ret
++
+Index: gcc-4.1.2/gcc/config/nios2/lib2-divmod-hi.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/lib2-divmod-hi.c 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,133 @@
++
++/* We include auto-host.h here to get HAVE_GAS_HIDDEN. This is
++ supposedly valid even though this is a "target" file. */
++#include "auto-host.h"
++#undef gid_t
++#undef pid_t
++#undef rlim_t
++#undef ssize_t
++#undef uid_t
++#undef vfork
++
++
++#include "tconfig.h"
++#include "tsystem.h"
++#include "coretypes.h"
++#include "tm.h"
++
++
++/* Don't use `fancy_abort' here even if config.h says to use it. */
++#ifdef abort
++#undef abort
++#endif
++
++
++#ifdef HAVE_GAS_HIDDEN
++#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
++#else
++#define ATTRIBUTE_HIDDEN
++#endif
++
++#ifndef MIN_UNITS_PER_WORD
++#define MIN_UNITS_PER_WORD UNITS_PER_WORD
++#endif
++
++#include "libgcc2.h"
++
++extern HItype __modhi3 (HItype, HItype);
++extern HItype __divhi3 (HItype, HItype);
++extern UHItype __umodhi3 (UHItype, UHItype);
++extern UHItype __udivhi3 (UHItype, UHItype);
++
++static UHItype udivmodhi4(UHItype, UHItype, word_type);
++
++static UHItype
++udivmodhi4(UHItype num, UHItype den, word_type modwanted)
++{
++ UHItype bit = 1;
++ UHItype res = 0;
++
++ while (den < num && bit && !(den & (1L<<15)))
++ {
++ den <<=1;
++ bit <<=1;
++ }
++ while (bit)
++ {
++ if (num >= den)
++ {
++ num -= den;
++ res |= bit;
++ }
++ bit >>=1;
++ den >>=1;
++ }
++ if (modwanted) return num;
++ return res;
++}
++
++
++HItype
++__divhi3 (HItype a, HItype b)
++{
++ word_type neg = 0;
++ HItype res;
++
++ if (a < 0)
++ {
++ a = -a;
++ neg = !neg;
++ }
++
++ if (b < 0)
++ {
++ b = -b;
++ neg = !neg;
++ }
++
++ res = udivmodhi4 (a, b, 0);
++
++ if (neg)
++ res = -res;
++
++ return res;
++}
++
++
++HItype
++__modhi3 (HItype a, HItype b)
++{
++ word_type neg = 0;
++ HItype res;
++
++ if (a < 0)
++ {
++ a = -a;
++ neg = 1;
++ }
++
++ if (b < 0)
++ b = -b;
++
++ res = udivmodhi4 (a, b, 1);
++
++ if (neg)
++ res = -res;
++
++ return res;
++}
++
++
++UHItype
++__udivhi3 (UHItype a, UHItype b)
++{
++ return udivmodhi4 (a, b, 0);
++}
++
++
++UHItype
++__umodhi3 (UHItype a, UHItype b)
++{
++ return udivmodhi4 (a, b, 1);
++}
++
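The udivmodhi4 helper above is a classic shift-subtract (restoring) division loop: the divisor is shifted left until it would pass the dividend or its top bit, then subtracted back out one quotient bit at a time. A minimal host-testable sketch of the same algorithm, using plain unsigned int instead of the libgcc2.h types (the function name and example below are illustrative, not part of the patch):

    /* Restoring shift-subtract division; returns the quotient and the
       remainder via *rem.  Mirrors udivmodhi4/udivmodsi4 above.  */
    static unsigned int
    udivmod_sketch (unsigned int num, unsigned int den, unsigned int *rem)
    {
      unsigned int bit = 1, res = 0;

      /* Align DEN with the high bits of NUM.  */
      while (den < num && bit && !(den & (1u << 31)))
        {
          den <<= 1;
          bit <<= 1;
        }
      /* Peel off one quotient bit per shifted copy of DEN.  */
      while (bit)
        {
          if (num >= den)
            {
              num -= den;
              res |= bit;
            }
          bit >>= 1;
          den >>= 1;
        }
      *rem = num;
      return res;
    }

    /* Example: udivmod_sketch (100, 7, &r) returns 14 with r == 2.  */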
+Index: gcc-4.1.2/gcc/config/nios2/lib2-divmod.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/lib2-divmod.c 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,136 @@
++
++/* We include auto-host.h here to get HAVE_GAS_HIDDEN. This is
++ supposedly valid even though this is a "target" file. */
++#include "auto-host.h"
++#undef gid_t
++#undef pid_t
++#undef rlim_t
++#undef ssize_t
++#undef uid_t
++#undef vfork
++
++
++#include "tconfig.h"
++#include "tsystem.h"
++#include "coretypes.h"
++#include "tm.h"
++
++
++/* Don't use `fancy_abort' here even if config.h says to use it. */
++#ifdef abort
++#undef abort
++#endif
++
++
++#ifdef HAVE_GAS_HIDDEN
++#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
++#else
++#define ATTRIBUTE_HIDDEN
++#endif
++
++#ifndef MIN_UNITS_PER_WORD
++#define MIN_UNITS_PER_WORD UNITS_PER_WORD
++#endif
++
++#include "libgcc2.h"
++
++extern SItype __modsi3 (SItype, SItype);
++extern SItype __divsi3 (SItype, SItype);
++extern SItype __umodsi3 (SItype, SItype);
++extern SItype __udivsi3 (SItype, SItype);
++
++static USItype udivmodsi4(USItype, USItype, word_type);
++
++/* 32-bit SI divide and modulo as used in NIOS */
++
++
++static USItype
++udivmodsi4(USItype num, USItype den, word_type modwanted)
++{
++ USItype bit = 1;
++ USItype res = 0;
++
++ while (den < num && bit && !(den & (1L<<31)))
++ {
++ den <<=1;
++ bit <<=1;
++ }
++ while (bit)
++ {
++ if (num >= den)
++ {
++ num -= den;
++ res |= bit;
++ }
++ bit >>=1;
++ den >>=1;
++ }
++ if (modwanted) return num;
++ return res;
++}
++
++
++SItype
++__divsi3 (SItype a, SItype b)
++{
++ word_type neg = 0;
++ SItype res;
++
++ if (a < 0)
++ {
++ a = -a;
++ neg = !neg;
++ }
++
++ if (b < 0)
++ {
++ b = -b;
++ neg = !neg;
++ }
++
++ res = udivmodsi4 (a, b, 0);
++
++ if (neg)
++ res = -res;
++
++ return res;
++}
++
++
++SItype
++__modsi3 (SItype a, SItype b)
++{
++ word_type neg = 0;
++ SItype res;
++
++ if (a < 0)
++ {
++ a = -a;
++ neg = 1;
++ }
++
++ if (b < 0)
++ b = -b;
++
++ res = udivmodsi4 (a, b, 1);
++
++ if (neg)
++ res = -res;
++
++ return res;
++}
++
++
++SItype
++__udivsi3 (SItype a, SItype b)
++{
++ return udivmodsi4 (a, b, 0);
++}
++
++
++SItype
++__umodsi3 (SItype a, SItype b)
++{
++ return udivmodsi4 (a, b, 1);
++}
++
+Index: gcc-4.1.2/gcc/config/nios2/lib2-divtable.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/lib2-divtable.c 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,52 @@
++
++/* We include auto-host.h here to get HAVE_GAS_HIDDEN. This is
++ supposedly valid even though this is a "target" file. */
++#include "auto-host.h"
++#undef gid_t
++#undef pid_t
++#undef rlim_t
++#undef ssize_t
++#undef uid_t
++#undef vfork
++
++
++#include "tconfig.h"
++#include "tsystem.h"
++#include "coretypes.h"
++#include "tm.h"
++
++
++/* Don't use `fancy_abort' here even if config.h says to use it. */
++#ifdef abort
++#undef abort
++#endif
++
++
++#ifdef HAVE_GAS_HIDDEN
++#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
++#else
++#define ATTRIBUTE_HIDDEN
++#endif
++
++#include "libgcc2.h"
++
++UQItype __divsi3_table[] =
++{
++ 0, 0/1, 0/2, 0/3, 0/4, 0/5, 0/6, 0/7, 0/8, 0/9, 0/10, 0/11, 0/12, 0/13, 0/14, 0/15,
++ 0, 1/1, 1/2, 1/3, 1/4, 1/5, 1/6, 1/7, 1/8, 1/9, 1/10, 1/11, 1/12, 1/13, 1/14, 1/15,
++ 0, 2/1, 2/2, 2/3, 2/4, 2/5, 2/6, 2/7, 2/8, 2/9, 2/10, 2/11, 2/12, 2/13, 2/14, 2/15,
++ 0, 3/1, 3/2, 3/3, 3/4, 3/5, 3/6, 3/7, 3/8, 3/9, 3/10, 3/11, 3/12, 3/13, 3/14, 3/15,
++ 0, 4/1, 4/2, 4/3, 4/4, 4/5, 4/6, 4/7, 4/8, 4/9, 4/10, 4/11, 4/12, 4/13, 4/14, 4/15,
++ 0, 5/1, 5/2, 5/3, 5/4, 5/5, 5/6, 5/7, 5/8, 5/9, 5/10, 5/11, 5/12, 5/13, 5/14, 5/15,
++ 0, 6/1, 6/2, 6/3, 6/4, 6/5, 6/6, 6/7, 6/8, 6/9, 6/10, 6/11, 6/12, 6/13, 6/14, 6/15,
++ 0, 7/1, 7/2, 7/3, 7/4, 7/5, 7/6, 7/7, 7/8, 7/9, 7/10, 7/11, 7/12, 7/13, 7/14, 7/15,
++ 0, 8/1, 8/2, 8/3, 8/4, 8/5, 8/6, 8/7, 8/8, 8/9, 8/10, 8/11, 8/12, 8/13, 8/14, 8/15,
++ 0, 9/1, 9/2, 9/3, 9/4, 9/5, 9/6, 9/7, 9/8, 9/9, 9/10, 9/11, 9/12, 9/13, 9/14, 9/15,
++ 0, 10/1, 10/2, 10/3, 10/4, 10/5, 10/6, 10/7, 10/8, 10/9, 10/10, 10/11, 10/12, 10/13, 10/14, 10/15,
++ 0, 11/1, 11/2, 11/3, 11/4, 11/5, 11/6, 11/7, 11/8, 11/9, 11/10, 11/11, 11/12, 11/13, 11/14, 11/15,
++ 0, 12/1, 12/2, 12/3, 12/4, 12/5, 12/6, 12/7, 12/8, 12/9, 12/10, 12/11, 12/12, 12/13, 12/14, 12/15,
++ 0, 13/1, 13/2, 13/3, 13/4, 13/5, 13/6, 13/7, 13/8, 13/9, 13/10, 13/11, 13/12, 13/13, 13/14, 13/15,
++ 0, 14/1, 14/2, 14/3, 14/4, 14/5, 14/6, 14/7, 14/8, 14/9, 14/10, 14/11, 14/12, 14/13, 14/14, 14/15,
++ 0, 15/1, 15/2, 15/3, 15/4, 15/5, 15/6, 15/7, 15/8, 15/9, 15/10, 15/11, 15/12, 15/13, 15/14, 15/15,
++};
++
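The table above is a 16x16 quotient lookup: row n holds n/d for d = 0..15, with the d == 0 column pinned to 0. A 4-bit by 4-bit division therefore reduces to a single indexed load; a hedged illustration of the indexing (the wrapper name is illustrative, and UQItype is assumed to be unsigned char):

    extern unsigned char __divsi3_table[256];

    /* Quotient of two 4-bit values via the table: row n, column d.  */
    static unsigned char
    div4_by_table (unsigned int n, unsigned int d)
    {
      return __divsi3_table[(n & 0xf) * 16 + (d & 0xf)];
    }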
+Index: gcc-4.1.2/gcc/config/nios2/lib2-mul.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/lib2-mul.c 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,113 @@
++/* While we are debugging (i.e. compiling outside of the gcc build),
++ disable gcc-specific headers */
++#ifndef DEBUG_MULSI3
++
++
++/* We include auto-host.h here to get HAVE_GAS_HIDDEN. This is
++ supposedly valid even though this is a "target" file. */
++#include "auto-host.h"
++#undef gid_t
++#undef pid_t
++#undef rlim_t
++#undef ssize_t
++#undef uid_t
++#undef vfork
++
++
++#include "tconfig.h"
++#include "tsystem.h"
++#include "coretypes.h"
++#include "tm.h"
++
++
++/* Don't use `fancy_abort' here even if config.h says to use it. */
++#ifdef abort
++#undef abort
++#endif
++
++
++#ifdef HAVE_GAS_HIDDEN
++#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
++#else
++#define ATTRIBUTE_HIDDEN
++#endif
++
++#ifndef MIN_UNITS_PER_WORD
++#define MIN_UNITS_PER_WORD UNITS_PER_WORD
++#endif
++
++#include "libgcc2.h"
++
++#else
++#define SItype int
++#define USItype unsigned int
++#endif
++
++
++extern SItype __mulsi3 (SItype, SItype);
++
++SItype
++__mulsi3 (SItype a, SItype b)
++{
++ SItype res = 0;
++ USItype cnt = a;
++
++ while (cnt)
++ {
++ if (cnt & 1)
++ {
++ res += b;
++ }
++ b <<= 1;
++ cnt >>= 1;
++ }
++
++ return res;
++}
++/*
++TODO: Choose best alternative implementation.
++
++SItype
++__divsi3 (SItype a, SItype b)
++{
++ SItype res = 0;
++ USItype cnt = 0;
++
++ while (cnt < 32)
++ {
++ if (a & (1L << cnt))
++ {
++ res += b;
++ }
++ b <<= 1;
++ cnt++;
++ }
++
++ return res;
++}
++*/
++
++
++#ifdef DEBUG_MULSI3
++
++int
++main ()
++{
++ int i, j;
++ int error = 0;
++
++ for (i = -1000; i < 1000; i++)
++ for (j = -1000; j < 1000; j++)
++ {
++ int expect = i * j;
++ int actual = __mulsi3 (i, j);
++ if (expect != actual)
++ {
++ printf ("error: %d * %d = %d not %d\n", i, j, expect, actual);
++ error = 1;
++ }
++ }
++
++ return error;
++}
++#endif
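__mulsi3 above is a textbook shift-and-add multiply: for every set bit of the first operand it adds a correspondingly shifted copy of the second. A hedged, host-compilable equivalent (the name is illustrative; signed results rely on two's complement wraparound exactly as in the original):

    static int
    mulsi3_sketch (int a, int b)
    {
      int res = 0;
      unsigned int cnt = a;      /* scan the bits of A, low to high */

      while (cnt)
        {
          if (cnt & 1)
            res += b;            /* add B shifted by the current bit position */
          b <<= 1;
          cnt >>= 1;
        }
      return res;
    }

    /* Example: mulsi3_sketch (-6, 7) == -42.  */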
+Index: gcc-4.1.2/gcc/config/nios2/linux-atomic.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/linux-atomic.c 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,302 @@
++/* Linux-specific atomic operations for Nios II Linux.
++ Copyright (C) 2008 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 2, or (at your option) any later
++version.
++
++In addition to the permissions in the GNU General Public License, the
++Free Software Foundation gives you unlimited permission to link the
++compiled version of this file into combinations with other programs,
++and to distribute those combinations without any restriction coming
++from the use of this file. (The General Public License restrictions
++do apply in other respects; for example, they cover modification of
++the file, and distribution when not linked into a combine
++executable.)
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING. If not, write to the Free
++Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
++02110-1301, USA. */
++
++#include <asm/unistd.h>
++#define EFAULT 14
++#define EBUSY 16
++#define ENOSYS 38
++
++/* We implement byte, short and int versions of each atomic operation
++ using the kernel helper defined below. There is no support for
++ 64-bit operations yet. */
++
++/* Crash a userspace program with SIGSEGV. */
++#define ABORT_INSTRUCTION asm ("stw zero, 0(zero)")
++
++/* Kernel helper for compare-and-exchange a 32-bit value. */
++static inline long
++__kernel_cmpxchg (int oldval, int newval, int *mem)
++{
++ register int r2 asm ("r2") = __NR_nios2cmpxchg;
++ register unsigned long lws_mem asm("r4") = (unsigned long) (mem);
++ register int lws_old asm("r5") = oldval;
++ register int lws_new asm("r6") = newval;
++ register int err asm ("r7");
++ asm volatile ("trap"
++ : "=r" (r2), "=r" (err)
++ : "r" (r2), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
++ : "r1", "r3", "r8", "r9", "r10", "r11", "r12", "r13", "r14",
++ "r15", "r29", "memory");
++
++ /* If the kernel LWS call succeeded (err == 0), r2 contains the old value
++ from memory. If this value is equal to OLDVAL, the new value was written
++ to memory. If not, return EBUSY. */
++ if (__builtin_expect (err, 0))
++ {
++ if(__builtin_expect (r2 == EFAULT || r2 == ENOSYS,0))
++ ABORT_INSTRUCTION;
++ }
++ else
++ {
++ if (__builtin_expect (r2 != oldval, 0))
++ r2 = EBUSY;
++ else
++ r2 = 0;
++ }
++
++ return r2;
++}
++
++#define HIDDEN __attribute__ ((visibility ("hidden")))
++
++/* Big endian masks */
++#define INVERT_MASK_1 24
++#define INVERT_MASK_2 16
++
++#define MASK_1 0xffu
++#define MASK_2 0xffffu
++
++#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
++ int HIDDEN \
++ __sync_fetch_and_##OP##_4 (int *ptr, int val) \
++ { \
++ int failure, tmp; \
++ \
++ do { \
++ tmp = *ptr; \
++ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
++ } while (failure != 0); \
++ \
++ return tmp; \
++ }
++
++FETCH_AND_OP_WORD (add, , +)
++FETCH_AND_OP_WORD (sub, , -)
++FETCH_AND_OP_WORD (or, , |)
++FETCH_AND_OP_WORD (and, , &)
++FETCH_AND_OP_WORD (xor, , ^)
++FETCH_AND_OP_WORD (nand, ~, &)
++
++#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
++#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
++
++/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
++ subword-sized quantities. */
++
++#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
++ TYPE HIDDEN \
++ NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
++ { \
++ int *wordptr = (int *) ((unsigned long) ptr & ~3); \
++ unsigned int mask, shift, oldval, newval; \
++ int failure; \
++ \
++ shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
++ mask = MASK_##WIDTH << shift; \
++ \
++ do { \
++ oldval = *wordptr; \
++ newval = ((PFX_OP ((oldval & mask) >> shift) \
++ INF_OP (unsigned int) val) << shift) & mask; \
++ newval |= oldval & ~mask; \
++ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
++ } while (failure != 0); \
++ \
++ return (RETURN & mask) >> shift; \
++ }
++
++SUBWORD_SYNC_OP (add, , +, short, 2, oldval)
++SUBWORD_SYNC_OP (sub, , -, short, 2, oldval)
++SUBWORD_SYNC_OP (or, , |, short, 2, oldval)
++SUBWORD_SYNC_OP (and, , &, short, 2, oldval)
++SUBWORD_SYNC_OP (xor, , ^, short, 2, oldval)
++SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)
++
++SUBWORD_SYNC_OP (add, , +, char, 1, oldval)
++SUBWORD_SYNC_OP (sub, , -, char, 1, oldval)
++SUBWORD_SYNC_OP (or, , |, char, 1, oldval)
++SUBWORD_SYNC_OP (and, , &, char, 1, oldval)
++SUBWORD_SYNC_OP (xor, , ^, char, 1, oldval)
++SUBWORD_SYNC_OP (nand, ~, &, char, 1, oldval)
++
++#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
++ int HIDDEN \
++ __sync_##OP##_and_fetch_4 (int *ptr, int val) \
++ { \
++ int tmp, failure; \
++ \
++ do { \
++ tmp = *ptr; \
++ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
++ } while (failure != 0); \
++ \
++ return PFX_OP tmp INF_OP val; \
++ }
++
++OP_AND_FETCH_WORD (add, , +)
++OP_AND_FETCH_WORD (sub, , -)
++OP_AND_FETCH_WORD (or, , |)
++OP_AND_FETCH_WORD (and, , &)
++OP_AND_FETCH_WORD (xor, , ^)
++OP_AND_FETCH_WORD (nand, ~, &)
++
++SUBWORD_SYNC_OP (add, , +, short, 2, newval)
++SUBWORD_SYNC_OP (sub, , -, short, 2, newval)
++SUBWORD_SYNC_OP (or, , |, short, 2, newval)
++SUBWORD_SYNC_OP (and, , &, short, 2, newval)
++SUBWORD_SYNC_OP (xor, , ^, short, 2, newval)
++SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)
++
++SUBWORD_SYNC_OP (add, , +, char, 1, newval)
++SUBWORD_SYNC_OP (sub, , -, char, 1, newval)
++SUBWORD_SYNC_OP (or, , |, char, 1, newval)
++SUBWORD_SYNC_OP (and, , &, char, 1, newval)
++SUBWORD_SYNC_OP (xor, , ^, char, 1, newval)
++SUBWORD_SYNC_OP (nand, ~, &, char, 1, newval)
++
++int HIDDEN
++__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
++{
++ int actual_oldval, fail;
++
++ while (1)
++ {
++ actual_oldval = *ptr;
++
++ if (oldval != actual_oldval)
++ return actual_oldval;
++
++ fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
++
++ if (!fail)
++ return oldval;
++ }
++}
++
++#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
++ TYPE HIDDEN \
++ __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
++ TYPE newval) \
++ { \
++ int *wordptr = (int *)((unsigned long) ptr & ~3), fail; \
++ unsigned int mask, shift, actual_oldval, actual_newval; \
++ \
++ shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
++ mask = MASK_##WIDTH << shift; \
++ \
++ while (1) \
++ { \
++ actual_oldval = *wordptr; \
++ \
++ if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \
++ return (actual_oldval & mask) >> shift; \
++ \
++ actual_newval = (actual_oldval & ~mask) \
++ | (((unsigned int) newval << shift) & mask); \
++ \
++ fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
++ wordptr); \
++ \
++ if (!fail) \
++ return oldval; \
++ } \
++ }
++
++SUBWORD_VAL_CAS (short, 2)
++SUBWORD_VAL_CAS (char, 1)
++
++typedef unsigned char bool;
++
++bool HIDDEN
++__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
++{
++ int failure = __kernel_cmpxchg (oldval, newval, ptr);
++ return (failure == 0);
++}
++
++#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
++ bool HIDDEN \
++ __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
++ TYPE newval) \
++ { \
++ TYPE actual_oldval \
++ = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
++ return (oldval == actual_oldval); \
++ }
++
++SUBWORD_BOOL_CAS (short, 2)
++SUBWORD_BOOL_CAS (char, 1)
++
++int HIDDEN
++__sync_lock_test_and_set_4 (int *ptr, int val)
++{
++ int failure, oldval;
++
++ do {
++ oldval = *ptr;
++ failure = __kernel_cmpxchg (oldval, val, ptr);
++ } while (failure != 0);
++
++ return oldval;
++}
++
++#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
++ TYPE HIDDEN \
++ __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
++ { \
++ int failure; \
++ unsigned int oldval, newval, shift, mask; \
++ int *wordptr = (int *) ((unsigned long) ptr & ~3); \
++ \
++ shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
++ mask = MASK_##WIDTH << shift; \
++ \
++ do { \
++ oldval = *wordptr; \
++ newval = (oldval & ~mask) \
++ | (((unsigned int) val << shift) & mask); \
++ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
++ } while (failure != 0); \
++ \
++ return (oldval & mask) >> shift; \
++ }
++
++SUBWORD_TEST_AND_SET (short, 2)
++SUBWORD_TEST_AND_SET (char, 1)
++
++#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
++ void HIDDEN \
++ __sync_lock_release_##WIDTH (TYPE *ptr) \
++ { \
++ *ptr = 0; \
++ }
++
++SYNC_LOCK_RELEASE (int, 4)
++SYNC_LOCK_RELEASE (short, 2)
++SYNC_LOCK_RELEASE (char, 1)
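Every operation in linux-atomic.c reduces to the same pattern: read the word, compute the new value, and retry through __kernel_cmpxchg until no other thread got in between; the sub-word variants apply the same loop to the containing aligned word with a shift and mask. A hedged sketch of what __sync_fetch_and_add_4 expands to (cmpxchg_stub stands in for the kernel helper and is not a real symbol):

    /* Stand-in for __kernel_cmpxchg: returns 0 on success, non-zero if
       *MEM no longer held OLDVAL and nothing was written.  */
    extern int cmpxchg_stub (int oldval, int newval, int *mem);

    int
    fetch_and_add_4_sketch (int *ptr, int val)
    {
      int tmp;
      do
        tmp = *ptr;                                 /* snapshot current value */
      while (cmpxchg_stub (tmp, tmp + val, ptr));   /* retry if it changed */
      return tmp;                                   /* old value, per fetch-and-op */
    }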
+Index: gcc-4.1.2/gcc/config/nios2/linux-unwind.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/linux-unwind.h 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,166 @@
++/* DWARF2 EH unwinding support for NIOS2 Linux.
++ Copyright (C) 2008 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2, or (at your option)
++any later version.
++
++In addition to the permissions in the GNU General Public License, the
++Free Software Foundation gives you unlimited permission to link the
++compiled version of this file with other programs, and to distribute
++those programs without any restriction coming from the use of this
++file. (The General Public License restrictions do apply in other
++respects; for example, they cover modification of the file, and
++distribution when not linked into another program.)
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING. If not, write to
++the Free Software Foundation, 51 Franklin Street, Fifth Floor,
++Boston, MA 02110-1301, USA. */
++
++#ifndef inhibit_libc
++
++/* Do code reading to identify a signal frame, and set the frame
++ state data appropriately. See unwind-dw2.c for the structs. */
++
++#include <signal.h>
++#include <asm/unistd.h>
++
++/* Unfortunately the kernel headers define the wrong shape of the
++ register file, so we define our own version here. This problem has
++ been reported. */
++
++struct nios2_mcontext
++{
++ int version; /* 2 */
++ unsigned seq_regs[23]; /* regs 1..23 */
++ unsigned ra; /* Return address, r31 */
++ unsigned fp; /* Frame pointer, r28 */
++ unsigned gp; /* Global pointer, r26 */
++ unsigned pad1;
++ unsigned ea; /* Exception return address (pc) */
++ unsigned sp; /* Stack pointer, r27 */
++ unsigned pad2;
++ /* Note r24, r25, r29, r30 are reserved registers */
++};
++
++/* The kernel's definition of this structure also doesn't match
++ reality. Again, this has been reported. */
++
++struct nios2_ucontext {
++ unsigned long uc_flags;
++ unsigned pad1;
++ void *uc_link;
++ stack_t uc_stack;
++ struct siginfo info;
++ struct nios2_mcontext uc_mcontext;
++};
++
++#define MD_FALLBACK_FRAME_STATE_FOR nios2_fallback_frame_state
++
++static _Unwind_Reason_Code
++nios2_fallback_frame_state (struct _Unwind_Context *context,
++ _Unwind_FrameState *fs)
++{
++ u_int32_t *pc = (u_int32_t *) context->ra;
++ _Unwind_Ptr new_cfa;
++ int i;
++
++ /* movi r2,(sigreturn/rt_sigreturn)
++ trap */
++ if (pc[1] != 0x003b683a) /* trap */
++ return _URC_END_OF_STACK;
++
++#define NIOS2_REG(NUM,NAME) \
++ (fs->regs.reg[NUM].how = REG_SAVED_OFFSET, \
++ fs->regs.reg[NUM].loc.offset = (_Unwind_Ptr)&regs->NAME - new_cfa)
++
++ if (pc[0] == (0x00800004 | (__NR_sigreturn << 6)))
++ {
++ struct sigframe {
++ u_int32_t trampoline[2];
++ u_int32_t pad1;
++ u_int32_t pad2;
++ struct sigcontext ctx;
++ } *rt_ = context->ra;
++ struct pt_regs *regs = &rt_->ctx.regs;
++
++ /* The CFA is the user's incoming stack pointer value. */
++ new_cfa = (_Unwind_Ptr)regs->sp;
++ fs->cfa_how = CFA_REG_OFFSET;
++ fs->cfa_reg = STACK_POINTER_REGNUM;
++ fs->cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa;
++
++ /* Regs 1..15 */
++ NIOS2_REG (1, r1);
++ NIOS2_REG (2, r2);
++ NIOS2_REG (3, r3);
++ NIOS2_REG (4, r4);
++ NIOS2_REG (5, r5);
++ NIOS2_REG (6, r6);
++ NIOS2_REG (7, r7);
++ NIOS2_REG (8, r8);
++ NIOS2_REG (9, r9);
++ NIOS2_REG (10, r10);
++ NIOS2_REG (11, r11);
++ NIOS2_REG (12, r12);
++ NIOS2_REG (13, r13);
++ NIOS2_REG (14, r14);
++ NIOS2_REG (15, r15);
++
++ /* Regs 16..23 are not saved here. They are callee saved or
++ special. */
++
++ /* The random registers. */
++ NIOS2_REG (RA_REGNO, ra);
++ NIOS2_REG (FP_REGNO, fp);
++ NIOS2_REG (GP_REGNO, gp);
++ NIOS2_REG (SIGNAL_UNWIND_RETURN_COLUMN, ea);
++
++ fs->retaddr_column = SIGNAL_UNWIND_RETURN_COLUMN;
++
++ return _URC_NO_REASON;
++ }
++ else if (pc[0] == (0x00800004 | (__NR_rt_sigreturn << 6)))
++ {
++ struct sigframe {
++ u_int32_t trampoline[2];
++ struct nios2_ucontext sigctx;
++ } *rt_ = context->ra;
++ struct nios2_mcontext *regs = &rt_->sigctx.uc_mcontext;
++
++ if (regs->version != 2)
++ return _URC_END_OF_STACK;
++
++ /* The CFA is the user's incoming stack pointer value. */
++ new_cfa = (_Unwind_Ptr)regs->sp;
++ fs->cfa_how = CFA_REG_OFFSET;
++ fs->cfa_reg = STACK_POINTER_REGNUM;
++ fs->cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa;
++
++ /* The sequential registers. */
++ for (i = 1; i != 24; i++)
++ NIOS2_REG (i, seq_regs[i-1]);
++
++ /* The random registers. */
++ NIOS2_REG (RA_REGNO, ra);
++ NIOS2_REG (FP_REGNO, fp);
++ NIOS2_REG (GP_REGNO, gp);
++ NIOS2_REG (SIGNAL_UNWIND_RETURN_COLUMN, ea);
++
++ fs->retaddr_column = SIGNAL_UNWIND_RETURN_COLUMN;
++
++ return _URC_NO_REASON;
++ }
++#undef NIOS2_REG
++ return _URC_END_OF_STACK;
++}
++#endif
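The fallback unwinder above recognises a signal frame purely by reading code at the return address: a movi of the sigreturn or rt_sigreturn syscall number into r2, followed by a trap. A hedged sketch of just that recognition step, reusing the instruction encodings from the patch (the function and parameter names are illustrative):

    #include <stdint.h>

    #define NIOS2_TRAP       0x003b683au                       /* trap */
    #define MOVI_R2(nr)      (0x00800004u | ((uint32_t)(nr) << 6))

    /* Returns 1 for a sigreturn trampoline, 2 for rt_sigreturn, 0 otherwise.  */
    static int
    classify_trampoline (const uint32_t *pc, uint32_t nr_sigreturn,
                         uint32_t nr_rt_sigreturn)
    {
      if (pc[1] != NIOS2_TRAP)
        return 0;
      if (pc[0] == MOVI_R2 (nr_sigreturn))
        return 1;
      if (pc[0] == MOVI_R2 (nr_rt_sigreturn))
        return 2;
      return 0;
    }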
+Index: gcc-4.1.2/gcc/config/nios2/linux.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/linux.h 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,58 @@
++/* Definitions for Nios II running Linux-based GNU systems with
++ ELF format.
++ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004, 2008
++ Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING. If not, write to
++the Free Software Foundation, 51 Franklin Street, Fifth Floor,
++Boston, MA 02110-1301, USA. */
++
++#undef LIB_SPEC
++#define LIB_SPEC "-lc \
++ %{pthread:-lpthread}"
++
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC \
++"%{!shared: crt1.o%s} \
++ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC \
++"%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
++
++#define TARGET_OS_CPP_BUILTINS() \
++ do \
++ { \
++ LINUX_TARGET_OS_CPP_BUILTINS(); \
++ if (flag_pic) \
++ { \
++ builtin_define ("__PIC__"); \
++ builtin_define ("__pic__"); \
++ } \
++ } \
++ while (0)
++
++#undef SYSROOT_SUFFIX_SPEC
++#define SYSROOT_SUFFIX_SPEC \
++ "%{EB:/EB}"
++
++#undef LINK_SPEC
++#define LINK_SPEC LINK_SPEC_ENDIAN \
++ " %{shared:-shared} \
++ %{static:-Bstatic} \
++ %{rdynamic:-export-dynamic}"
++
++#define MD_UNWIND_SUPPORT "config/nios2/linux-unwind.h"
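The TARGET_OS_CPP_BUILTINS hook above defines __PIC__ and __pic__ whenever the compiler runs with -fpic/-fPIC, so target code can detect PIC builds at preprocessing time. A trivial illustration:

    #ifdef __PIC__
    /* Position-independent build: GOT-relative addressing is in effect.  */
    #endif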
+Index: gcc-4.1.2/gcc/config/nios2/nios2-protos.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/nios2-protos.h 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,92 @@
++/* NOT ASSIGNED TO FSF. COPYRIGHT ALTERA. */
++/* Subroutines for assembler code output for Altera NIOS 2G NIOS2 version.
++ Copyright (C) 2003 Altera
++ Contributed by Jonah Graham (jgraham@altera.com).
++
++This file is part of GNU CC.
++
++GNU CC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2, or (at your option)
++any later version.
++
++GNU CC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GNU CC; see the file COPYING. If not, write to
++the Free Software Foundation, 59 Temple Place - Suite 330,
++Boston, MA 02111-1307, USA. */
++
++extern void dump_frame_size (FILE *);
++extern HOST_WIDE_INT compute_frame_size (void);
++extern int nios2_initial_elimination_offset (int, int);
++extern void override_options (void);
++extern void optimization_options (int, int);
++extern int nios2_can_use_return_insn (void);
++extern void expand_prologue (void);
++extern void expand_epilogue (bool);
++extern void function_profiler (FILE *, int);
++extern enum reg_class reg_class_from_constraint (char, const char *);
++extern void nios2_register_target_pragmas (void);
++
++#ifdef RTX_CODE
++extern int nios2_legitimate_address (rtx, enum machine_mode, int);
++extern int nios2_legitimate_constant (rtx);
++extern void nios2_print_operand (FILE *, rtx, int);
++extern void nios2_print_operand_address (FILE *, rtx);
++extern rtx nios2_legitimize_address (rtx, rtx, enum machine_mode);
++extern bool nios2_legitimate_pic_operand_p (rtx);
++
++extern int nios2_emit_move_sequence (rtx *, enum machine_mode);
++extern int nios2_emit_expensive_div (rtx *, enum machine_mode);
++extern void nios2_adjust_call_address (rtx *);
++
++extern rtx nios2_get_return_address (int);
++extern void nios2_set_return_address (rtx, rtx);
++
++extern void gen_int_relational (enum rtx_code, rtx, rtx, rtx, rtx);
++extern void gen_conditional_move (rtx *, enum machine_mode);
++extern const char *asm_output_opcode (FILE *, const char *);
++
++/* predicates */
++extern int call_operand (rtx, enum machine_mode);
++extern int arith_operand (rtx, enum machine_mode);
++extern int uns_arith_operand (rtx, enum machine_mode);
++extern int logical_operand (rtx, enum machine_mode);
++extern int shift_operand (rtx, enum machine_mode);
++extern int reg_or_0_operand (rtx, enum machine_mode);
++extern int equality_op (rtx, enum machine_mode);
++extern int custom_insn_opcode (rtx, enum machine_mode);
++extern int rdwrctl_operand (rtx, enum machine_mode);
++
++/* custom fpu instruction output */
++extern const char *nios2_output_fpu_insn_cmps (rtx, enum rtx_code);
++extern const char *nios2_output_fpu_insn_cmpd (rtx, enum rtx_code);
++
++# ifdef HAVE_MACHINE_MODES
++# if defined TREE_CODE
++extern rtx function_arg (const CUMULATIVE_ARGS *, enum machine_mode, tree, int);
++extern bool nios2_must_pass_in_stack (enum machine_mode, tree);
++extern int function_arg_partial_nregs (const CUMULATIVE_ARGS *,
++ enum machine_mode, tree, int);
++extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode, tree,
++ int);
++extern int nios2_function_arg_padding (enum machine_mode, tree);
++extern int nios2_function_arg_padding_upward (enum machine_mode, tree);
++extern int nios2_block_reg_padding (enum machine_mode, tree, int);
++extern int nios2_block_reg_padding_upward (enum machine_mode, tree, int);
++extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
++extern void nios2_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
++ tree, int *, int);
++
++# endif /* TREE_CODE */
++# endif /* HAVE_MACHINE_MODES */
++#endif
++
++#ifdef TREE_CODE
++extern int nios2_return_in_memory (tree);
++
++#endif /* TREE_CODE */
+Index: gcc-4.1.2/gcc/config/nios2/nios2.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/nios2.c 2010-06-30 08:52:12.000000000 +0200
+@@ -0,0 +1,4937 @@
++/* NOT ASSIGNED TO FSF. COPYRIGHT ALTERA. */
++/* Subroutines for assembler code output for Altera NIOS 2G NIOS2 version.
++ Copyright (C) 2005 Altera
++ Contributed by Jonah Graham (jgraham@altera.com), Will Reece (wreece@altera.com),
++ and Jeff DaSilva (jdasilva@altera.com).
++
++This file is part of GNU CC.
++
++GNU CC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2, or (at your option)
++any later version.
++
++GNU CC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GNU CC; see the file COPYING. If not, write to
++the Free Software Foundation, 59 Temple Place - Suite 330,
++Boston, MA 02111-1307, USA. */
++
++
++#include <stdio.h>
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "tree.h"
++#include "tm_p.h"
++#include "regs.h"
++#include "hard-reg-set.h"
++#include "real.h"
++#include "insn-config.h"
++#include "conditions.h"
++#include "output.h"
++#include "insn-attr.h"
++#include "flags.h"
++#include "recog.h"
++#include "expr.h"
++#include "toplev.h"
++#include "basic-block.h"
++#include "function.h"
++#include "integrate.h"
++#include "ggc.h"
++#include "reload.h"
++#include "debug.h"
++#include "optabs.h"
++#include "target.h"
++#include "target-def.h"
++#include "c-pragma.h" /* For c_register_pragma. */
++#include "cpplib.h" /* For CPP_NUMBER. */
++#include "c-tree.h" /* For builtin_function. */
++
++/* Local prototypes. */
++static bool nios2_rtx_costs (rtx, int, int, int *);
++
++static void nios2_asm_function_prologue (FILE *, HOST_WIDE_INT);
++static int nios2_issue_rate (void);
++static struct machine_function *nios2_init_machine_status (void);
++static bool nios2_in_small_data_p (tree);
++static void save_reg (int, HOST_WIDE_INT);
++static void restore_reg (int, HOST_WIDE_INT);
++static unsigned int nios2_section_type_flags (tree, const char *, int);
++
++/* 0 --> no #pragma seen
++ 1 --> in scope of #pragma reverse_bitfields
++ -1 --> in scope of #pragma no_reverse_bitfields. */
++static int nios2_pragma_reverse_bitfields_flag = 0;
++static void nios2_pragma_reverse_bitfields (struct cpp_reader *);
++static void nios2_pragma_no_reverse_bitfields (struct cpp_reader *);
++static tree nios2_handle_struct_attribute (tree *, tree, tree, int, bool *);
++static void nios2_insert_attributes (tree, tree *);
++static void nios2_load_pic_register (void);
++static bool nios2_cannot_force_const_mem (rtx);
++bool nios2_legitimate_pic_operand_p (rtx x);
++static rtx nios2_legitimize_pic_address (rtx orig, enum machine_mode mode,
++ rtx reg);
++rtx nios2_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode);
++static void nios2_init_builtins (void);
++static rtx nios2_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
++static bool nios2_function_ok_for_sibcall (tree, tree);
++static int nios2_arg_partial_bytes (CUMULATIVE_ARGS *cum,
++ enum machine_mode mode, tree type,
++ bool named ATTRIBUTE_UNUSED);
++static bool nios2_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type, bool named ATTRIBUTE_UNUSED);
++static void nios2_encode_section_info (tree, rtx, int);
++int nios2_function_arg_padding_upward (enum machine_mode mode, tree type);
++int nios2_block_reg_padding_upward (enum machine_mode mode, tree type,
++ int first ATTRIBUTE_UNUSED);
++static void nios2_output_dwarf_dtprel (FILE *file, int size, rtx x);
++
++/* Initialize the GCC target structure. */
++#undef TARGET_ASM_FUNCTION_PROLOGUE
++#define TARGET_ASM_FUNCTION_PROLOGUE nios2_asm_function_prologue
++
++#undef TARGET_DEFAULT_TARGET_FLAGS
++#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
++
++#undef TARGET_SCHED_ISSUE_RATE
++#define TARGET_SCHED_ISSUE_RATE nios2_issue_rate
++#undef TARGET_IN_SMALL_DATA_P
++#define TARGET_IN_SMALL_DATA_P nios2_in_small_data_p
++#undef TARGET_ENCODE_SECTION_INFO
++#define TARGET_ENCODE_SECTION_INFO nios2_encode_section_info
++#undef TARGET_SECTION_TYPE_FLAGS
++#define TARGET_SECTION_TYPE_FLAGS nios2_section_type_flags
++
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS nios2_init_builtins
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN nios2_expand_builtin
++
++#undef TARGET_FUNCTION_OK_FOR_SIBCALL
++#define TARGET_FUNCTION_OK_FOR_SIBCALL nios2_function_ok_for_sibcall
++
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE nios2_pass_by_reference
++
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES nios2_arg_partial_bytes
++
++#undef TARGET_PROMOTE_PROTOTYPES
++#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
++
++#undef TARGET_SETUP_INCOMING_VARARGS
++#define TARGET_SETUP_INCOMING_VARARGS nios2_setup_incoming_varargs
++
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
++
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS nios2_rtx_costs
++
++#undef TARGET_HAVE_TLS
++#define TARGET_HAVE_TLS true
++
++#undef TARGET_CANNOT_FORCE_CONST_MEM
++#define TARGET_CANNOT_FORCE_CONST_MEM nios2_cannot_force_const_mem
++
++#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
++#define TARGET_ASM_OUTPUT_DWARF_DTPREL nios2_output_dwarf_dtprel
++
++const struct attribute_spec nios2_attribute_table[] =
++{
++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
++ { "reverse_bitfields", 0, 0, false, false, false,
++ nios2_handle_struct_attribute },
++ { "no_reverse_bitfields", 0, 0, false, false, false,
++ nios2_handle_struct_attribute },
++ { NULL, 0, 0, false, false, false, NULL }
++};
++
++#undef TARGET_ATTRIBUTE_TABLE
++#define TARGET_ATTRIBUTE_TABLE nios2_attribute_table
++
++#undef TARGET_INSERT_ATTRIBUTES
++#define TARGET_INSERT_ATTRIBUTES nios2_insert_attributes
++
++/* ??? Might want to redefine TARGET_RETURN_IN_MSB here to handle
++ big-endian case; depends on what ABI we choose. */
++
++struct gcc_target targetm = TARGET_INITIALIZER;
++
++
++
++/* Threshold for data being put into the small data/bss area, instead
++ of the normal data area (references to the small data/bss area take
++ 1 instruction and use the global pointer; references to the normal
++ data area take 2 instructions). */
++unsigned HOST_WIDE_INT nios2_section_threshold = NIOS2_DEFAULT_GVALUE;
++
++/* Structure to be filled in by compute_frame_size with register
++ save masks, and offsets for the current function. */
++
++struct nios2_frame_info
++GTY (())
++{
++ unsigned HOST_WIDE_INT save_mask; /* Mask of registers to save */
++ long total_size; /* # bytes that the entire frame takes up. */
++ long var_size; /* # bytes that variables take up. */
++ long args_size; /* # bytes that outgoing arguments take up. */
++ int save_reg_size; /* # bytes needed to store gp regs. */
++ long save_regs_offset; /* Offset from new sp to store gp registers. */
++ int initialized; /* != 0 if frame size already calculated. */
++};
++
++struct machine_function
++GTY (())
++{
++
++ /* Current frame information, calculated by compute_frame_size. */
++ struct nios2_frame_info frame;
++};
++
++/* Supported TLS relocations. */
++
++enum tls_reloc {
++ TLS_GD16,
++ TLS_LDM16,
++ TLS_LDO16,
++ TLS_IE16,
++ TLS_LE16
++};
++
++#define IS_UNSPEC_TLS(x) ((x)>=UNSPEC_TLS && (x)<=UNSPEC_ADD_TLS_LDO)
++
++
++
++/***************************************
++ * Register Classes
++ ***************************************/
++
++enum reg_class
++reg_class_from_constraint (char chr, const char *str)
++{
++ if (chr == 'D' && ISDIGIT (str[1]) && ISDIGIT (str[2]))
++ {
++ int regno;
++ int ones = str[2] - '0';
++ int tens = str[1] - '0';
++
++ regno = ones + (10 * tens);
++ if (regno < 0 || regno > 31)
++ return NO_REGS;
++
++ return D00_REG + regno;
++ }
++
++ return NO_REGS;
++}
++
++
++/***************************************
++ * Stack Layout and Calling Conventions
++ ***************************************/
++
++
++#define TOO_BIG_OFFSET(X) ((X) > ((1 << 15) - 1))
++#define TEMP_REG_NUM 8
++
++static void
++nios2_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
++{
++ if (flag_verbose_asm || flag_debug_asm)
++ {
++ compute_frame_size ();
++ dump_frame_size (file);
++ }
++}
++
++static void
++save_reg (int regno, HOST_WIDE_INT offset)
++{
++ rtx reg = gen_rtx_REG (SImode, regno);
++ rtx addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
++
++ rtx pattern = gen_rtx_SET (SImode, gen_frame_mem (Pmode, addr), reg);
++ rtx insn = emit_insn (pattern);
++ RTX_FRAME_RELATED_P (insn) = 1;
++}
++
++static void
++restore_reg (int regno, HOST_WIDE_INT offset)
++{
++ rtx reg = gen_rtx_REG (SImode, regno);
++ rtx addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
++
++ rtx pattern = gen_rtx_SET (SImode, reg, gen_frame_mem (Pmode, addr));
++ emit_insn (pattern);
++}
++
++
++void
++expand_prologue (void)
++{
++ int ix;
++ HOST_WIDE_INT total_frame_size = compute_frame_size ();
++ HOST_WIDE_INT sp_offset; /* offset from base_reg to final stack value */
++ HOST_WIDE_INT fp_offset; /* offset from base_reg to final fp value */
++ HOST_WIDE_INT save_offset;
++ rtx insn;
++ unsigned HOST_WIDE_INT save_mask;
++
++ total_frame_size = compute_frame_size ();
++
++ /* Decrement the stack pointer */
++ if (TOO_BIG_OFFSET (total_frame_size))
++ {
++ /* We need an intermediate point; this will point at the spill
++ block */
++ insn = emit_insn
++ (gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (cfun->machine->frame.save_regs_offset
++ - total_frame_size)));
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ fp_offset = 0;
++ sp_offset = -cfun->machine->frame.save_regs_offset;
++ }
++ else if (total_frame_size)
++ {
++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (-total_frame_size)));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ fp_offset = cfun->machine->frame.save_regs_offset;
++ sp_offset = 0;
++ }
++ else
++ fp_offset = sp_offset = 0;
++
++ if (current_function_limit_stack)
++ emit_insn (gen_stack_overflow_detect_and_trap ());
++
++ save_offset = fp_offset + cfun->machine->frame.save_reg_size;
++ save_mask = cfun->machine->frame.save_mask;
++
++ for (ix = 32; ix--;)
++ if (save_mask & ((unsigned HOST_WIDE_INT)1 << ix))
++ {
++ save_offset -= 4;
++ save_reg (ix, save_offset);
++ }
++
++ if (frame_pointer_needed)
++ {
++ insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (fp_offset)));
++
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++
++ if (sp_offset)
++ {
++ rtx tmp = gen_rtx_REG (Pmode, TEMP_REG_NUM);
++ emit_insn (gen_rtx_SET (Pmode, tmp, GEN_INT (sp_offset)));
++
++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ tmp));
++ if (!frame_pointer_needed)
++ {
++ /* Attach a note indicating what just happened */
++ rtx note = gen_rtx_SET (Pmode, stack_pointer_rtx,
++ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++ GEN_INT (sp_offset)));
++ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ note, REG_NOTES (insn));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++ if (current_function_limit_stack)
++ emit_insn (gen_stack_overflow_detect_and_trap ());
++ }
++
++ /* Load the PIC register if needed. */
++ if (current_function_uses_pic_offset_table)
++ nios2_load_pic_register ();
++
++ /* If we are profiling, make sure no instructions are scheduled before
++ the call to mcount. */
++ if (current_function_profile)
++ emit_insn (gen_blockage ());
++}
++
++void
++expand_epilogue (bool sibcall_p)
++{
++ rtx insn;
++ int ix;
++ HOST_WIDE_INT total_frame_size = compute_frame_size ();
++ unsigned HOST_WIDE_INT save_mask;
++ HOST_WIDE_INT sp_adjust;
++ HOST_WIDE_INT save_offset;
++
++ if (!sibcall_p && nios2_can_use_return_insn ())
++ {
++ insn = emit_jump_insn (gen_return ());
++ return;
++ }
++
++ emit_insn (gen_blockage ());
++
++ if (frame_pointer_needed)
++ {
++ /* Recover the stack pointer. */
++ emit_insn (gen_rtx_SET (Pmode, stack_pointer_rtx,
++ hard_frame_pointer_rtx));
++ save_offset = 0;
++ sp_adjust = total_frame_size - cfun->machine->frame.save_regs_offset;
++ }
++ else if (TOO_BIG_OFFSET (total_frame_size))
++ {
++ rtx tmp = gen_rtx_REG (Pmode, TEMP_REG_NUM);
++
++ emit_insn
++ (gen_rtx_SET
++ (Pmode, tmp, GEN_INT (cfun->machine->frame.save_regs_offset)));
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, tmp));
++ save_offset = 0;
++ sp_adjust = total_frame_size - cfun->machine->frame.save_regs_offset;
++ }
++ else
++ {
++ save_offset = cfun->machine->frame.save_regs_offset;
++ sp_adjust = total_frame_size;
++ }
++
++ save_mask = cfun->machine->frame.save_mask;
++ save_offset += cfun->machine->frame.save_reg_size;
++
++ for (ix = 32; ix--;)
++ if (save_mask & ((unsigned HOST_WIDE_INT)1 << ix))
++ {
++ save_offset -= 4;
++ restore_reg (ix, save_offset);
++ }
++
++ if (sp_adjust)
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (sp_adjust)));
++
++ /* Add in the __builtin_eh_return stack adjustment. */
++ if (current_function_calls_eh_return)
++ emit_insn (gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ EH_RETURN_STACKADJ_RTX));
++
++ if (!sibcall_p)
++ insn = emit_jump_insn
++ (gen_return_from_epilogue (gen_rtx_REG (Pmode, RA_REGNO)));
++}
++
++/* Implement RETURN_ADDR_RTX. Note, we do not support moving
++ back to a previous frame. */
++rtx
++nios2_get_return_address (int count)
++{
++ if (count != 0)
++ return const0_rtx;
++
++ return get_hard_reg_initial_val (Pmode, RA_REGNO);
++}
++
++/* Emit code to change the current function's return address to
++ ADDRESS. SCRATCH is available as a scratch register, if needed.
++ ADDRESS and SCRATCH are both word-mode GPRs. */
++
++void
++nios2_set_return_address (rtx address, rtx scratch)
++{
++ compute_frame_size ();
++ if ((cfun->machine->frame.save_mask >> RA_REGNO) & 1)
++ {
++ HOST_WIDE_INT offset = cfun->machine->frame.save_reg_size - 4;
++ rtx base;
++
++ if (frame_pointer_needed)
++ base = hard_frame_pointer_rtx;
++ else
++ {
++ base = stack_pointer_rtx;
++ offset += cfun->machine->frame.save_regs_offset;
++
++ if (TOO_BIG_OFFSET (offset))
++ {
++ emit_insn (gen_rtx_SET (Pmode, scratch, GEN_INT (offset)));
++ emit_insn (gen_add3_insn (scratch, scratch, base));
++ base = scratch;
++ offset = 0;
++ }
++ }
++ if (offset)
++ base = gen_rtx_PLUS (Pmode, base, GEN_INT (offset));
++ emit_insn (gen_rtx_SET (Pmode, gen_rtx_MEM (Pmode, base), address));
++ }
++ else
++ emit_insn (gen_rtx_SET (Pmode, gen_rtx_REG (Pmode, RA_REGNO), address));
++}
++
++bool
++nios2_function_ok_for_sibcall (tree a ATTRIBUTE_UNUSED, tree b ATTRIBUTE_UNUSED)
++{
++ return true;
++}
++
++
++
++
++
++/* ----------------------- *
++ * Profiling
++ * ----------------------- */
++
++void
++function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
++{
++ fprintf (file, "\tmov\tr8, ra\n");
++ if (flag_pic)
++ {
++ fprintf (file, "\tnextpc\tr2\n");
++ fprintf (file, "\t1: movhi\tr3, %%hiadj(_GLOBAL_OFFSET_TABLE_ - 1b)\n");
++ fprintf (file, "\taddi\tr3, r3, %%lo(_GLOBAL_OFFSET_TABLE_ - 1b)\n");
++ fprintf (file, "\tadd\tr2, r2, r3\n");
++ fprintf (file, "\tldw\tr2, %%call(_mcount)(r2)\n");
++ fprintf (file, "\tcallr\tr2\n");
++ }
++ else
++ fprintf (file, "\tcall\t_mcount\n");
++ fprintf (file, "\tmov\tra, r8\n");
++}
++
++
++/***************************************
++ * Stack Layout
++ ***************************************/
++
++
++void
++dump_frame_size (FILE *file)
++{
++ fprintf (file, "\t%s Current Frame Info\n", ASM_COMMENT_START);
++
++ fprintf (file, "\t%s total_size = %ld\n", ASM_COMMENT_START,
++ cfun->machine->frame.total_size);
++ fprintf (file, "\t%s var_size = %ld\n", ASM_COMMENT_START,
++ cfun->machine->frame.var_size);
++ fprintf (file, "\t%s args_size = %ld\n", ASM_COMMENT_START,
++ cfun->machine->frame.args_size);
++ fprintf (file, "\t%s save_reg_size = %d\n", ASM_COMMENT_START,
++ cfun->machine->frame.save_reg_size);
++ fprintf (file, "\t%s initialized = %d\n", ASM_COMMENT_START,
++ cfun->machine->frame.initialized);
++ fprintf (file, "\t%s save_regs_offset = %ld\n", ASM_COMMENT_START,
++ cfun->machine->frame.save_regs_offset);
++ fprintf (file, "\t%s current_function_is_leaf = %d\n", ASM_COMMENT_START,
++ current_function_is_leaf);
++ fprintf (file, "\t%s frame_pointer_needed = %d\n", ASM_COMMENT_START,
++ frame_pointer_needed);
++ fprintf (file, "\t%s pretend_args_size = %d\n", ASM_COMMENT_START,
++ current_function_pretend_args_size);
++
++}
++
++/* Return true if REGNO should be saved in the prologue. */
++
++static bool
++save_reg_p (unsigned regno)
++{
++ gcc_assert (GP_REGNO_P (regno));
++
++ if (regs_ever_live[regno] && !call_used_regs[regno])
++ return true;
++
++ if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
++ return true;
++
++ if (regno == PIC_OFFSET_TABLE_REGNUM
++ && current_function_uses_pic_offset_table)
++ return true;
++
++ if (regno == RA_REGNO && regs_ever_live[RA_REGNO])
++ return true;
++
++ return false;
++}
++
++/* Return the bytes needed to compute the frame pointer from the current
++ stack pointer. */
++
++HOST_WIDE_INT
++compute_frame_size (void)
++{
++ unsigned int regno;
++ HOST_WIDE_INT var_size; /* # of var. bytes allocated */
++ HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up. */
++ HOST_WIDE_INT save_reg_size; /* # bytes needed to store callee save regs. */
++ HOST_WIDE_INT out_args_size; /* # bytes needed for outgoing args. */
++ unsigned HOST_WIDE_INT save_mask = 0;
++
++ if (cfun->machine->frame.initialized)
++ return cfun->machine->frame.total_size;
++
++ save_reg_size = 0;
++ var_size = STACK_ALIGN (get_frame_size ());
++ out_args_size = STACK_ALIGN (current_function_outgoing_args_size);
++
++ total_size = var_size + out_args_size;
++
++ /* Calculate space needed for gp registers. */
++ for (regno = 0; GP_REGNO_P (regno); regno++)
++ if (save_reg_p (regno))
++ {
++ save_mask |= (unsigned HOST_WIDE_INT)1 << regno;
++ save_reg_size += 4;
++ }
++
++ /* If we call eh_return, we need to save the EH data registers. */
++ if (current_function_calls_eh_return)
++ {
++ unsigned i;
++ unsigned r;
++
++ for (i = 0; (r = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
++ if (!(save_mask & (1 << r)))
++ {
++ save_mask |= 1 << r;
++ save_reg_size += 4;
++ }
++ }
++
++ save_reg_size = STACK_ALIGN (save_reg_size);
++ total_size += save_reg_size;
++
++ total_size += STACK_ALIGN (current_function_pretend_args_size);
++
++ /* Save other computed information. */
++ cfun->machine->frame.save_mask = save_mask;
++ cfun->machine->frame.total_size = total_size;
++ cfun->machine->frame.var_size = var_size;
++ cfun->machine->frame.args_size = out_args_size;
++ cfun->machine->frame.save_reg_size = save_reg_size;
++ cfun->machine->frame.initialized = reload_completed;
++
++ cfun->machine->frame.save_regs_offset = out_args_size + var_size;
++
++ return total_size;
++}
++
++
++int
++nios2_initial_elimination_offset (int from, int to)
++{
++ int offset;
++
++ compute_frame_size ();
++
++ /* Set OFFSET to the offset from the stack pointer. */
++ switch (from)
++ {
++ case FRAME_POINTER_REGNUM:
++ offset = cfun->machine->frame.args_size;
++ break;
++
++ case ARG_POINTER_REGNUM:
++ offset = cfun->machine->frame.total_size;
++ offset -= current_function_pretend_args_size;
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ /* If we are asked for the frame pointer offset, then adjust OFFSET
++ by the offset from the frame pointer to the stack pointer. */
++ if (to == HARD_FRAME_POINTER_REGNUM)
++ offset -= cfun->machine->frame.save_regs_offset;
++
++ return offset;
++}
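++
++/* A sketch of the stack frame layout implied by the sizes and offsets
++ above (derived from compute_frame_size and the elimination offsets,
++ not a normative description; higher addresses at the top):
++
++ incoming arguments (caller's frame)
++ pretend (varargs) arguments <- arg pointer = sp + total_size - pretend
++ callee-saved registers <- hard FP = sp + save_regs_offset
++ local variables <- frame pointer eliminates to sp + args_size
++ outgoing arguments <- stack pointer */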
++
++/* Return nonzero if this function is known to have a null epilogue.
++ This allows the optimizer to omit jumps to jumps if no stack
++ was created. */
++int
++nios2_can_use_return_insn (void)
++{
++ if (!reload_completed)
++ return 0;
++
++ if (regs_ever_live[RA_REGNO] || current_function_profile)
++ return 0;
++
++ if (cfun->machine->frame.initialized)
++ return cfun->machine->frame.total_size == 0;
++
++ return compute_frame_size () == 0;
++}
++
++
++
++
++
++/* Try to take a bit of tedium out of the __builtin_custom_<blah>
++ builtin functions, too. */
++
++#define NIOS2_FOR_ALL_CUSTOM_BUILTINS \
++ NIOS2_DO_BUILTIN (N, n, n ) \
++ NIOS2_DO_BUILTIN (NI, ni, nX ) \
++ NIOS2_DO_BUILTIN (NF, nf, nX ) \
++ NIOS2_DO_BUILTIN (NP, np, nX ) \
++ NIOS2_DO_BUILTIN (NII, nii, nXX ) \
++ NIOS2_DO_BUILTIN (NIF, nif, nXX ) \
++ NIOS2_DO_BUILTIN (NIP, nip, nXX ) \
++ NIOS2_DO_BUILTIN (NFI, nfi, nXX ) \
++ NIOS2_DO_BUILTIN (NFF, nff, nXX ) \
++ NIOS2_DO_BUILTIN (NFP, nfp, nXX ) \
++ NIOS2_DO_BUILTIN (NPI, npi, nXX ) \
++ NIOS2_DO_BUILTIN (NPF, npf, nXX ) \
++ NIOS2_DO_BUILTIN (NPP, npp, nXX ) \
++ NIOS2_DO_BUILTIN (IN, in, Xn ) \
++ NIOS2_DO_BUILTIN (INI, ini, XnX ) \
++ NIOS2_DO_BUILTIN (INF, inf, XnX ) \
++ NIOS2_DO_BUILTIN (INP, inp, XnX ) \
++ NIOS2_DO_BUILTIN (INII, inii, XnXX ) \
++ NIOS2_DO_BUILTIN (INIF, inif, XnXX ) \
++ NIOS2_DO_BUILTIN (INIP, inip, XnXX ) \
++ NIOS2_DO_BUILTIN (INFI, infi, XnXX ) \
++ NIOS2_DO_BUILTIN (INFF, inff, XnXX ) \
++ NIOS2_DO_BUILTIN (INFP, infp, XnXX ) \
++ NIOS2_DO_BUILTIN (INPI, inpi, XnXX ) \
++ NIOS2_DO_BUILTIN (INPF, inpf, XnXX ) \
++ NIOS2_DO_BUILTIN (INPP, inpp, XnXX ) \
++ NIOS2_DO_BUILTIN (FN, fn, Xn ) \
++ NIOS2_DO_BUILTIN (FNI, fni, XnX ) \
++ NIOS2_DO_BUILTIN (FNF, fnf, XnX ) \
++ NIOS2_DO_BUILTIN (FNP, fnp, XnX ) \
++ NIOS2_DO_BUILTIN (FNII, fnii, XnXX ) \
++ NIOS2_DO_BUILTIN (FNIF, fnif, XnXX ) \
++ NIOS2_DO_BUILTIN (FNIP, fnip, XnXX ) \
++ NIOS2_DO_BUILTIN (FNFI, fnfi, XnXX ) \
++ NIOS2_DO_BUILTIN (FNFF, fnff, XnXX ) \
++ NIOS2_DO_BUILTIN (FNFP, fnfp, XnXX ) \
++ NIOS2_DO_BUILTIN (FNPI, fnpi, XnXX ) \
++ NIOS2_DO_BUILTIN (FNPF, fnpf, XnXX ) \
++ NIOS2_DO_BUILTIN (FNPP, fnpp, XnXX ) \
++ NIOS2_DO_BUILTIN (PN, pn, Xn ) \
++ NIOS2_DO_BUILTIN (PNI, pni, XnX ) \
++ NIOS2_DO_BUILTIN (PNF, pnf, XnX ) \
++ NIOS2_DO_BUILTIN (PNP, pnp, XnX ) \
++ NIOS2_DO_BUILTIN (PNII, pnii, XnXX ) \
++ NIOS2_DO_BUILTIN (PNIF, pnif, XnXX ) \
++ NIOS2_DO_BUILTIN (PNIP, pnip, XnXX ) \
++ NIOS2_DO_BUILTIN (PNFI, pnfi, XnXX ) \
++ NIOS2_DO_BUILTIN (PNFF, pnff, XnXX ) \
++ NIOS2_DO_BUILTIN (PNFP, pnfp, XnXX ) \
++ NIOS2_DO_BUILTIN (PNPI, pnpi, XnXX ) \
++ NIOS2_DO_BUILTIN (PNPF, pnpf, XnXX ) \
++ NIOS2_DO_BUILTIN (PNPP, pnpp, XnXX )
++
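++/* For example, each letter in the names above encodes a type:
++ n is the constant custom-instruction opcode, i = int, f = float,
++ p = pointer, with the leading letter (if any) giving the return type.
++ So the INI entry is expected to describe
++ int __builtin_custom_ini (int n, int A), while N alone is a void
++ builtin taking only the opcode. */
++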
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++static const char *NIOS2_CONCAT (nios2_output_fpu_insn_, insn) (rtx); \
++static void NIOS2_CONCAT (nios2_pragma_, insn) (struct cpp_reader *); \
++static void NIOS2_CONCAT (nios2_pragma_no_, insn) (struct cpp_reader *); \
++int NIOS2_CONCAT (nios2_custom_, opt) = -1;
++NIOS2_FOR_ALL_FPU_INSNS
++
++nios2_fpu_info nios2_fpu_insns[nios2_fpu_max_insn] = {
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ { NIOS2_STRINGIFY (opt), \
++ NIOS2_STRINGIFY (insn), \
++ NIOS2_STRINGIFY (args), \
++ -1, \
++ NIOS2_CONCAT (nios2_output_fpu_insn_, insn), \
++ "custom_" NIOS2_STRINGIFY (opt), \
++ NIOS2_CONCAT (nios2_pragma_, insn), \
++ "no_custom_" NIOS2_STRINGIFY (opt), \
++ NIOS2_CONCAT (nios2_pragma_no_, insn), \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ &NIOS2_CONCAT (nios2_custom_, opt) },
++ NIOS2_FOR_ALL_FPU_INSNS
++};
++
++const char *nios2_custom_fpu_cfg_string;
++
++static const char *builtin_custom_seen[256];
++
++static void
++nios2_custom_switch (int parameter, int *value, const char *opt)
++{
++ /* We only document values from 0-255, but we secretly allow -1 so
++ * that the -mno-custom-<opt> switches work. */
++ if (parameter != -1)
++ {
++ if (parameter < -1 || parameter > 255)
++ error ("switch `-mcustom-%s' value %ld must be between 0 and 255",
++ opt, parameter);
++ *value = (int)parameter;
++ }
++}
++
++static void
++nios2_custom_check_insns (int is_pragma)
++{
++ int i;
++ int has_double = 0;
++ int errors = 0;
++ const char *ns[256];
++ int ps[256];
++
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ if (nios2_fpu_insns[i].is_double && nios2_fpu_insns[i].N >= 0)
++ has_double = 1;
++
++ if (has_double)
++ {
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ {
++ if (nios2_fpu_insns[i].needed_by_double
++ && nios2_fpu_insns[i].N < 0)
++ {
++ if (is_pragma)
++ error ("either switch `-mcustom-%s' or `#pragma custom_%s' is "
++ "required for double precision floating point",
++ nios2_fpu_insns[i].option,
++ nios2_fpu_insns[i].option);
++ else
++ error ("switch `-mcustom-%s' is required for double precision "
++ "floating point",
++ nios2_fpu_insns[i].option);
++ errors = 1;
++ }
++ }
++ }
++
++ /* Warn if the user has certain exotic operations that won't get used
++ without -funsafe-math-optimizations. See expand_builtin () in
++ builtins.c. */
++ if (!flag_unsafe_math_optimizations)
++ {
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ {
++ if (nios2_fpu_insns[i].needs_unsafe && nios2_fpu_insns[i].N >= 0)
++ {
++ warning (0, "%s%s' has no effect unless "
++ "-funsafe-math-optimizations is specified",
++ is_pragma ? "`#pragma custom_" : "switch `-mcustom-",
++ nios2_fpu_insns[i].option);
++ /* Just one warning per function per compilation unit, please. */
++ nios2_fpu_insns[i].needs_unsafe = 0;
++ }
++ }
++ }
++
++ /* Warn if the user is trying to use -mcustom-fmins et al., which won't
++ get used without -ffinite-math-only. See fold () in
++ fold-const.c. */
++ if (!flag_finite_math_only)
++ {
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ {
++ if (nios2_fpu_insns[i].needs_finite && nios2_fpu_insns[i].N >= 0)
++ {
++ warning (0, "%s%s' has no effect unless -ffinite-math-only "
++ "is specified",
++ is_pragma ? "`#pragma custom_" : "switch `-mcustom-",
++ nios2_fpu_insns[i].option);
++ /* Just one warning per function per compilation unit, please. */
++ nios2_fpu_insns[i].needs_finite = 0;
++ }
++ }
++ }
++
++ /* Warn the user about double precision divide braindamage until we
++ can fix it properly. See the RDIV_EXPR case of expand_expr_real in
++ expr.c. */
++ {
++ static int warned = 0;
++ if (flag_unsafe_math_optimizations
++ && !optimize_size
++ && nios2_fpu_insns[nios2_fpu_divdf3].N >= 0
++ && !warned)
++ {
++ warning (0, "%s%s' behaves poorly without -Os",
++ is_pragma ? "`#pragma custom_" : "switch `-mcustom-",
++ nios2_fpu_insns[nios2_fpu_divdf3].option);
++ warned = 1;
++ }
++ }
++
++ /* The following bit of voodoo is lifted from the generated file
++ insn-opinit.c: to allow #pragmas to work properly, we have to tweak
++ the optab_table manually -- it only gets initialized once after the
++ switches are handled and before any #pragmas are seen. */
++ if (is_pragma)
++ {
++ /* Only do this if the optabs have already been defined, not
++ when we're handling command line switches. */
++ addv_optab->handlers[SFmode].insn_code =
++ add_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ addv_optab->handlers[DFmode].insn_code =
++ add_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ subv_optab->handlers[SFmode].insn_code =
++ sub_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ subv_optab->handlers[DFmode].insn_code =
++ sub_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ smulv_optab->handlers[SFmode].insn_code =
++ smul_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ smulv_optab->handlers[DFmode].insn_code =
++ smul_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ sdiv_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ sdiv_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ negv_optab->handlers[SFmode].insn_code =
++ neg_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ negv_optab->handlers[DFmode].insn_code =
++ neg_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ smin_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ smin_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ smax_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ smax_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ absv_optab->handlers[SFmode].insn_code =
++ abs_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ absv_optab->handlers[DFmode].insn_code =
++ abs_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ sqrt_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ sqrt_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ cos_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ cos_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ sin_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ sin_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ tan_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ tan_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ atan_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ atan_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ exp_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ exp_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ log_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ log_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++ sfloat_optab->handlers[SFmode][SImode].insn_code = CODE_FOR_nothing;
++ sfloat_optab->handlers[DFmode][SImode].insn_code = CODE_FOR_nothing;
++ ufloat_optab->handlers[SFmode][SImode].insn_code = CODE_FOR_nothing;
++ ufloat_optab->handlers[DFmode][SImode].insn_code = CODE_FOR_nothing;
++ sfix_optab->handlers[SImode][SFmode].insn_code = CODE_FOR_nothing;
++ sfix_optab->handlers[SImode][DFmode].insn_code = CODE_FOR_nothing;
++ ufix_optab->handlers[SImode][SFmode].insn_code = CODE_FOR_nothing;
++ ufix_optab->handlers[SImode][DFmode].insn_code = CODE_FOR_nothing;
++ sext_optab->handlers[DFmode][SFmode].insn_code = CODE_FOR_nothing;
++ trunc_optab->handlers[SFmode][DFmode].insn_code = CODE_FOR_nothing;
++ cmp_optab->handlers[SFmode].insn_code = CODE_FOR_nothing;
++ cmp_optab->handlers[DFmode].insn_code = CODE_FOR_nothing;
++
++ if (HAVE_addsf3)
++ addv_optab->handlers[SFmode].insn_code =
++ add_optab->handlers[SFmode].insn_code = CODE_FOR_addsf3;
++ if (HAVE_adddf3)
++ addv_optab->handlers[DFmode].insn_code =
++ add_optab->handlers[DFmode].insn_code = CODE_FOR_adddf3;
++ if (HAVE_subsf3)
++ subv_optab->handlers[SFmode].insn_code =
++ sub_optab->handlers[SFmode].insn_code = CODE_FOR_subsf3;
++ if (HAVE_subdf3)
++ subv_optab->handlers[DFmode].insn_code =
++ sub_optab->handlers[DFmode].insn_code = CODE_FOR_subdf3;
++ if (HAVE_mulsf3)
++ smulv_optab->handlers[SFmode].insn_code =
++ smul_optab->handlers[SFmode].insn_code = CODE_FOR_mulsf3;
++ if (HAVE_muldf3)
++ smulv_optab->handlers[DFmode].insn_code =
++ smul_optab->handlers[DFmode].insn_code = CODE_FOR_muldf3;
++ if (HAVE_divsf3)
++ sdiv_optab->handlers[SFmode].insn_code = CODE_FOR_divsf3;
++ if (HAVE_divdf3)
++ sdiv_optab->handlers[DFmode].insn_code = CODE_FOR_divdf3;
++ if (HAVE_negsf2)
++ negv_optab->handlers[SFmode].insn_code =
++ neg_optab->handlers[SFmode].insn_code = CODE_FOR_negsf2;
++ if (HAVE_negdf2)
++ negv_optab->handlers[DFmode].insn_code =
++ neg_optab->handlers[DFmode].insn_code = CODE_FOR_negdf2;
++ if (HAVE_minsf3)
++ smin_optab->handlers[SFmode].insn_code = CODE_FOR_minsf3;
++ if (HAVE_mindf3)
++ smin_optab->handlers[DFmode].insn_code = CODE_FOR_mindf3;
++ if (HAVE_maxsf3)
++ smax_optab->handlers[SFmode].insn_code = CODE_FOR_maxsf3;
++ if (HAVE_maxdf3)
++ smax_optab->handlers[DFmode].insn_code = CODE_FOR_maxdf3;
++ if (HAVE_abssf2)
++ absv_optab->handlers[SFmode].insn_code =
++ abs_optab->handlers[SFmode].insn_code = CODE_FOR_abssf2;
++ if (HAVE_absdf2)
++ absv_optab->handlers[DFmode].insn_code =
++ abs_optab->handlers[DFmode].insn_code = CODE_FOR_absdf2;
++ if (HAVE_sqrtsf2)
++ sqrt_optab->handlers[SFmode].insn_code = CODE_FOR_sqrtsf2;
++ if (HAVE_sqrtdf2)
++ sqrt_optab->handlers[DFmode].insn_code = CODE_FOR_sqrtdf2;
++ if (HAVE_cossf2)
++ cos_optab->handlers[SFmode].insn_code = CODE_FOR_cossf2;
++ if (HAVE_cosdf2)
++ cos_optab->handlers[DFmode].insn_code = CODE_FOR_cosdf2;
++ if (HAVE_sinsf2)
++ sin_optab->handlers[SFmode].insn_code = CODE_FOR_sinsf2;
++ if (HAVE_sindf2)
++ sin_optab->handlers[DFmode].insn_code = CODE_FOR_sindf2;
++ if (HAVE_tansf2)
++ tan_optab->handlers[SFmode].insn_code = CODE_FOR_tansf2;
++ if (HAVE_tandf2)
++ tan_optab->handlers[DFmode].insn_code = CODE_FOR_tandf2;
++ if (HAVE_atansf2)
++ atan_optab->handlers[SFmode].insn_code = CODE_FOR_atansf2;
++ if (HAVE_atandf2)
++ atan_optab->handlers[DFmode].insn_code = CODE_FOR_atandf2;
++ if (HAVE_expsf2)
++ exp_optab->handlers[SFmode].insn_code = CODE_FOR_expsf2;
++ if (HAVE_expdf2)
++ exp_optab->handlers[DFmode].insn_code = CODE_FOR_expdf2;
++ if (HAVE_logsf2)
++ log_optab->handlers[SFmode].insn_code = CODE_FOR_logsf2;
++ if (HAVE_logdf2)
++ log_optab->handlers[DFmode].insn_code = CODE_FOR_logdf2;
++ if (HAVE_floatsisf2)
++ sfloat_optab->handlers[SFmode][SImode].insn_code = CODE_FOR_floatsisf2;
++ if (HAVE_floatsidf2)
++ sfloat_optab->handlers[DFmode][SImode].insn_code = CODE_FOR_floatsidf2;
++ if (HAVE_floatunssisf2)
++ ufloat_optab->handlers[SFmode][SImode].insn_code = CODE_FOR_floatunssisf2;
++ if (HAVE_floatunssidf2)
++ ufloat_optab->handlers[DFmode][SImode].insn_code = CODE_FOR_floatunssidf2;
++ if (HAVE_fixsfsi2)
++ sfix_optab->handlers[SImode][SFmode].insn_code = CODE_FOR_fixsfsi2;
++ if (HAVE_fixdfsi2)
++ sfix_optab->handlers[SImode][DFmode].insn_code = CODE_FOR_fixdfsi2;
++ if (HAVE_fixunssfsi2)
++ ufix_optab->handlers[SImode][SFmode].insn_code = CODE_FOR_fixunssfsi2;
++ if (HAVE_fixunsdfsi2)
++ ufix_optab->handlers[SImode][DFmode].insn_code = CODE_FOR_fixunsdfsi2;
++ if (HAVE_extendsfdf2)
++ sext_optab->handlers[DFmode][SFmode].insn_code = CODE_FOR_extendsfdf2;
++ if (HAVE_truncdfsf2)
++ trunc_optab->handlers[SFmode][DFmode].insn_code = CODE_FOR_truncdfsf2;
++ if (HAVE_cmpsf)
++ cmp_optab->handlers[SFmode].insn_code = CODE_FOR_cmpsf;
++ if (HAVE_cmpdf)
++ cmp_optab->handlers[DFmode].insn_code = CODE_FOR_cmpdf;
++ }
++
++ /* Check for duplicate values of N. */
++ for (i = 0; i < 256; i++)
++ {
++ ns[i] = 0;
++ ps[i] = 0;
++ }
++
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ {
++ int N = nios2_fpu_insns[i].N;
++ if (N >= 0)
++ {
++ if (ns[N])
++ {
++ error ("%s%s' conflicts with %s%s'",
++ is_pragma ? "`#pragma custom_" : "switch `-mcustom-",
++ nios2_fpu_insns[i].option,
++ ps[N] ? "`#pragma custom_" : "switch `-mcustom-",
++ ns[N]);
++ errors = 1;
++ }
++ else if (builtin_custom_seen[N])
++ {
++ error ("call to `%s' conflicts with %s%s'",
++ builtin_custom_seen[N],
++ (nios2_fpu_insns[i].pragma_seen
++ ? "`#pragma custom_" : "switch `-mcustom-"),
++ nios2_fpu_insns[i].option);
++ errors = 1;
++ }
++ else
++ {
++ ns[N] = nios2_fpu_insns[i].option;
++ ps[N] = nios2_fpu_insns[i].pragma_seen;
++ }
++ }
++ }
++
++ if (errors)
++ fatal_error ("conflicting use of -mcustom switches, #pragmas, and/or "
++ "__builtin_custom_ functions");
++}
++
++static void
++nios2_handle_custom_fpu_cfg (const char *cfg, int is_pragma)
++{
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ int opt = nios2_fpu_insns[NIOS2_CONCAT (nios2_fpu_, insn)].N;
++NIOS2_FOR_ALL_FPU_INSNS
++
++ /*
++ * ??? These are just some sample possibilities. We'll change these
++ * at the last minute to match the capabilities of the actual fpu.
++ */
++ if (!strcasecmp (cfg, "60-1"))
++ {
++ fmuls = 252;
++ fadds = 253;
++ fsubs = 254;
++ flag_single_precision_constant = 1;
++ }
++ else if (!strcasecmp (cfg, "60-2"))
++ {
++ fmuls = 252;
++ fadds = 253;
++ fsubs = 254;
++ fdivs = 255;
++ flag_single_precision_constant = 1;
++ }
++ else if (!strcasecmp (cfg, "72-3"))
++ {
++ floatus = 243;
++ fixsi = 244;
++ floatis = 245;
++ fcmpgts = 246;
++ fcmples = 249;
++ fcmpeqs = 250;
++ fcmpnes = 251;
++ fmuls = 252;
++ fadds = 253;
++ fsubs = 254;
++ fdivs = 255;
++ flag_single_precision_constant = 1;
++ }
++ else
++ warning (0, "ignoring unrecognized %sfpu-cfg' value `%s'",
++ is_pragma ? "`#pragma custom_" : "switch `-mcustom-", cfg);
++
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ nios2_fpu_insns[NIOS2_CONCAT (nios2_fpu_, insn)].N = opt;
++NIOS2_FOR_ALL_FPU_INSNS
++
++ /* Guard against errors in the standard configurations. */
++ nios2_custom_check_insns (is_pragma);
++}
++
++void
++override_options (void)
++{
++ int i;
++
++ /* Function to allocate machine-dependent function status. */
++ init_machine_status = &nios2_init_machine_status;
++
++ nios2_section_threshold
++ = g_switch_set ? g_switch_value : NIOS2_DEFAULT_GVALUE;
++
++ /* If we don't have mul, we don't have mulx either! */
++ if (!TARGET_HAS_MUL && TARGET_HAS_MULX)
++ target_flags &= ~MASK_HAS_MULX;
++
++ /* Set up for stack limit checking. */
++ if (TARGET_STACK_CHECK)
++ stack_limit_rtx = gen_rtx_REG(SImode, ET_REGNO);
++
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ {
++ nios2_fpu_insns[i].is_double = (nios2_fpu_insns[i].args[0] == 'd'
++ || nios2_fpu_insns[i].args[1] == 'd'
++ || nios2_fpu_insns[i].args[2] == 'd');
++ nios2_fpu_insns[i].needed_by_double = (i == nios2_fpu_nios2_fwrx
++ || i == nios2_fpu_nios2_fwry
++ || i == nios2_fpu_nios2_frdxlo
++ || i == nios2_fpu_nios2_frdxhi
++ || i == nios2_fpu_nios2_frdy);
++ nios2_fpu_insns[i].needs_unsafe = (i == nios2_fpu_cossf2
++ || i == nios2_fpu_cosdf2
++ || i == nios2_fpu_sinsf2
++ || i == nios2_fpu_sindf2
++ || i == nios2_fpu_tansf2
++ || i == nios2_fpu_tandf2
++ || i == nios2_fpu_atansf2
++ || i == nios2_fpu_atandf2
++ || i == nios2_fpu_expsf2
++ || i == nios2_fpu_expdf2
++ || i == nios2_fpu_logsf2
++ || i == nios2_fpu_logdf2);
++ nios2_fpu_insns[i].needs_finite = (i == nios2_fpu_minsf3
++ || i == nios2_fpu_maxsf3
++ || i == nios2_fpu_mindf3
++ || i == nios2_fpu_maxdf3);
++ }
++
++ /* We haven't seen any __builtin_custom functions yet. */
++ for (i = 0; i < 256; i++)
++ builtin_custom_seen[i] = 0;
++
++ /* Set up default handling for floating point custom instructions.
++
++ Putting things in this order means that the -mcustom-fpu-cfg=
++ switch will always be overridden by individual -mcustom-fadds=
++ switches, regardless of the order in which they were specified
++ on the command line. ??? Remember to document this. */
++ if (nios2_custom_fpu_cfg_string && *nios2_custom_fpu_cfg_string)
++ nios2_handle_custom_fpu_cfg (nios2_custom_fpu_cfg_string, 0);
++
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ nios2_custom_switch (*nios2_fpu_insns[i].pN,
++ &nios2_fpu_insns[i].N,
++ nios2_fpu_insns[i].option);
++
++ nios2_custom_check_insns (0);
++}
++
++void
++optimization_options (int level, int size)
++{
++ if (level || size)
++ target_flags |= MASK_INLINE_MEMCPY;
++
++ if (level >= 3 && !size)
++ target_flags |= MASK_FAST_SW_DIV;
++}
++
++/* Allocate a chunk of memory for per-function machine-dependent data. */
++static struct machine_function *
++nios2_init_machine_status (void)
++{
++ return ((struct machine_function *)
++ ggc_alloc_cleared (sizeof (struct machine_function)));
++}
++
++
++
++/*****************
++ * Describing Relative Costs of Operations
++ *****************/
++
++/* Compute a (partial) cost for rtx X. Return true if the complete
++ cost has been computed, and false if subexpressions should be
++ scanned. In either case, *TOTAL contains the cost result. */
++
++
++
++static bool
++nios2_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED, int *total)
++{
++ switch (code)
++ {
++ case CONST_INT:
++ if (INTVAL (x) == 0)
++ {
++ *total = COSTS_N_INSNS (0);
++ return true;
++ }
++ else if (SMALL_INT (INTVAL (x))
++ || SMALL_INT_UNSIGNED (INTVAL (x))
++ || UPPER16_INT (INTVAL (x)))
++ {
++ *total = COSTS_N_INSNS (2);
++ return true;
++ }
++ else
++ {
++ *total = COSTS_N_INSNS (4);
++ return true;
++ }
++
++ case LABEL_REF:
++ case SYMBOL_REF:
++ /* ??? gp relative stuff will fit in here. */
++ /* fall through */
++ case CONST:
++ case CONST_DOUBLE:
++ {
++ *total = COSTS_N_INSNS (4);
++ return true;
++ }
++
++ case MULT:
++ {
++ *total = COSTS_N_INSNS (1);
++ return false;
++ }
++ case SIGN_EXTEND:
++ {
++ *total = COSTS_N_INSNS (3);
++ return false;
++ }
++ case ZERO_EXTEND:
++ {
++ *total = COSTS_N_INSNS (1);
++ return false;
++ }
++
++ default:
++ return false;
++ }
++}
++
++
++/***************************************
++ * INSTRUCTION SUPPORT
++ *
++ * These functions are used within the Machine Description to
++ * handle common or complicated output and expansions from
++ * instructions.
++ ***************************************/
++
++/* Return TRUE if X references a SYMBOL_REF. */
++static int
++symbol_mentioned_p (rtx x)
++{
++ const char * fmt;
++ int i;
++
++ if (GET_CODE (x) == SYMBOL_REF)
++ return 1;
++
++ /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
++ are constant offsets, not symbols. */
++ if (GET_CODE (x) == UNSPEC && IS_UNSPEC_TLS (XINT (x, 1)))
++ return 0;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (symbol_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
++
++ return 0;
++}
++
++/* Return TRUE if X references a LABEL_REF. */
++static int
++label_mentioned_p (rtx x)
++{
++ const char * fmt;
++ int i;
++
++ if (GET_CODE (x) == LABEL_REF)
++ return 1;
++
++ /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
++ instruction, but they are constant offsets, not symbols. */
++ if (GET_CODE (x) == UNSPEC && IS_UNSPEC_TLS (XINT (x, 1)))
++ return 0;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (label_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
++
++ return 0;
++}
++
++static int
++tls_mentioned_p (rtx x)
++{
++ switch (GET_CODE (x))
++ {
++ case CONST:
++ return tls_mentioned_p (XEXP (x, 0));
++
++ case UNSPEC:
++ if (IS_UNSPEC_TLS (XINT (x, 1)))
++ return 1;
++
++ default:
++ return 0;
++ }
++}
++
++/* Helper for nios2_tls_referenced_p. */
++
++static int
++nios2_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
++{
++ if (GET_CODE (*x) == SYMBOL_REF)
++ return SYMBOL_REF_TLS_MODEL (*x) != 0;
++
++ /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
++ TLS offsets, not real symbol references. */
++ if (GET_CODE (*x) == UNSPEC
++ && IS_UNSPEC_TLS (XINT (*x, 1)))
++ return -1;
++
++ return 0;
++}
++
++/* Return TRUE if X contains any TLS symbol references. */
++
++static bool
++nios2_tls_referenced_p (rtx x)
++{
++ if (! TARGET_HAVE_TLS)
++ return false;
++
++ return for_each_rtx (&x, nios2_tls_operand_p_1, NULL);
++}
++
++static bool
++nios2_cannot_force_const_mem (rtx x)
++{
++ return nios2_tls_referenced_p (x);
++}
++
++/* Emit a call to __tls_get_addr. TI is the argument to this function.
++ Return an RTX for the location of the call's return value. */
++
++static GTY(()) rtx nios2_tls_symbol;
++
++static rtx
++nios2_call_tls_get_addr (rtx ti)
++{
++ rtx arg = gen_rtx_REG (Pmode, FIRST_ARG_REGNO);
++ rtx ret = gen_rtx_REG (Pmode, FIRST_RETVAL_REGNO);
++ rtx fn, insn;
++
++ if (!nios2_tls_symbol)
++ nios2_tls_symbol = init_one_libfunc ("__tls_get_addr");
++
++ emit_insn (gen_rtx_SET (Pmode, arg, ti));
++ fn = gen_rtx_MEM (QImode, nios2_tls_symbol);
++ insn = emit_call_insn (gen_call_value (ret, fn, const0_rtx));
++ CONST_OR_PURE_CALL_P (insn) = 1;
++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), ret);
++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
++
++ return ret;
++}
++
++/* Generate the code to access LOC, a thread local SYMBOL_REF. The
++ return value will be a valid address and move_operand (either a REG
++ or a LO_SUM). */
++
++static rtx
++nios2_legitimize_tls_address (rtx loc)
++{
++ rtx dest = gen_reg_rtx (Pmode);
++ rtx ret, tmp1;
++ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
++
++ switch (model)
++ {
++ case TLS_MODEL_GLOBAL_DYNAMIC:
++ tmp1 = gen_reg_rtx (Pmode);
++ emit_insn (gen_add_tls_gd (tmp1, pic_offset_table_rtx, loc));
++ current_function_uses_pic_offset_table = 1;
++ ret = nios2_call_tls_get_addr (tmp1);
++ emit_insn (gen_rtx_SET (Pmode, dest, ret));
++ break;
++
++ case TLS_MODEL_LOCAL_DYNAMIC:
++ tmp1 = gen_reg_rtx (Pmode);
++ emit_insn (gen_add_tls_ldm (tmp1, pic_offset_table_rtx, loc));
++ current_function_uses_pic_offset_table = 1;
++ ret = nios2_call_tls_get_addr (tmp1);
++
++ emit_insn (gen_add_tls_ldo (dest, ret, loc));
++
++ break;
++
++ case TLS_MODEL_INITIAL_EXEC:
++ tmp1 = gen_reg_rtx (Pmode);
++ emit_insn (gen_load_tls_ie (tmp1, pic_offset_table_rtx, loc));
++ current_function_uses_pic_offset_table = 1;
++ emit_insn (gen_add3_insn (dest,
++ gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM),
++ tmp1));
++ break;
++
++ case TLS_MODEL_LOCAL_EXEC:
++ emit_insn (gen_add_tls_le (dest,
++ gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM),
++ loc));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ return dest;
++}
++
++int
++nios2_emit_move_sequence (rtx *operands, enum machine_mode mode)
++{
++ rtx to = operands[0];
++ rtx from = operands[1];
++
++ if (!register_operand (to, mode) && !reg_or_0_operand (from, mode))
++ {
++ if (no_new_pseudos)
++ internal_error ("Trying to force_reg when no_new_pseudos == 1");
++ from = copy_to_mode_reg (mode, from);
++ }
++
++ /* Recognize the case where from is a reference to thread-local
++ data and load its address to a register. */
++ if (nios2_tls_referenced_p (from))
++ {
++ rtx tmp = from;
++ rtx addend = NULL;
++
++ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
++ {
++ addend = XEXP (XEXP (tmp, 0), 1);
++ tmp = XEXP (XEXP (tmp, 0), 0);
++ }
++
++ gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
++ gcc_assert (SYMBOL_REF_TLS_MODEL (tmp) != 0);
++
++ tmp = nios2_legitimize_tls_address (tmp);
++ if (addend)
++ {
++ tmp = gen_rtx_PLUS (SImode, tmp, addend);
++ tmp = force_operand (tmp, to);
++ }
++ from = tmp;
++ }
++ else if (flag_pic && (CONSTANT_P (from) || symbol_mentioned_p (from) ||
++ label_mentioned_p (from)))
++ from = nios2_legitimize_pic_address (from, SImode,
++ (no_new_pseudos ? to : 0));
++
++ operands[0] = to;
++ operands[1] = from;
++ return 0;
++}
++
++/* Divide Support */
++
++/*
++ If -O3 is used, we want to output a table lookup for
++ divides between small numbers (both num and den >= 0
++ and < 0x10). The overhead of this method in the worst
++ case is 40 bytes in the text section (10 insns) and
++ 256 bytes in the data section. Additional divides do
++ not incur additional penalties in the data section.
++
++ Code speed is improved for small divides by about 5x
++ when using this method in the worst case (~9 cycles
++ vs ~45). And in the worst case divides not within the
++ table are penalized by about 10% (~5 cycles vs ~45).
++ However in the typical case the penalty is not as bad
++ because doing the long divide in only 45 cycles is
++ quite optimistic.
++
++ ??? It would be nice to have some benchmarks other
++ than Dhrystone to back this up.
++
++ This bit of expansion is to create this instruction
++ sequence as rtl.
++ or $8, $4, $5
++ slli $9, $4, 4
++ cmpgeui $3, $8, 16
++ beq $3, $0, .L3
++ or $10, $9, $5
++ add $12, $11, divide_table
++ ldbu $2, 0($12)
++ br .L1
++.L3:
++ call slow_div
++.L1:
++# continue here with result in $2
++
++ ??? Ideally I would like the emit_libcall_block call to contain
++ all of this code, but I don't know how to do that. What it
++ means is that if the divide can be eliminated, it may not
++ completely disappear.
++
++ ??? The __divsi3_table label should ideally be moved out
++ of this block and into a global. If it is placed into the
++ sdata section we can save even more cycles by doing things
++ gp relative.
++*/
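++/* A worked example of the table lookup (illustrative only): for
++ num = 7 and den = 3, both < 0x10, the index is
++ (num << 4) | den = 0x73, so the ldbu above fetches
++ __divsi3_table[0x73], which holds 7 / 3 = 2, directly into the
++ result register. */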
++int
++nios2_emit_expensive_div (rtx *operands, enum machine_mode mode)
++{
++ rtx or_result, shift_left_result;
++ rtx lookup_value;
++ rtx lab1, lab3;
++ rtx insns;
++ rtx libfunc;
++ rtx final_result;
++ rtx tmp;
++
++ /* It may look a little generic, but only SImode
++ is supported for now. */
++ gcc_assert (mode == SImode);
++
++ libfunc = sdiv_optab->handlers[(int) SImode].libfunc;
++
++
++
++ lab1 = gen_label_rtx ();
++ lab3 = gen_label_rtx ();
++
++ or_result = expand_simple_binop (SImode, IOR,
++ operands[1], operands[2],
++ 0, 0, OPTAB_LIB_WIDEN);
++
++ emit_cmp_and_jump_insns (or_result, GEN_INT (15), GTU, 0,
++ GET_MODE (or_result), 0, lab3);
++ JUMP_LABEL (get_last_insn ()) = lab3;
++
++ shift_left_result = expand_simple_binop (SImode, ASHIFT,
++ operands[1], GEN_INT (4),
++ 0, 0, OPTAB_LIB_WIDEN);
++
++ lookup_value = expand_simple_binop (SImode, IOR,
++ shift_left_result, operands[2],
++ 0, 0, OPTAB_LIB_WIDEN);
++
++ convert_move (operands[0],
++ gen_rtx_MEM (QImode,
++ gen_rtx_PLUS (SImode,
++ lookup_value,
++ gen_rtx_SYMBOL_REF (SImode, "__divsi3_table"))),
++ 1);
++
++
++ tmp = emit_jump_insn (gen_jump (lab1));
++ JUMP_LABEL (tmp) = lab1;
++ emit_barrier ();
++
++ emit_label (lab3);
++ LABEL_NUSES (lab3) = 1;
++
++ start_sequence ();
++ final_result = emit_library_call_value (libfunc, NULL_RTX,
++ LCT_CONST, SImode, 2,
++ operands[1], SImode,
++ operands[2], SImode);
++
++
++ insns = get_insns ();
++ end_sequence ();
++ emit_libcall_block (insns, operands[0], final_result,
++ gen_rtx_DIV (SImode, operands[1], operands[2]));
++
++ emit_label (lab1);
++ LABEL_NUSES (lab1) = 1;
++ return 1;
++}
++
++/* The function with address *ADDR is being called. If the address
++ needs to be loaded from the GOT, emit the instruction to do so and
++ update *ADDR to point to the rtx for the loaded value. */
++
++void
++nios2_adjust_call_address (rtx *addr)
++{
++ if (flag_pic
++ && (GET_CODE (*addr) == SYMBOL_REF || GET_CODE (*addr) == LABEL_REF))
++ {
++ rtx addr_orig;
++ current_function_uses_pic_offset_table = 1;
++ addr_orig = *addr;
++ *addr = gen_reg_rtx (GET_MODE (addr_orig));
++ emit_insn (gen_pic_load_call_addr (*addr,
++ pic_offset_table_rtx, addr_orig));
++ }
++}
++
++/* Branches/Compares. */
++
++/* The way of handling branches/compares
++ in gcc is heavily borrowed from MIPS. */
++
++enum internal_test
++{
++ ITEST_EQ,
++ ITEST_NE,
++ ITEST_GT,
++ ITEST_GE,
++ ITEST_LT,
++ ITEST_LE,
++ ITEST_GTU,
++ ITEST_GEU,
++ ITEST_LTU,
++ ITEST_LEU,
++ ITEST_MAX
++};
++
++static enum internal_test map_test_to_internal_test (enum rtx_code);
++
++/* Cached operands, and operator to compare for use in set/branch/trap
++ on condition codes. */
++rtx branch_cmp[2];
++enum cmp_type branch_type;
++
++/* Make normal rtx_code into something we can index from an array. */
++
++static enum internal_test
++map_test_to_internal_test (enum rtx_code test_code)
++{
++ enum internal_test test = ITEST_MAX;
++
++ switch (test_code)
++ {
++ case EQ:
++ test = ITEST_EQ;
++ break;
++ case NE:
++ test = ITEST_NE;
++ break;
++ case GT:
++ test = ITEST_GT;
++ break;
++ case GE:
++ test = ITEST_GE;
++ break;
++ case LT:
++ test = ITEST_LT;
++ break;
++ case LE:
++ test = ITEST_LE;
++ break;
++ case GTU:
++ test = ITEST_GTU;
++ break;
++ case GEU:
++ test = ITEST_GEU;
++ break;
++ case LTU:
++ test = ITEST_LTU;
++ break;
++ case LEU:
++ test = ITEST_LEU;
++ break;
++ default:
++ break;
++ }
++
++ return test;
++}
++
++bool have_nios2_fpu_cmp_insn( enum rtx_code cond_t, enum cmp_type cmp_t );
++enum rtx_code get_reverse_cond(enum rtx_code cond_t);
++
++bool
++have_nios2_fpu_cmp_insn( enum rtx_code cond_t, enum cmp_type cmp_t )
++{
++ if (cmp_t == CMP_SF)
++ {
++ switch (cond_t)
++ {
++ case EQ:
++ return (nios2_fpu_insns[nios2_fpu_nios2_seqsf].N >= 0);
++ case NE:
++ return (nios2_fpu_insns[nios2_fpu_nios2_snesf].N >= 0);
++ case GT:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sgtsf].N >= 0);
++ case GE:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sgesf].N >= 0);
++ case LT:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sltsf].N >= 0);
++ case LE:
++ return (nios2_fpu_insns[nios2_fpu_nios2_slesf].N >= 0);
++ default:
++ break;
++ }
++ }
++ else if (cmp_t == CMP_DF)
++ {
++ switch (cond_t)
++ {
++ case EQ:
++ return (nios2_fpu_insns[nios2_fpu_nios2_seqdf].N >= 0);
++ case NE:
++ return (nios2_fpu_insns[nios2_fpu_nios2_snedf].N >= 0);
++ case GT:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sgtdf].N >= 0);
++ case GE:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sgedf].N >= 0);
++ case LT:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sltdf].N >= 0);
++ case LE:
++ return (nios2_fpu_insns[nios2_fpu_nios2_sledf].N >= 0);
++ default:
++ break;
++ }
++ }
++
++ return false;
++}
++
++/* Note that get_reverse_cond() is not the same as inverting the condition:
++ it returns the condition code needed to generate the same test when the
++ operand order is reversed. For example, a > b is the same test as
++ b < a, so GT maps to LT; EQ and NE are unchanged. */
++enum rtx_code
++get_reverse_cond(enum rtx_code cond_t)
++{
++ switch (cond_t)
++ {
++ case GT: return LT;
++ case GE: return LE;
++ case LT: return GT;
++ case LE: return GE;
++ case GTU: return LTU;
++ case GEU: return LEU;
++ case LTU: return GTU;
++ case LEU: return GEU;
++ default: break;
++ }
++
++ return cond_t;
++}
++
++
++/* Generate the code to compare (and possibly branch) two integer values
++ TEST_CODE is the comparison code we are trying to emulate
++ (or implement directly)
++ RESULT is where to store the result of the comparison,
++ or null to emit a branch
++ CMP0 CMP1 are the two comparison operands
++ DESTINATION is the destination of the branch, or null to only compare. */
++
++void
++gen_int_relational (enum rtx_code test_code, /* Relational test (EQ, etc). */
++ rtx result, /* Result to store comp. or 0 if branch. */
++ rtx cmp0, /* First operand to compare. */
++ rtx cmp1, /* Second operand to compare. */
++ rtx destination) /* Destination of the branch,
++ or 0 if compare. */
++{
++ struct cmp_info
++ {
++ /* For register (or 0) compares. */
++ enum rtx_code test_code_reg; /* Code to use in instruction (LT vs. LTU). */
++ int reverse_regs; /* Reverse registers in test. */
++
++ /* for immediate compares */
++ enum rtx_code test_code_const; /* Code to use in instruction (LT vs. LTU). */
++ int const_low; /* Low bound of constant we can accept. */
++ int const_high; /* High bound of constant we can accept. */
++ int const_add; /* Constant to add. */
++
++ /* generic info */
++ int unsignedp; /* != 0 for unsigned comparisons. */
++ };
++
++ static const struct cmp_info info[(int) ITEST_MAX] = {
++
++ {EQ, 0, EQ, -32768, 32767, 0, 0}, /* EQ */
++ {NE, 0, NE, -32768, 32767, 0, 0}, /* NE */
++
++ {LT, 1, GE, -32769, 32766, 1, 0}, /* GT */
++ {GE, 0, GE, -32768, 32767, 0, 0}, /* GE */
++ {LT, 0, LT, -32768, 32767, 0, 0}, /* LT */
++ {GE, 1, LT, -32769, 32766, 1, 0}, /* LE */
++
++ {LTU, 1, GEU, 0, 65534, 1, 0}, /* GTU */
++ {GEU, 0, GEU, 0, 65535, 0, 0}, /* GEU */
++ {LTU, 0, LTU, 0, 65535, 0, 0}, /* LTU */
++ {GEU, 1, LTU, 0, 65534, 1, 0}, /* LEU */
++ };
++
++ enum internal_test test;
++ enum machine_mode mode;
++ const struct cmp_info *p_info;
++ int branch_p;
++
++
++ test = map_test_to_internal_test (test_code);
++ gcc_assert (test != ITEST_MAX);
++
++ p_info = &info[(int) test];
++
++ mode = GET_MODE (cmp0);
++ if (mode == VOIDmode)
++ mode = GET_MODE (cmp1);
++
++ branch_p = (destination != 0);
++
++ /* Handle floating point comparison directly. */
++ if (branch_type == CMP_SF || branch_type == CMP_DF)
++ {
++
++ bool reverse_operands = false;
++
++ enum machine_mode float_mode = (branch_type == CMP_SF) ? SFmode : DFmode;
++
++ gcc_assert (register_operand (cmp0, float_mode) &&
++ register_operand (cmp1, float_mode));
++
++ if (branch_p)
++ {
++ test_code = p_info->test_code_reg;
++ reverse_operands = (p_info->reverse_regs);
++ }
++
++ if (!have_nios2_fpu_cmp_insn(test_code, branch_type) &&
++ have_nios2_fpu_cmp_insn(get_reverse_cond(test_code), branch_type) )
++ {
++ test_code = get_reverse_cond(test_code);
++ reverse_operands = !reverse_operands;
++ }
++
++ if (reverse_operands)
++ {
++ rtx temp = cmp0;
++ cmp0 = cmp1;
++ cmp1 = temp;
++ }
++
++ if (branch_p)
++ {
++ rtx cond = gen_rtx_fmt_ee (test_code, SImode, cmp0, cmp1);
++ rtx label = gen_rtx_LABEL_REF (VOIDmode, destination);
++ rtx insn = gen_rtx_SET (VOIDmode, pc_rtx,
++ gen_rtx_IF_THEN_ELSE (VOIDmode,
++ cond, label, pc_rtx));
++ emit_jump_insn (insn);
++ }
++ else
++ emit_move_insn (result, gen_rtx_fmt_ee (test_code, SImode, cmp0,
++ cmp1));
++ return;
++ }
++
++ /* We can't, under any circumstances, have const_ints in cmp0
++ ??? Actually we could have const0. */
++ if (GET_CODE (cmp0) == CONST_INT)
++ cmp0 = force_reg (mode, cmp0);
++
++ /* If the comparison is against an int not in legal range
++ move it into a register. */
++ if (GET_CODE (cmp1) == CONST_INT)
++ {
++ HOST_WIDE_INT value = INTVAL (cmp1);
++
++ if (value < p_info->const_low || value > p_info->const_high)
++ cmp1 = force_reg (mode, cmp1);
++ }
++
++ /* Comparison to constants may involve adding 1 to change a GT into GE.
++ Comparison between two registers may involve switching operands. */
++ if (GET_CODE (cmp1) == CONST_INT)
++ {
++ if (p_info->const_add != 0)
++ {
++ HOST_WIDE_INT new = INTVAL (cmp1) + p_info->const_add;
++
++ /* If modification of cmp1 caused overflow,
++ we would get the wrong answer if we follow the usual path;
++ thus, x > 0xffffffffU would turn into x > 0U. */
++ gcc_assert ((p_info->unsignedp
++ ? (unsigned HOST_WIDE_INT) new >
++ (unsigned HOST_WIDE_INT) INTVAL (cmp1)
++ : new > INTVAL (cmp1)) == (p_info->const_add > 0));
++
++ cmp1 = GEN_INT (new);
++ }
++ }
++
++ else if (p_info->reverse_regs)
++ {
++ rtx temp = cmp0;
++ cmp0 = cmp1;
++ cmp1 = temp;
++ }
++
++
++
++ if (branch_p)
++ {
++ if (register_operand (cmp0, mode) && register_operand (cmp1, mode))
++ {
++ rtx insn;
++ rtx cond = gen_rtx_fmt_ee (p_info->test_code_reg, mode, cmp0, cmp1);
++ rtx label = gen_rtx_LABEL_REF (VOIDmode, destination);
++
++ insn = gen_rtx_SET (VOIDmode, pc_rtx,
++ gen_rtx_IF_THEN_ELSE (VOIDmode,
++ cond, label, pc_rtx));
++ emit_jump_insn (insn);
++ }
++ else
++ {
++ rtx cond, label;
++
++ result = gen_reg_rtx (mode);
++
++ emit_move_insn (result,
++ gen_rtx_fmt_ee (p_info->test_code_const, mode, cmp0,
++ cmp1));
++
++ cond = gen_rtx_NE (mode, result, const0_rtx);
++ label = gen_rtx_LABEL_REF (VOIDmode, destination);
++
++ emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
++ gen_rtx_IF_THEN_ELSE (VOIDmode,
++ cond,
++ label, pc_rtx)));
++ }
++ }
++ else
++ {
++ if (register_operand (cmp0, mode) && register_operand (cmp1, mode))
++ emit_move_insn (result,
++ gen_rtx_fmt_ee (p_info->test_code_reg, mode, cmp0, cmp1));
++ else
++ emit_move_insn (result,
++ gen_rtx_fmt_ee (p_info->test_code_const, mode, cmp0,
++ cmp1));
++ }
++
++}
++
++
++/* ??? For now conditional moves are only supported
++ when the mode of the operands being compared is
++ the same as the mode of the operands being moved. */
++
++void
++gen_conditional_move (rtx *operands, enum machine_mode mode)
++{
++ rtx insn, cond;
++ rtx cmp_reg = gen_reg_rtx (mode);
++ enum rtx_code cmp_code = GET_CODE (operands[1]);
++ enum rtx_code move_code = EQ;
++
++ /* Emit a comparison if it is not "simple".
++ Simple comparisons are X eq 0 and X ne 0. */
++ if ((cmp_code == EQ || cmp_code == NE) && branch_cmp[1] == const0_rtx)
++ {
++ cmp_reg = branch_cmp[0];
++ move_code = cmp_code;
++ }
++ else if ((cmp_code == EQ || cmp_code == NE) && branch_cmp[0] == const0_rtx)
++ {
++ cmp_reg = branch_cmp[1];
++ move_code = cmp_code == EQ ? NE : EQ;
++ }
++ else
++ gen_int_relational (cmp_code, cmp_reg, branch_cmp[0], branch_cmp[1],
++ NULL_RTX);
++
++ cond = gen_rtx_fmt_ee (move_code, VOIDmode, cmp_reg, CONST0_RTX (mode));
++ insn = gen_rtx_SET (mode, operands[0],
++ gen_rtx_IF_THEN_ELSE (mode,
++ cond, operands[2], operands[3]));
++ emit_insn (insn);
++}
++
++/*******************
++ * Addressing Modes
++ *******************/
++
++int
++nios2_legitimate_constant (rtx x)
++{
++ switch (GET_CODE (x))
++ {
++ case SYMBOL_REF:
++ return !SYMBOL_REF_TLS_MODEL (x);
++ case CONST:
++ {
++ rtx op = XEXP (x, 0);
++ if (GET_CODE (op) != PLUS)
++ return false;
++ return nios2_legitimate_constant (XEXP (op, 0))
++ && nios2_legitimate_constant (XEXP (op, 1));
++ }
++ default:
++ return true;
++ }
++}
++
++int
++nios2_legitimate_address (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED,
++ int strict)
++{
++ int ret_val = 0;
++
++ switch (GET_CODE (operand))
++ {
++ /* direct. */
++ case SYMBOL_REF:
++ if (SYMBOL_REF_TLS_MODEL (operand))
++ break;
++
++ if (SYMBOL_REF_IN_NIOS2_SMALL_DATA_P (operand))
++ {
++ ret_val = 1;
++ break;
++ }
++ /* else, fall through */
++ case LABEL_REF:
++ case CONST_INT:
++ case CONST:
++ case CONST_DOUBLE:
++ /* ??? In here I need to add gp addressing */
++ ret_val = 0;
++
++ break;
++
++ /* Register indirect. */
++ case REG:
++ ret_val = REG_OK_FOR_BASE_P2 (operand, strict);
++ break;
++
++ /* Register indirect with displacement. */
++ case PLUS:
++ {
++ rtx op0 = XEXP (operand, 0);
++ rtx op1 = XEXP (operand, 1);
++
++ if (REG_P (op0) && REG_P (op1))
++ ret_val = 0;
++ else if (REG_P (op0) && GET_CODE (op1) == CONST_INT)
++ ret_val = REG_OK_FOR_BASE_P2 (op0, strict)
++ && SMALL_INT (INTVAL (op1));
++ else if (REG_P (op1) && GET_CODE (op0) == CONST_INT)
++ ret_val = REG_OK_FOR_BASE_P2 (op1, strict)
++ && SMALL_INT (INTVAL (op0));
++ else
++ ret_val = 0;
++ }
++ break;
++
++ default:
++ ret_val = 0;
++ break;
++ }
++
++ return ret_val;
++}
++
++/* Return true if EXP should be placed in the small data section. */
++
++static bool
++nios2_in_small_data_p (tree exp)
++{
++ /* We want to merge strings, so we never consider them small data. */
++ if (TREE_CODE (exp) == STRING_CST)
++ return false;
++
++ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
++ {
++ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
++ /* ??? these string names need moving into
++ an array in some header file */
++ if (nios2_section_threshold > 0
++ && (strcmp (section, ".sbss") == 0
++ || strncmp (section, ".sbss.", 6) == 0
++ || strcmp (section, ".sdata") == 0
++ || strncmp (section, ".sdata.", 7) == 0))
++ return true;
++ }
++ else if (TREE_CODE (exp) == VAR_DECL)
++ {
++ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
++
++ /* If this is an incomplete type with size 0, then we can't put it
++ in sdata because it might be too big when completed. */
++ if (size > 0 && (unsigned HOST_WIDE_INT)size <= nios2_section_threshold)
++ return true;
++ }
++
++ return false;
++}
++
++static void
++nios2_encode_section_info (tree decl, rtx rtl, int first)
++{
++
++ rtx symbol;
++ int flags;
++
++ default_encode_section_info (decl, rtl, first);
++
++ /* Careful not to prod global register variables. */
++ if (GET_CODE (rtl) != MEM)
++ return;
++ symbol = XEXP (rtl, 0);
++ if (GET_CODE (symbol) != SYMBOL_REF)
++ return;
++
++ flags = SYMBOL_REF_FLAGS (symbol);
++
++ /* We don't want weak variables to be addressed with gp in case they end up
++ with value 0 which is not within 2^15 of $gp. */
++ if (DECL_P (decl) && DECL_WEAK (decl))
++ flags |= SYMBOL_FLAG_WEAK_DECL;
++
++ SYMBOL_REF_FLAGS (symbol) = flags;
++}
++
++
++static unsigned int
++nios2_section_type_flags (tree decl, const char *name, int reloc)
++{
++ unsigned int flags;
++
++ flags = default_section_type_flags (decl, name, reloc);
++
++ if (strcmp (name, ".sbss") == 0
++ || strncmp (name, ".sbss.", 6) == 0
++ || strcmp (name, ".sdata") == 0
++ || strncmp (name, ".sdata.", 7) == 0)
++ flags |= SECTION_SMALL;
++
++ return flags;
++}
++
++/* Handle a #pragma reverse_bitfields. */
++static void
++nios2_pragma_reverse_bitfields (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
++{
++ nios2_pragma_reverse_bitfields_flag = 1; /* Reverse */
++}
++
++/* Handle a #pragma no_reverse_bitfields. */
++static void
++nios2_pragma_no_reverse_bitfields (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
++{
++ nios2_pragma_reverse_bitfields_flag = -1; /* Forward */
++}
++
++/* Handle the various #pragma custom_<switch>s. */
++static void
++nios2_pragma_fpu (int *value, const char *opt, int *seen)
++{
++ tree t;
++ if (c_lex (&t) != CPP_NUMBER)
++ {
++ error ("`#pragma custom_%s' value must be a number between 0 and 255",
++ opt);
++ return;
++ }
++
++ if (TREE_INT_CST_HIGH (t) == 0
++ && TREE_INT_CST_LOW (t) <= 255)
++ {
++ *value = (int)TREE_INT_CST_LOW (t);
++ *seen = 1;
++ }
++ else
++ error ("`#pragma custom_%s' value must be between 0 and 255", opt);
++ nios2_custom_check_insns (1);
++}
++
++/* Handle the various #pragma no_custom_<switch>s. */
++static void
++nios2_pragma_no_fpu (int *value, const char *opt ATTRIBUTE_UNUSED)
++{
++ *value = -1;
++ nios2_custom_check_insns (1);
++}
++
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++static void \
++NIOS2_CONCAT (nios2_pragma_, insn) \
++ (struct cpp_reader *pfile ATTRIBUTE_UNUSED) \
++{ \
++ nios2_fpu_info *inf = &(nios2_fpu_insns[NIOS2_CONCAT (nios2_fpu_, insn)]); \
++ nios2_pragma_fpu (&(inf->N), inf->option, &(inf->pragma_seen)); \
++} \
++static void \
++NIOS2_CONCAT (nios2_pragma_no_, insn) \
++ (struct cpp_reader *pfile ATTRIBUTE_UNUSED) \
++{ \
++ nios2_fpu_info *inf = &(nios2_fpu_insns[NIOS2_CONCAT (nios2_fpu_, insn)]); \
++ nios2_pragma_no_fpu (&(inf->N), inf->option); \
++}
++NIOS2_FOR_ALL_FPU_INSNS
++
++static void
++nios2_pragma_handle_custom_fpu_cfg (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
++{
++ tree t;
++ if (c_lex (&t) != CPP_STRING)
++ {
++ error ("`#pragma custom_fpu_cfg' value must be a string");
++ return;
++ }
++
++ if (TREE_STRING_LENGTH (t) > 0)
++ nios2_handle_custom_fpu_cfg (TREE_STRING_POINTER (t), 1);
++}
++
++void
++nios2_register_target_pragmas (void)
++{
++ int i;
++
++ c_register_pragma (0, "reverse_bitfields",
++ nios2_pragma_reverse_bitfields);
++ c_register_pragma (0, "no_reverse_bitfields",
++ nios2_pragma_no_reverse_bitfields);
++
++ for (i = 0; i < nios2_fpu_max_insn; i++)
++ {
++ nios2_fpu_info *inf = &(nios2_fpu_insns[i]);
++ c_register_pragma (0, inf->pname, inf->pragma);
++ c_register_pragma (0, inf->nopname, inf->nopragma);
++ }
++
++ c_register_pragma (0, "custom_fpu_cfg",
++ nios2_pragma_handle_custom_fpu_cfg);
++}
++
++/* Handle a "reverse_bitfields" or "no_reverse_bitfields" attribute.
++ ??? What do these attributes mean on a union? */
++static tree
++nios2_handle_struct_attribute (tree *node, tree name,
++ tree args ATTRIBUTE_UNUSED,
++ int flags ATTRIBUTE_UNUSED,
++ bool *no_add_attrs)
++{
++ tree *type = NULL;
++ if (DECL_P (*node))
++ {
++ if (TREE_CODE (*node) == TYPE_DECL)
++ type = &TREE_TYPE (*node);
++ }
++ else
++ type = node;
++
++ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
++ || TREE_CODE (*type) == UNION_TYPE)))
++ {
++ warning (0, "`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++
++ else if ((is_attribute_p ("reverse_bitfields", name)
++ && lookup_attribute ("no_reverse_bitfields",
++ TYPE_ATTRIBUTES (*type)))
++ || ((is_attribute_p ("no_reverse_bitfields", name)
++ && lookup_attribute ("reverse_bitfields",
++ TYPE_ATTRIBUTES (*type)))))
++ {
++ warning (0, "`%s' incompatible attribute ignored",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++
++ return NULL_TREE;
++}
++
++/* Add __attribute__ ((pragma_reverse_bitfields)) when in the scope of a
++ #pragma reverse_bitfields, or __attribute__
++ ((pragma_no_reverse_bitfields)) when in the scope of a #pragma
++ no_reverse_bitfields. This gets called before
++ nios2_handle_struct_attribute above, so we can't just reuse the same
++ attributes. */
++static void
++nios2_insert_attributes (tree node, tree *attr_ptr)
++{
++ tree type = NULL;
++ if (DECL_P (node))
++ {
++ if (TREE_CODE (node) == TYPE_DECL)
++ type = TREE_TYPE (node);
++ }
++ else
++ type = node;
++
++ if (!type
++ || (TREE_CODE (type) != RECORD_TYPE
++ && TREE_CODE (type) != UNION_TYPE))
++ {
++ /* We can ignore things other than structs & unions. */
++ return;
++ }
++
++ if (lookup_attribute ("reverse_bitfields", TYPE_ATTRIBUTES (type))
++ || lookup_attribute ("no_reverse_bitfields", TYPE_ATTRIBUTES (type)))
++ {
++ /* If an attribute is already set, it silently overrides the
++ current #pragma, if any. */
++ return;
++ }
++
++ if (nios2_pragma_reverse_bitfields_flag)
++ {
++ const char *id = (nios2_pragma_reverse_bitfields_flag == 1 ?
++ "pragma_reverse_bitfields" :
++ "pragma_no_reverse_bitfields");
++ /* No attribute set, and we are in the scope of a #pragma. */
++ *attr_ptr = tree_cons (get_identifier (id), NULL, *attr_ptr);
++ }
++}
++
++/*****************************************
++ * Position Independent Code
++ *****************************************/
++
++/* Emit code to load the PIC register. */
++
++static void
++nios2_load_pic_register (void)
++{
++ rtx tmp = gen_rtx_REG (Pmode, TEMP_REG_NUM);
++
++ emit_insn (gen_load_got_register (pic_offset_table_rtx, tmp));
++ emit_insn (gen_add3_insn (pic_offset_table_rtx, pic_offset_table_rtx, tmp));
++}
++
++/* Nonzero if the constant value X is a legitimate general operand
++ when generating PIC code. It is given that flag_pic is on and
++ that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
++
++bool
++nios2_legitimate_pic_operand_p (rtx x)
++{
++ rtx inner;
++
++ /* UNSPEC_TLS is always PIC. */
++ if (tls_mentioned_p (x))
++ return true;
++
++ if (GET_CODE (x) == SYMBOL_REF)
++ return false;
++ if (GET_CODE (x) == LABEL_REF)
++ return false;
++ if (GET_CODE (x) == CONST)
++ {
++ inner = XEXP (x, 0);
++ if (GET_CODE (inner) == PLUS &&
++ GET_CODE (XEXP (inner, 0)) == SYMBOL_REF &&
++ GET_CODE (XEXP (inner, 1)) == CONST)
++ return false;
++ }
++ return true;
++}
++
++rtx
++nios2_legitimize_pic_address (rtx orig,
++ enum machine_mode mode ATTRIBUTE_UNUSED, rtx reg)
++{
++ if (GET_CODE (orig) == SYMBOL_REF
++ || GET_CODE (orig) == LABEL_REF)
++ {
++ if (reg == 0)
++ {
++ gcc_assert (!no_new_pseudos);
++ reg = gen_reg_rtx (Pmode);
++ }
++
++ emit_insn (gen_pic_load_addr (reg, pic_offset_table_rtx, orig));
++
++ current_function_uses_pic_offset_table = 1;
++
++ return reg;
++ }
++ else if (GET_CODE (orig) == CONST)
++ {
++ rtx base, offset;
++
++ if (GET_CODE (XEXP (orig, 0)) == PLUS
++ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
++ return orig;
++
++ if (GET_CODE (XEXP (orig, 0)) == UNSPEC
++ && IS_UNSPEC_TLS (XINT (XEXP (orig, 0), 1)))
++ return orig;
++
++ if (reg == 0)
++ {
++ gcc_assert (!no_new_pseudos);
++ reg = gen_reg_rtx (Pmode);
++ }
++
++ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
++
++ base = nios2_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode,
++ reg);
++ offset = nios2_legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
++ base == reg ? 0 : reg);
++
++ if (GET_CODE (offset) == CONST_INT)
++ return plus_constant (base, INTVAL (offset));
++
++ return gen_rtx_PLUS (Pmode, base, offset);
++ }
++
++ return orig;
++}
++
++/* Test for various thread-local symbols. */
++
++/* Return TRUE if X is a thread-local symbol. */
++
++static bool
++nios2_tls_symbol_p (rtx x)
++{
++ if (! TARGET_HAVE_TLS)
++ return false;
++
++ if (GET_CODE (x) != SYMBOL_REF)
++ return false;
++
++ return SYMBOL_REF_TLS_MODEL (x) != 0;
++}
++
++rtx
++nios2_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
++{
++ if (nios2_tls_symbol_p (x))
++ return nios2_legitimize_tls_address (x);
++
++ if (flag_pic)
++ {
++ /* We need to find and carefully transform any SYMBOL and LABEL
++ references; so go back to the original address expression. */
++ rtx new_x = nios2_legitimize_pic_address (orig_x, mode, NULL_RTX);
++
++ if (new_x != orig_x)
++ x = new_x;
++ }
++
++ return x;
++}
++
++/*****************************************
++ * Defining the Output Assembler Language
++ *****************************************/
++
++/* -------------- *
++ * Output of Data
++ * -------------- */
++
++
++/* -------------------------------- *
++ * Output of Assembler Instructions
++ * -------------------------------- */
++
++
++/* Print the operand OP to file stream
++ FILE, modified by LETTER. LETTER
++ can be one of:
++ i: print "i" if OP is an immediate other than 0
++ o: print "io" if OP should bypass the data cache
++
++ z: for const0_rtx print the zero register instead of 0
++ H: print the %hiadj() of OP
++ L: print the %lo() of OP
++ U: print the upper 16 bits of a 32-bit value
++ D: print the register holding the upper 32 bits of a 64-bit double value
++ */
++
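++/* For example, with a hypothetical output template such as
++ "stw\t%z0, %1", a const0_rtx operand 0 is printed as the zero
++ register, so storing a literal 0 needs no scratch register. */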
++void
++nios2_print_operand (FILE *file, rtx op, int letter)
++{
++
++ switch (letter)
++ {
++ case 'i':
++ if (CONSTANT_P (op) && (op != const0_rtx))
++ fprintf (file, "i");
++ return;
++
++ case 'o':
++ if (GET_CODE (op) == MEM && TARGET_BYPASS_CACHE)
++ fprintf (file, "io");
++ return;
++
++ default:
++ break;
++ }
++
++ if (comparison_operator (op, VOIDmode))
++ {
++ if (letter == 0)
++ {
++ fprintf (file, "%s", GET_RTX_NAME (GET_CODE (op)));
++ return;
++ }
++ }
++
++
++ switch (GET_CODE (op))
++ {
++ case REG:
++ if (letter == 0 || letter == 'z')
++ {
++ fprintf (file, "%s", reg_names[REGNO (op)]);
++ return;
++ }
++ else if (letter == 'D')
++ {
++ fprintf (file, "%s", reg_names[REGNO (op)+1]);
++ return;
++ }
++ break;
++
++ case CONST_INT:
++ if (INTVAL (op) == 0 && letter == 'z')
++ {
++ fprintf (file, "zero");
++ return;
++ }
++ else if (letter == 'U')
++ {
++ HOST_WIDE_INT val = INTVAL (op);
++ rtx new_op;
++ val = (val / 65536) & 0xFFFF;
++ new_op = GEN_INT (val);
++ output_addr_const (file, new_op);
++ return;
++ }
++
++ /* else, fall through. */
++ case CONST:
++ case LABEL_REF:
++ case SYMBOL_REF:
++ case CONST_DOUBLE:
++ if (letter == 0 || letter == 'z')
++ {
++ output_addr_const (file, op);
++ return;
++ }
++ else if (letter == 'H')
++ {
++ fprintf (file, "%%hiadj(");
++ output_addr_const (file, op);
++ fprintf (file, ")");
++ return;
++ }
++ else if (letter == 'L')
++ {
++ fprintf (file, "%%lo(");
++ output_addr_const (file, op);
++ fprintf (file, ")");
++ return;
++ }
++ break;
++
++
++ case SUBREG:
++ case MEM:
++ if (letter == 0)
++ {
++ output_address (op);
++ return;
++ }
++ break;
++
++ case CODE_LABEL:
++ if (letter == 0)
++ {
++ output_addr_const (file, op);
++ return;
++ }
++ break;
++
++ default:
++ break;
++ }
++
++ fprintf (stderr, "Missing way to print (%c) ", letter);
++ debug_rtx (op);
++ gcc_unreachable ();
++}
++
++static int gprel_constant (rtx);
++
++static int
++gprel_constant (rtx op)
++{
++ if (GET_CODE (op) == SYMBOL_REF
++ && SYMBOL_REF_IN_NIOS2_SMALL_DATA_P (op))
++ return 1;
++ else if (GET_CODE (op) == CONST
++ && GET_CODE (XEXP (op, 0)) == PLUS)
++ return gprel_constant (XEXP (XEXP (op, 0), 0));
++ else
++ return 0;
++}
++
++void
++nios2_print_operand_address (FILE *file, rtx op)
++{
++ switch (GET_CODE (op))
++ {
++ case CONST:
++ case CONST_INT:
++ case LABEL_REF:
++ case CONST_DOUBLE:
++ case SYMBOL_REF:
++ if (gprel_constant (op))
++ {
++ fprintf (file, "%%gprel(");
++ output_addr_const (file, op);
++ fprintf (file, ")(%s)", reg_names[GP_REGNO]);
++ return;
++ }
++
++ break;
++
++ case PLUS:
++ {
++ rtx op0 = XEXP (op, 0);
++ rtx op1 = XEXP (op, 1);
++
++ if (REG_P (op0) && CONSTANT_P (op1))
++ {
++ output_addr_const (file, op1);
++ fprintf (file, "(%s)", reg_names[REGNO (op0)]);
++ return;
++ }
++ else if (REG_P (op1) && CONSTANT_P (op0))
++ {
++ output_addr_const (file, op0);
++ fprintf (file, "(%s)", reg_names[REGNO (op1)]);
++ return;
++ }
++ }
++ break;
++
++ case REG:
++ fprintf (file, "0(%s)", reg_names[REGNO (op)]);
++ return;
++
++ case MEM:
++ {
++ rtx base = XEXP (op, 0);
++ PRINT_OPERAND_ADDRESS (file, base);
++ return;
++ }
++ default:
++ break;
++ }
++
++ fprintf (stderr, "Missing way to print address\n");
++ debug_rtx (op);
++ gcc_unreachable ();
++}
++
++
++
++/****************************
++ * Debug information
++ ****************************/
++
++static void
++nios2_output_dwarf_dtprel (FILE *file, int size, rtx x)
++{
++ gcc_assert (size == 4);
++ fputs ("\t.4byte\t%tls_ldo(", file);
++ output_addr_const (file, x);
++ fputs (")", file);
++}
++
++
++/****************************
++ * Predicates
++ ****************************/
++
++/* An operand to a call or sibcall. This must be an immediate operand
++ or a register. */
++int
++call_operand (rtx x, enum machine_mode mode)
++{
++ return (immediate_operand (x, mode)
++ || register_operand (x, mode));
++}
++
++int
++arith_operand (rtx op, enum machine_mode mode)
++{
++ if (GET_CODE (op) == CONST_INT && SMALL_INT (INTVAL (op)))
++ return 1;
++
++ return register_operand (op, mode);
++}
++
++int
++uns_arith_operand (rtx op, enum machine_mode mode)
++{
++ if (GET_CODE (op) == CONST_INT && SMALL_INT_UNSIGNED (INTVAL (op)))
++ return 1;
++
++ return register_operand (op, mode);
++}
++
++int
++logical_operand (rtx op, enum machine_mode mode)
++{
++ if (GET_CODE (op) == CONST_INT
++ && (SMALL_INT_UNSIGNED (INTVAL (op)) || UPPER16_INT (INTVAL (op))))
++ return 1;
++
++ return register_operand (op, mode);
++}
++
++int
++shift_operand (rtx op, enum machine_mode mode)
++{
++ if (GET_CODE (op) == CONST_INT && SHIFT_INT (INTVAL (op)))
++ return 1;
++
++ return register_operand (op, mode);
++}
++
++int
++rdwrctl_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ return GET_CODE (op) == CONST_INT && RDWRCTL_INT (INTVAL (op));
++}
++
++/* Return truth value of whether OP is a register or the constant 0. */
++
++int
++reg_or_0_operand (rtx op, enum machine_mode mode)
++{
++ switch (GET_CODE (op))
++ {
++ case CONST_INT:
++ return INTVAL (op) == 0;
++
++ case CONST_DOUBLE:
++ return op == CONST0_RTX (mode);
++
++ default:
++ break;
++ }
++
++ return register_operand (op, mode);
++}
++
++
++int
++equality_op (rtx op, enum machine_mode mode)
++{
++ if (mode != GET_MODE (op))
++ return 0;
++
++ return GET_CODE (op) == EQ || GET_CODE (op) == NE;
++}
++
++int
++custom_insn_opcode (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ return GET_CODE (op) == CONST_INT && CUSTOM_INSN_OPCODE (INTVAL (op));
++}
++
++
++
++
++/*****************************************************************************
++**
++** custom fpu instruction output
++**
++*****************************************************************************/
++
++static const char *nios2_custom_fpu_insn_zdz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_zsz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_szz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_sss (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_ssz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_iss (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_ddd (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_ddz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_idd (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_siz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_suz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_diz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_duz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_isz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_usz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_idz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_udz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_dsz (rtx, int, const char *);
++static const char *nios2_custom_fpu_insn_sdz (rtx, int, const char *);
++
++static const char *
++nios2_custom_fpu_insn_zdz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, zero, %%0, %%D0 # %s %%0",
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_zsz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, zero, %%0, zero # %s %%0",
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_szz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, %%0, zero, zero # %s %%0",
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_sss (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, %%0, %%1, %%2 # %s %%0, %%1, %%2",
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_ssz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, %%0, %%1, zero # %s %%0, %%1",
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_iss (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_sss (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_ddd (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0
++ || nios2_fpu_insns[nios2_fpu_nios2_frdy].N < 0
++ || nios2_fpu_insns[nios2_fpu_nios2_fwrx].N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, zero, %%1, %%D1 # fwrx %%1\n\t"
++ "custom\t%d, %%D0, %%2, %%D2 # %s %%0, %%1, %%2\n\t"
++ "custom\t%d, %%0, zero, zero # frdy %%0",
++ nios2_fpu_insns[nios2_fpu_nios2_fwrx].N,
++ N, opt,
++ nios2_fpu_insns[nios2_fpu_nios2_frdy].N) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_ddz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0 || nios2_fpu_insns[nios2_fpu_nios2_frdy].N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, %%D0, %%1, %%D1 # %s %%0, %%1\n\t"
++ "custom\t%d, %%0, zero, zero # frdy %%0",
++ N, opt,
++ nios2_fpu_insns[nios2_fpu_nios2_frdy].N) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_idd (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0 || nios2_fpu_insns[nios2_fpu_nios2_fwrx].N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, zero, %%1, %%D1 # fwrx %%1\n\t"
++ "custom\t%d, %%0, %%2, %%D2 # %s %%0, %%1, %%2",
++ nios2_fpu_insns[nios2_fpu_nios2_fwrx].N,
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_siz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_ssz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_suz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_ssz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_diz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_dsz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_duz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_dsz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_isz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_ssz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_usz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_ssz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_idz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_sdz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_udz (rtx insn, int N, const char *opt)
++{
++ return nios2_custom_fpu_insn_sdz (insn, N, opt);
++}
++
++static const char *
++nios2_custom_fpu_insn_dsz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0 || nios2_fpu_insns[nios2_fpu_nios2_frdy].N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, %%D0, %%1, zero # %s %%0, %%1\n\t"
++ "custom\t%d, %%0, zero, zero # frdy %%0",
++ N, opt,
++ nios2_fpu_insns[nios2_fpu_nios2_frdy].N) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++static const char *
++nios2_custom_fpu_insn_sdz (rtx insn, int N, const char *opt)
++{
++ static char buf[1024];
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ "custom\t%d, %%0, %%1, %%D1 # %s %%0, %%1",
++ N, opt) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++static const char * \
++NIOS2_CONCAT (nios2_output_fpu_insn_, insn) (rtx i) \
++{ \
++ return NIOS2_CONCAT (nios2_custom_fpu_insn_, args) \
++ (i, \
++ nios2_fpu_insns[NIOS2_CONCAT (nios2_fpu_, insn)].N, \
++ nios2_fpu_insns[NIOS2_CONCAT (nios2_fpu_, insn)].option); \
++}
++NIOS2_FOR_ALL_FPU_INSNS
++
++
++
++const char *
++nios2_output_fpu_insn_cmps (rtx insn, enum rtx_code cond)
++{
++ static char buf[1024];
++ int N;
++ const char *opt;
++
++ int operandL = 2;
++ int operandR = 3;
++
++ if (!have_nios2_fpu_cmp_insn (cond, CMP_SF)
++ && have_nios2_fpu_cmp_insn (get_reverse_cond (cond), CMP_SF))
++ {
++ int temp = operandL;
++ operandL = operandR;
++ operandR = temp;
++
++ cond = get_reverse_cond (cond);
++ }
++
++ switch (cond)
++ {
++ case EQ:
++ N = nios2_fpu_insns[nios2_fpu_nios2_seqsf].N;
++ opt = "fcmpeqs";
++ break;
++ case NE:
++ N = nios2_fpu_insns[nios2_fpu_nios2_snesf].N;
++ opt = "fcmpnes";
++ break;
++ case GT:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sgtsf].N;
++ opt = "fcmpgts";
++ break;
++ case GE:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sgesf].N;
++ opt = "fcmpges";
++ break;
++ case LT:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sltsf].N;
++ opt = "fcmplts";
++ break;
++ case LE:
++ N = nios2_fpu_insns[nios2_fpu_nios2_slesf].N;
++ opt = "fcmples"; break;
++ default:
++ fatal_insn ("bad single compare", insn);
++ }
++
++ if (N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++
++ /* ??? This raises the whole vexing issue of how to handle
++ out-of-range branches. Punt for now, seeing as how nios2-elf-as
++ doesn't even _try_ to handle out-of-range branches yet! */
++ if (snprintf (buf, sizeof (buf),
++ ".set\tnoat\n\t"
++ "custom\t%d, at, %%%d, %%%d # %s at, %%%d, %%%d\n\t"
++ "bne\tat, zero, %%l1\n\t"
++ ".set\tat",
++ N, operandL, operandR, opt, operandL, operandR
++ ) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
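++
++/* Illustrative output (register numbers and opcode assumed): with
++ -mcustom-fcmpeqs=N, an EQ compare-and-branch expanded by the routine
++ above comes out as
++
++ .set noat
++ custom N, at, r4, r5 # fcmpeqs at, r4, r5
++ bne at, zero, .L1
++ .set at
++
++ i.e. the FPU result is written to the assembler temporary and then
++ tested against zero by a conditional branch. */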
++
++const char *
++nios2_output_fpu_insn_cmpd (rtx insn, enum rtx_code cond)
++{
++ static char buf[1024];
++ int N;
++ const char *opt;
++
++ int operandL = 2;
++ int operandR = 3;
++
++ if ( !have_nios2_fpu_cmp_insn(cond, CMP_DF) &&
++ have_nios2_fpu_cmp_insn(get_reverse_cond(cond), CMP_DF) )
++ {
++
++ int temp = operandL;
++ operandL = operandR;
++ operandR = temp;
++
++ cond = get_reverse_cond(cond);
++ }
++
++ switch (cond)
++ {
++ case EQ:
++ N = nios2_fpu_insns[nios2_fpu_nios2_seqdf].N;
++ opt = "fcmpeqd";
++ break;
++ case NE:
++ N = nios2_fpu_insns[nios2_fpu_nios2_snedf].N;
++ opt = "fcmpned";
++ break;
++ case GT:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sgtdf].N;
++ opt = "fcmpgtd";
++ break;
++ case GE:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sgedf].N;
++ opt = "fcmpged";
++ break;
++ case LT:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sltdf].N;
++ opt = "fcmpltd";
++ break;
++ case LE:
++ N = nios2_fpu_insns[nios2_fpu_nios2_sledf].N;
++ opt = "fcmpled";
++ break;
++ default:
++ fatal_insn ("bad double compare", insn);
++ }
++
++ if (N < 0 || nios2_fpu_insns[nios2_fpu_nios2_fwrx].N < 0)
++ fatal_insn ("attempt to use disabled fpu instruction", insn);
++ if (snprintf (buf, sizeof (buf),
++ ".set\tnoat\n\t"
++ "custom\t%d, zero, %%%d, %%D%d # fwrx %%%d\n\t"
++ "custom\t%d, at, %%%d, %%D%d # %s at, %%%d, %%%d\n\t"
++ "bne\tat, zero, %%l1\n\t"
++ ".set\tat",
++ nios2_fpu_insns[nios2_fpu_nios2_fwrx].N, operandL, operandL,
++ operandL, N, operandR, operandR, opt, operandL, operandR
++ ) >= (int)sizeof (buf))
++ fatal_insn ("buffer overflow", insn);
++ return buf;
++}
++
++
++
++
++/*****************************************************************************
++**
++** Instruction scheduler
++**
++*****************************************************************************/
++static int
++nios2_issue_rate (void)
++{
++#ifdef MAX_DFA_ISSUE_RATE
++ return MAX_DFA_ISSUE_RATE;
++#else
++ return 1;
++#endif
++}
++
++
++const char *
++asm_output_opcode (FILE *file ATTRIBUTE_UNUSED,
++ const char *ptr ATTRIBUTE_UNUSED)
++{
++ const char *p;
++
++ p = ptr;
++ return ptr;
++}
++
++
++
++/*****************************************************************************
++**
++** Function arguments
++**
++*****************************************************************************/
++
++void
++init_cumulative_args (CUMULATIVE_ARGS *cum,
++ tree fntype ATTRIBUTE_UNUSED,
++ rtx libname ATTRIBUTE_UNUSED,
++ tree fndecl ATTRIBUTE_UNUSED,
++ int n_named_args ATTRIBUTE_UNUSED)
++{
++ cum->regs_used = 0;
++}
++
++
++/* Define where to put the arguments to a function. Value is zero to
++ push the argument on the stack, or a hard register in which to
++ store the argument.
++
++ MODE is the argument's machine mode.
++ TYPE is the data type of the argument (as a tree).
++ This is null for libcalls where that information may
++ not be available.
++ CUM is a variable of type CUMULATIVE_ARGS which gives info about
++ the preceding args and about the function being called.
++ NAMED is nonzero if this argument is a named parameter
++ (otherwise it is an extra parameter matching an ellipsis). */
++rtx
++function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
++ tree type ATTRIBUTE_UNUSED, int named ATTRIBUTE_UNUSED)
++{
++ rtx return_rtx = NULL_RTX;
++
++ if (cum->regs_used < NUM_ARG_REGS)
++ return_rtx = gen_rtx_REG (mode, FIRST_ARG_REGNO + cum->regs_used);
++
++ return return_rtx;
++}
++
++/* Return the number of bytes, at the beginning of the argument, that must be
++ put in registers. 0 if the argument is entirely in registers or entirely
++ in memory. */
++
++static int
++nios2_arg_partial_bytes (CUMULATIVE_ARGS *cum,
++ enum machine_mode mode, tree type,
++ bool named ATTRIBUTE_UNUSED)
++{
++ HOST_WIDE_INT param_size;
++
++ if (mode == BLKmode)
++ {
++ param_size = int_size_in_bytes (type);
++ if (param_size < 0)
++ internal_error
++ ("Do not know how to handle large structs or variable length types");
++ }
++ else
++ param_size = GET_MODE_SIZE (mode);
++
++ /* Convert to words (round up). */
++ param_size = (3 + param_size) / 4;
++
++ if (cum->regs_used < NUM_ARG_REGS
++ && cum->regs_used + param_size > NUM_ARG_REGS)
++ return (NUM_ARG_REGS - cum->regs_used) * UNITS_PER_WORD;
++ else
++ return 0;
++}
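++
++/* Worked example (illustrative; assumes NUM_ARG_REGS is 4, i.e. r4-r7,
++ and UNITS_PER_WORD is 4): a 12-byte BLKmode struct passed when two
++ argument registers are already in use gives param_size = (3 + 12) / 4
++ = 3 words; since 2 + 3 > 4, the first (4 - 2) * 4 = 8 bytes are passed
++ in r6/r7 and the remaining 4 bytes go on the stack. */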
++
++static bool
++nios2_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type ATTRIBUTE_UNUSED,
++ bool named ATTRIBUTE_UNUSED)
++{
++ return false;
++}
++
++
++/* Update the data in CUM to advance over an argument
++ of mode MODE and data type TYPE.
++ (TYPE is null for libcalls where that information may not be available.) */
++
++void
++function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
++ tree type ATTRIBUTE_UNUSED, int named ATTRIBUTE_UNUSED)
++{
++ HOST_WIDE_INT param_size;
++
++ if (mode == BLKmode)
++ {
++ param_size = int_size_in_bytes (type);
++ if (param_size < 0)
++ internal_error
++ ("Do not know how to handle large structs or variable length types");
++ }
++ else
++ param_size = GET_MODE_SIZE (mode);
++
++ /* Convert to words (round up). */
++ param_size = (3 + param_size) / 4;
++
++ if (cum->regs_used + param_size > NUM_ARG_REGS)
++ cum->regs_used = NUM_ARG_REGS;
++ else
++ cum->regs_used += param_size;
++
++ return;
++}
++
++int
++nios2_function_arg_padding_upward (enum machine_mode mode, tree type)
++{
++ /* On little-endian targets, the first byte of every stack argument
++ is passed in the first byte of the stack slot. */
++ if (!BYTES_BIG_ENDIAN)
++ return 1;
++
++ /* Otherwise, integral types are padded downward: the last byte of a
++ stack argument is passed in the last byte of the stack slot. */
++ if (type != 0
++ ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
++ : GET_MODE_CLASS (mode) == MODE_INT)
++ return 0;
++
++ /* Arguments smaller than a stack slot are padded downward. */
++ if (mode != BLKmode)
++ return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY) ? 1 : 0;
++ else
++ return ((int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT))
++ ? 1 : 0);
++}
++
++int
++nios2_block_reg_padding_upward (enum machine_mode mode, tree type,
++ int first ATTRIBUTE_UNUSED)
++{
++ /* ??? Do we need to treat floating point specially, ala MIPS? */
++ return nios2_function_arg_padding_upward (mode, type);
++}
++
++int
++nios2_return_in_memory (tree type)
++{
++ int res = ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
++ || (int_size_in_bytes (type) == -1));
++
++ return res;
++}
++
++/* ??? It may be possible to eliminate the copyback and implement
++ my own va_arg type, but that is more work for now. */
++void
++nios2_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
++ enum machine_mode mode, tree type,
++ int *pretend_size, int second_time)
++{
++ CUMULATIVE_ARGS local_cum;
++ int regs_to_push;
++ int pret_size;
++
++ local_cum = *cum;
++ FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
++
++ regs_to_push = NUM_ARG_REGS - local_cum.regs_used;
++
++ if (!second_time)
++ {
++ if (regs_to_push > 0)
++ {
++ rtx ptr, mem;
++
++ ptr = virtual_incoming_args_rtx;
++ mem = gen_rtx_MEM (BLKmode, ptr);
++
++ /* va_arg is an array access in this case, which causes
++ it to get MEM_IN_STRUCT_P set. We must set it here
++ so that the insn scheduler won't assume that these
++ stores can't possibly overlap with the va_arg loads. */
++ MEM_SET_IN_STRUCT_P (mem, 1);
++
++ emit_insn (gen_blockage ());
++ move_block_from_reg (local_cum.regs_used + FIRST_ARG_REGNO, mem,
++ regs_to_push);
++ emit_insn (gen_blockage ());
++ }
++ }
++
++ pret_size = regs_to_push * UNITS_PER_WORD;
++
++ if (pret_size)
++ *pretend_size = pret_size;
++}
++
++
++
++/*****************************************************************************
++**
++** builtins
++**
++** This method for handling builtins is from CSP where _many_ more types of
++** expanders have already been written. Check there first before writing
++** new ones.
++**
++*****************************************************************************/
++
++enum nios2_builtins
++{
++ NIOS2_BUILTIN_LDBIO,
++ NIOS2_BUILTIN_LDBUIO,
++ NIOS2_BUILTIN_LDHIO,
++ NIOS2_BUILTIN_LDHUIO,
++ NIOS2_BUILTIN_LDWIO,
++ NIOS2_BUILTIN_STBIO,
++ NIOS2_BUILTIN_STHIO,
++ NIOS2_BUILTIN_STWIO,
++ NIOS2_BUILTIN_SYNC,
++ NIOS2_BUILTIN_RDCTL,
++ NIOS2_BUILTIN_WRCTL,
++
++#undef NIOS2_DO_BUILTIN
++#define NIOS2_DO_BUILTIN(upper, lower, handler) \
++ NIOS2_CONCAT (NIOS2_BUILTIN_CUSTOM_, upper),
++NIOS2_FOR_ALL_CUSTOM_BUILTINS
++
++ NIOS2_FIRST_FPU_INSN,
++
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ NIOS2_CONCAT (NIOS2_BUILTIN_FPU_, opt),
++NIOS2_FOR_ALL_FPU_INSNS
++
++ NIOS2_LAST_FPU_INSN,
++
++ LIM_NIOS2_BUILTINS
++};
++
++struct builtin_description
++{
++ const enum insn_code icode;
++ const char *const name;
++ const enum nios2_builtins code;
++ const tree *type;
++ rtx (* expander) (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++};
++
++static rtx nios2_expand_STXIO (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_LDXIO (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_sync (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_rdctl (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_wrctl (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++
++static rtx nios2_expand_custom_n (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_Xn (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_nX (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_XnX (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_nXX (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_XnXX (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++
++static rtx nios2_expand_custom_zdz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_zsz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_szz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_sss (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_ssz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_iss (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_ddd (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_ddz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_idd (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_siz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_suz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_diz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_duz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_isz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_usz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_idz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_udz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_dsz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++static rtx nios2_expand_custom_sdz (const struct builtin_description *,
++ tree, rtx, rtx, enum machine_mode, int);
++
++static tree endlink;
++
++/* int fn (volatile const void *)
++ */
++static tree int_ftype_volatile_const_void_p;
++
++/* int fn (int)
++ */
++static tree int_ftype_int;
++
++/* void fn (int, int)
++ */
++static tree void_ftype_int_int;
++
++/* void fn (volatile void *, int)
++ */
++static tree void_ftype_volatile_void_p_int;
++
++/* void fn (void)
++ */
++static tree void_ftype_void;
++
++#undef NIOS2_DO_BUILTIN
++#define NIOS2_DO_BUILTIN(upper, lower, handler) \
++ static tree NIOS2_CONCAT (custom_, lower);
++NIOS2_FOR_ALL_CUSTOM_BUILTINS
++
++static tree custom_zdz;
++static tree custom_zsz;
++static tree custom_szz;
++static tree custom_sss;
++static tree custom_ssz;
++static tree custom_iss;
++static tree custom_ddd;
++static tree custom_ddz;
++static tree custom_idd;
++static tree custom_siz;
++static tree custom_suz;
++static tree custom_diz;
++static tree custom_duz;
++static tree custom_isz;
++static tree custom_usz;
++static tree custom_idz;
++static tree custom_udz;
++static tree custom_dsz;
++static tree custom_sdz;
++
++static const struct builtin_description bdesc[] = {
++ {CODE_FOR_ldbio, "__builtin_ldbio", NIOS2_BUILTIN_LDBIO,
++ &int_ftype_volatile_const_void_p, nios2_expand_LDXIO},
++ {CODE_FOR_ldbuio, "__builtin_ldbuio", NIOS2_BUILTIN_LDBUIO,
++ &int_ftype_volatile_const_void_p, nios2_expand_LDXIO},
++ {CODE_FOR_ldhio, "__builtin_ldhio", NIOS2_BUILTIN_LDHIO,
++ &int_ftype_volatile_const_void_p, nios2_expand_LDXIO},
++ {CODE_FOR_ldhuio, "__builtin_ldhuio", NIOS2_BUILTIN_LDHUIO,
++ &int_ftype_volatile_const_void_p, nios2_expand_LDXIO},
++ {CODE_FOR_ldwio, "__builtin_ldwio", NIOS2_BUILTIN_LDWIO,
++ &int_ftype_volatile_const_void_p, nios2_expand_LDXIO},
++
++ {CODE_FOR_stbio, "__builtin_stbio", NIOS2_BUILTIN_STBIO,
++ &void_ftype_volatile_void_p_int, nios2_expand_STXIO},
++ {CODE_FOR_sthio, "__builtin_sthio", NIOS2_BUILTIN_STHIO,
++ &void_ftype_volatile_void_p_int, nios2_expand_STXIO},
++ {CODE_FOR_stwio, "__builtin_stwio", NIOS2_BUILTIN_STWIO,
++ &void_ftype_volatile_void_p_int, nios2_expand_STXIO},
++
++ {CODE_FOR_sync, "__builtin_sync", NIOS2_BUILTIN_SYNC,
++ &void_ftype_void, nios2_expand_sync},
++ {CODE_FOR_rdctl, "__builtin_rdctl", NIOS2_BUILTIN_RDCTL,
++ &int_ftype_int, nios2_expand_rdctl},
++ {CODE_FOR_wrctl, "__builtin_wrctl", NIOS2_BUILTIN_WRCTL,
++ &void_ftype_int_int, nios2_expand_wrctl},
++
++#undef NIOS2_DO_BUILTIN
++#define NIOS2_DO_BUILTIN(upper, lower, handler) \
++ {NIOS2_CONCAT (CODE_FOR_custom_, lower), \
++ "__builtin_custom_" NIOS2_STRINGIFY (lower), \
++ NIOS2_CONCAT (NIOS2_BUILTIN_CUSTOM_, upper), \
++ &NIOS2_CONCAT (custom_, lower), \
++ NIOS2_CONCAT (nios2_expand_custom_, handler)},
++NIOS2_FOR_ALL_CUSTOM_BUILTINS
++
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ {NIOS2_CONCAT (CODE_FOR_, insn), \
++ "__builtin_custom_" NIOS2_STRINGIFY (opt), \
++ NIOS2_CONCAT (NIOS2_BUILTIN_FPU_, opt), \
++ &NIOS2_CONCAT (custom_, args), \
++ NIOS2_CONCAT (nios2_expand_custom_, args)},
++NIOS2_FOR_ALL_FPU_INSNS
++
++ {0, 0, 0, 0, 0},
++};
++
++static void
++nios2_init_builtins (void)
++{
++ const struct builtin_description *d;
++
++
++ endlink = void_list_node;
++
++ /* int fn (volatile const void *)
++ */
++ int_ftype_volatile_const_void_p = build_function_type (
++ integer_type_node,
++ tree_cons (NULL_TREE,
++ build_qualified_type (ptr_type_node,
++ TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE),
++ endlink));
++
++ /* void fn (volatile void *, int)
++ */
++ void_ftype_volatile_void_p_int = build_function_type (
++ void_type_node,
++ tree_cons (NULL_TREE,
++ build_qualified_type (ptr_type_node, TYPE_QUAL_VOLATILE),
++ tree_cons (NULL_TREE, integer_type_node, endlink)));
++
++ /* void fn (void)
++ */
++ void_ftype_void
++ = build_function_type (void_type_node,
++ endlink);
++
++ /* int fn (int)
++ */
++ int_ftype_int
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++
++ /* void fn (int, int)
++ */
++ void_ftype_int_int
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink)));
++
++ custom_n
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++ custom_ni
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink)));
++ custom_nf
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink)));
++ custom_np
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink)));
++ custom_nii
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_nif
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_nip
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_nfi
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_nff
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_nfp
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_npi
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_npf
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_npp
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++
++ custom_in
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++ custom_ini
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink)));
++ custom_inf
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink)));
++ custom_inp
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink)));
++ custom_inii
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_inif
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_inip
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_infi
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_inff
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_infp
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_inpi
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_inpf
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_inpp
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++
++ custom_fn
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++ custom_fni
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink)));
++ custom_fnf
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink)));
++ custom_fnp
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink)));
++ custom_fnii
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_fnif
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_fnip
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_fnfi
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_fnff
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_fnfp
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_fnpi
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_fnpf
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_fnpp
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++
++
++ custom_pn
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++ custom_pni
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink)));
++ custom_pnf
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink)));
++ custom_pnp
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink)));
++ custom_pnii
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_pnif
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_pnip
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_pnfi
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_pnff
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_pnfp
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++ custom_pnpi
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink))));
++ custom_pnpf
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink))));
++ custom_pnpp
++ = build_function_type (ptr_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ tree_cons (NULL_TREE, ptr_type_node,
++ endlink))));
++
++ custom_zdz
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink));
++
++ custom_zsz
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink));
++
++ custom_szz
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, void_type_node,
++ endlink));
++
++ custom_sss
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink)));
++
++ custom_ssz
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink));
++
++ custom_iss
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink)));
++
++ custom_ddd
++ = build_function_type (double_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink)));
++
++ custom_ddz
++ = build_function_type (double_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink));
++
++ custom_idd
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink)));
++
++ custom_siz
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++
++ custom_suz
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, unsigned_type_node,
++ endlink));
++
++ custom_diz
++ = build_function_type (double_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ endlink));
++
++ custom_duz
++ = build_function_type (double_type_node,
++ tree_cons (NULL_TREE, unsigned_type_node,
++ endlink));
++
++ custom_isz
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink));
++
++ custom_usz
++ = build_function_type (unsigned_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink));
++
++ custom_idz
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink));
++
++ custom_udz
++ = build_function_type (unsigned_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink));
++
++ custom_dsz
++ = build_function_type (double_type_node,
++ tree_cons (NULL_TREE, float_type_node,
++ endlink));
++
++ custom_sdz
++ = build_function_type (float_type_node,
++ tree_cons (NULL_TREE, double_type_node,
++ endlink));
++
++
++ for (d = bdesc; d->name; d++)
++ builtin_function (d->name, *d->type, d->code,
++ BUILT_IN_MD, NULL, NULL);
++}
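++
++/* Usage sketch (illustrative, not part of this patch): once the builtins
++ above are registered, user code can write, e.g.,
++
++ int r = __builtin_custom_inii (5, a, b);
++
++ which expands to a single "custom 5, rC, rA, rB" instruction; the
++ opcode argument must be a compile-time constant in the range 0-255
++ (see nios2_extract_opcode below). */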
++/* Expand an expression EXP that calls a built-in function,
++ with result going to TARGET if that's convenient
++ (and in mode MODE if that's convenient).
++ SUBTARGET may be used as the target for computing one of EXP's operands.
++ IGNORE is nonzero if the value is to be ignored. */
++
++static rtx
++nios2_expand_builtin (tree exp, rtx target, rtx subtarget,
++ enum machine_mode mode, int ignore)
++{
++ const struct builtin_description *d;
++ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
++ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
++
++ for (d = bdesc; d->name; d++)
++ if (d->code == fcode)
++ {
++ if (d->code > NIOS2_FIRST_FPU_INSN && d->code < NIOS2_LAST_FPU_INSN)
++ {
++ nios2_fpu_info *inf = &nios2_fpu_insns[d->code -
++ (NIOS2_FIRST_FPU_INSN + 1)];
++ const struct insn_data *idata = &insn_data[d->icode];
++ if (inf->N < 0)
++ fatal_error ("Cannot call `%s' without specifying switch"
++ " `-mcustom-%s'",
++ d->name,
++ inf->option);
++ if (inf->args[0] != 'z'
++ && (!target
++ || !(idata->operand[0].predicate) (target,
++ idata->operand[0].mode)))
++ target = gen_reg_rtx (idata->operand[0].mode);
++ }
++ return (d->expander) (d, exp, target, subtarget, mode, ignore);
++ }
++
++ /* We should have seen one of the functions we registered. */
++ gcc_unreachable ();
++}
++
++static rtx nios2_create_target (const struct builtin_description *, rtx);
++
++
++static rtx
++nios2_create_target (const struct builtin_description *d, rtx target)
++{
++ if (!target
++ || !(*insn_data[d->icode].operand[0].predicate) (
++ target, insn_data[d->icode].operand[0].mode))
++ {
++ target = gen_reg_rtx (insn_data[d->icode].operand[0].mode);
++ }
++
++ return target;
++}
++
++
++static rtx nios2_extract_opcode (const struct builtin_description *, int, tree);
++static rtx nios2_extract_operand (const struct builtin_description *, int, int,
++ tree);
++
++static rtx
++nios2_extract_opcode (const struct builtin_description *d, int op, tree arglist)
++{
++ enum machine_mode mode = insn_data[d->icode].operand[op].mode;
++ tree arg = TREE_VALUE (arglist);
++ rtx opcode = expand_expr (arg, NULL_RTX, mode, 0);
++
++ if (!(*insn_data[d->icode].operand[op].predicate) (opcode, mode))
++ error ("Custom instruction opcode must be compile time constant in the "
++ "range 0-255 for %s", d->name);
++
++ builtin_custom_seen[INTVAL (opcode)] = d->name;
++ nios2_custom_check_insns (0);
++ return opcode;
++}
++
++static rtx
++nios2_extract_operand (const struct builtin_description *d, int op, int argnum,
++ tree arglist)
++{
++ enum machine_mode mode = insn_data[d->icode].operand[op].mode;
++ tree arg = TREE_VALUE (arglist);
++ rtx operand = expand_expr (arg, NULL_RTX, mode, 0);
++
++ if (!(*insn_data[d->icode].operand[op].predicate) (operand, mode))
++ operand = copy_to_mode_reg (mode, operand);
++
++ /* ??? Better errors would be nice. */
++ if (!(*insn_data[d->icode].operand[op].predicate) (operand, mode))
++ error ("Invalid argument %d to %s", argnum, d->name);
++
++ return operand;
++}
++
++
++static rtx
++nios2_expand_custom_n (const struct builtin_description *d, tree exp,
++ rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx opcode;
++
++ /* custom_n should have exactly one operand. */
++ gcc_assert (insn_data[d->icode].n_operands == 1);
++
++ opcode = nios2_extract_opcode (d, 0, arglist);
++
++ pat = GEN_FCN (d->icode) (opcode);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return 0;
++}
++
++static rtx
++nios2_expand_custom_Xn (const struct builtin_description *d, tree exp,
++ rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx opcode;
++
++ /* custom_Xn should have exactly two operands. */
++ gcc_assert (insn_data[d->icode].n_operands == 2);
++
++ target = nios2_create_target (d, target);
++ opcode = nios2_extract_opcode (d, 1, arglist);
++
++ pat = GEN_FCN (d->icode) (target, opcode);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_nX (const struct builtin_description *d, tree exp,
++ rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx opcode;
++ rtx operands[1];
++ int i;
++
++
++ /* custom_nX should have exactly two operands. */
++ gcc_assert (insn_data[d->icode].n_operands == 2);
++
++ opcode = nios2_extract_opcode (d, 0, arglist);
++ for (i = 0; i < 1; i++)
++ {
++ arglist = TREE_CHAIN (arglist);
++ operands[i] = nios2_extract_operand (d, i + 1, i + 1, arglist);
++ }
++
++ pat = GEN_FCN (d->icode) (opcode, operands[0]);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return 0;
++}
++
++static rtx
++nios2_expand_custom_XnX (const struct builtin_description *d, tree exp,
++ rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx opcode;
++ rtx operands[1];
++ int i;
++
++ /* custom_Xn should have exactly three operands. */
++ gcc_assert (insn_data[d->icode].n_operands == 3);
++
++ target = nios2_create_target (d, target);
++ opcode = nios2_extract_opcode (d, 1, arglist);
++
++ for (i = 0; i < 1; i++)
++ {
++ arglist = TREE_CHAIN (arglist);
++ operands[i] = nios2_extract_operand (d, i + 2, i + 1, arglist);
++ }
++
++ pat = GEN_FCN (d->icode) (target, opcode, operands[0]);
++
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_nXX (const struct builtin_description *d, tree exp,
++ rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx opcode;
++ rtx operands[2];
++ int i;
++
++
++ /* custom_nX should have exactly three operands. */
++ gcc_assert (insn_data[d->icode].n_operands == 3);
++
++ opcode = nios2_extract_opcode (d, 0, arglist);
++ for (i = 0; i < 2; i++)
++ {
++ arglist = TREE_CHAIN (arglist);
++ operands[i] = nios2_extract_operand (d, i + 1, i + 1, arglist);
++ }
++
++ pat = GEN_FCN (d->icode) (opcode, operands[0], operands[1]);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return 0;
++}
++
++static rtx
++nios2_expand_custom_XnXX (const struct builtin_description *d, tree exp,
++ rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx opcode;
++ rtx operands[2];
++ int i;
++
++
++ /* custom_XnX should have exactly four operands. */
++ gcc_assert (insn_data[d->icode].n_operands == 4);
++
++ target = nios2_create_target (d, target);
++ opcode = nios2_extract_opcode (d, 1, arglist);
++ for (i = 0; i < 2; i++)
++ {
++ arglist = TREE_CHAIN (arglist);
++ operands[i] = nios2_extract_operand (d, i + 2, i + 1, arglist);
++ }
++
++ pat = GEN_FCN (d->icode) (target, opcode, operands[0], operands[1]);
++
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++
++
++static rtx
++nios2_expand_STXIO (const struct builtin_description *d, tree exp,
++ rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx store_dest, store_val;
++ enum insn_code icode = d->icode;
++
++ /* Stores should have exactly two operands. */
++ gcc_assert (insn_data[icode].n_operands == 2);
++
++ /* Process the destination of the store. */
++ {
++ enum machine_mode mode = insn_data[icode].operand[0].mode;
++ tree arg = TREE_VALUE (arglist);
++ store_dest = expand_expr (arg, NULL_RTX, VOIDmode, 0);
++
++ store_dest = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, store_dest));
++
++ /* ??? Better errors would be nice. */
++ if (!(*insn_data[icode].operand[0].predicate) (store_dest, mode))
++ error ("Invalid argument 1 to %s", d->name);
++ }
++
++
++ /* Process the value to store. */
++ {
++ enum machine_mode mode = insn_data[icode].operand[1].mode;
++ tree arg = TREE_VALUE (TREE_CHAIN (arglist));
++ store_val = expand_expr (arg, NULL_RTX, mode, 0);
++
++ if (!(*insn_data[icode].operand[1].predicate) (store_val, mode))
++ store_val = copy_to_mode_reg (mode, store_val);
++
++ /* ??? Better errors would be nice. */
++ if (!(*insn_data[icode].operand[1].predicate) (store_val, mode))
++ error ("Invalid argument 2 to %s", d->name);
++ }
++
++ pat = GEN_FCN (d->icode) (store_dest, store_val);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return 0;
++}
++
++
++static rtx
++nios2_expand_LDXIO (const struct builtin_description * d, tree exp, rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx ld_src;
++ enum insn_code icode = d->icode;
++
++ /* Loads should have exactly two operands. */
++ gcc_assert (insn_data[icode].n_operands == 2);
++
++ target = nios2_create_target (d, target);
++
++ {
++ enum machine_mode mode = insn_data[icode].operand[1].mode;
++ tree arg = TREE_VALUE (arglist);
++ ld_src = expand_expr (arg, NULL_RTX, VOIDmode, 0);
++
++ ld_src = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, ld_src));
++
++ /* ??? Better errors would be nice. */
++ if (!(*insn_data[icode].operand[1].predicate) (ld_src, mode))
++ error ("Invalid argument 1 to %s", d->name);
++ }
++
++ pat = GEN_FCN (d->icode) (target, ld_src);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++
++static rtx
++nios2_expand_sync (const struct builtin_description * d ATTRIBUTE_UNUSED,
++ tree exp ATTRIBUTE_UNUSED, rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ emit_insn (gen_sync ());
++ return 0;
++}
++
++static rtx
++nios2_expand_rdctl (const struct builtin_description * d ATTRIBUTE_UNUSED,
++ tree exp ATTRIBUTE_UNUSED, rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx rdctl_reg;
++ enum insn_code icode = d->icode;
++
++ /* rdctl should have exactly two operands. */
++ gcc_assert (insn_data[icode].n_operands == 2);
++
++ target = nios2_create_target (d, target);
++
++ {
++ enum machine_mode mode = insn_data[icode].operand[1].mode;
++ tree arg = TREE_VALUE (arglist);
++ rdctl_reg = expand_expr (arg, NULL_RTX, VOIDmode, 0);
++
++ if (!(*insn_data[icode].operand[1].predicate) (rdctl_reg, mode))
++ error ("Control register number must be in range 0-31 for %s", d->name);
++ }
++
++ pat = GEN_FCN (d->icode) (target, rdctl_reg);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_wrctl (const struct builtin_description * d ATTRIBUTE_UNUSED,
++ tree exp ATTRIBUTE_UNUSED, rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat;
++ rtx wrctl_reg, store_val;
++ enum insn_code icode = d->icode;
++
++ /* Stores should have exactly two operands. */
++ gcc_assert (insn_data[icode].n_operands == 2);
++
++ /* Process the destination of the store. */
++ {
++ enum machine_mode mode = insn_data[icode].operand[0].mode;
++ tree arg = TREE_VALUE (arglist);
++ wrctl_reg = expand_expr (arg, NULL_RTX, VOIDmode, 0);
++
++ if (!(*insn_data[icode].operand[0].predicate) (wrctl_reg, mode))
++ error ("Control register number must be in range 0-31 for %s", d->name);
++ }
++
++
++ /* Process the value to store. */
++ {
++ enum machine_mode mode = insn_data[icode].operand[1].mode;
++ tree arg = TREE_VALUE (TREE_CHAIN (arglist));
++ store_val = expand_expr (arg, NULL_RTX, mode, 0);
++
++ if (!(*insn_data[icode].operand[1].predicate) (store_val, mode))
++ store_val = copy_to_mode_reg (mode, store_val);
++
++ /* ??? Better errors would be nice. */
++ if (!(*insn_data[icode].operand[1].predicate) (store_val, mode))
++ error ("Invalid argument 2 to %s", d->name);
++ }
++
++ pat = GEN_FCN (d->icode) (wrctl_reg, store_val);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return 0;
++}
++
++static rtx
++nios2_extract_double (const struct insn_data *idata, tree arglist, int index)
++{
++ rtx arg;
++
++ while (index--)
++ arglist = TREE_CHAIN (arglist);
++ arg = expand_expr (TREE_VALUE (arglist), NULL_RTX, DFmode, 0);
++ if (!(*(idata->operand[index+1].predicate)) (arg, DFmode))
++ arg = copy_to_mode_reg (DFmode, arg);
++ return arg;
++}
++
++static rtx
++nios2_extract_float (const struct insn_data *idata, tree arglist, int index)
++{
++ rtx arg;
++
++ while (index--)
++ arglist = TREE_CHAIN (arglist);
++ arg = expand_expr (TREE_VALUE (arglist), NULL_RTX, SFmode, 0);
++ if (!(*(idata->operand[index+1].predicate)) (arg, SFmode))
++ arg = copy_to_mode_reg (SFmode, arg);
++ return arg;
++}
++
++static rtx
++nios2_extract_integer (const struct insn_data *idata, tree arglist, int index)
++{
++ rtx arg;
++
++ while (index--)
++ arglist = TREE_CHAIN (arglist);
++ arg = expand_expr (TREE_VALUE (arglist), NULL_RTX, SImode, 0);
++ if (!(*(idata->operand[index+1].predicate)) (arg, SImode))
++ arg = copy_to_mode_reg (SImode, arg);
++ return arg;
++}
++
++static rtx
++nios2_expand_custom_zdz (const struct builtin_description *d,
++ tree exp,
++ rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (nios2_extract_double (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return 0;
++}
++
++static rtx
++nios2_expand_custom_zsz (const struct builtin_description *d,
++ tree exp,
++ rtx target ATTRIBUTE_UNUSED,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (nios2_extract_float (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return 0;
++}
++
++static rtx
++nios2_expand_custom_szz (const struct builtin_description *d,
++ tree exp ATTRIBUTE_UNUSED,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ rtx pat = GEN_FCN (d->icode) (target);
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_sss (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 0),
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 1));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_ssz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_iss (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 0),
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 1));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_ddd (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 0),
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 1));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_ddz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_idd (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 0),
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 1));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_siz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_integer (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_suz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_integer (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_diz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_integer (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_duz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_integer (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_isz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_usz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_idz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_udz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_dsz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_float (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++static rtx
++nios2_expand_custom_sdz (const struct builtin_description *d,
++ tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree arglist = TREE_OPERAND (exp, 1);
++ rtx pat = GEN_FCN (d->icode) (target,
++ nios2_extract_double (&insn_data[d->icode],
++ arglist, 0));
++ if (pat)
++ emit_insn (pat);
++ return target;
++}
++
++#include "gt-nios2.h"
++
+Index: gcc-4.1.2/gcc/config/nios2/nios2.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/nios2.h 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,1064 @@
++/* NOT ASSIGNED TO FSF. COPYRIGHT ALTERA. */
++/* Definitions of target machine for Altera NIOS 2G NIOS2 version.
++ Copyright (C) 2007 Altera
++ Contributed by Jonah Graham (jgraham@altera.com),
++ Will Reece (wreece@altera.com), and Jeff DaSilva (jdasilva@altera.com).
++
++This file is part of GNU CC.
++
++GNU CC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2, or (at your option)
++any later version.
++
++GNU CC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GNU CC; see the file COPYING. If not, write to
++the Free Software Foundation, 59 Temple Place - Suite 330,
++Boston, MA 02111-1307, USA. */
++
++
++
++#define TARGET_CPU_CPP_BUILTINS() \
++ do \
++ { \
++ builtin_define_std ("NIOS2"); \
++ builtin_define_std ("nios2"); \
++ if (TARGET_BIG_ENDIAN) \
++ builtin_define_std ("nios2_big_endian"); \
++ else \
++ builtin_define_std ("nios2_little_endian"); \
++ } \
++ while (0)
++#define TARGET_VERSION fprintf (stderr, " (Altera Nios II)")
++
++
++
++
++
++/*********************************
++ * Run-time Target Specification
++ *********************************/
++
++extern int target_flags;
++
++/*
++ * There's a lot of error-prone tedium with all the different
++ * custom floating point instructions. Try to automate it a bit
++ * to make it easier to deal with.
++ */
++#define NIOS2_STRINGIFY_INNER(x) #x
++#define NIOS2_STRINGIFY(x) NIOS2_STRINGIFY_INNER(x)
++#define NIOS2_CONCAT_INNER(x, y) x ## y
++#define NIOS2_CONCAT(x, y) NIOS2_CONCAT_INNER (x, y)
++
++#define NIOS2_FOR_ALL_FPU_INSNS \
++ NIOS2_FPU_INSN (fwrx, nios2_fwrx, zdz) \
++ NIOS2_FPU_INSN (fwry, nios2_fwry, zsz) \
++ NIOS2_FPU_INSN (frdxlo, nios2_frdxlo, szz) \
++ NIOS2_FPU_INSN (frdxhi, nios2_frdxhi, szz) \
++ NIOS2_FPU_INSN (frdy, nios2_frdy, szz) \
++\
++ NIOS2_FPU_INSN (fadds, addsf3, sss) \
++ NIOS2_FPU_INSN (fsubs, subsf3, sss) \
++ NIOS2_FPU_INSN (fmuls, mulsf3, sss) \
++ NIOS2_FPU_INSN (fdivs, divsf3, sss) \
++ NIOS2_FPU_INSN (fmins, minsf3, sss) \
++ NIOS2_FPU_INSN (fmaxs, maxsf3, sss) \
++ NIOS2_FPU_INSN (fnegs, negsf2, ssz) \
++ NIOS2_FPU_INSN (fabss, abssf2, ssz) \
++ NIOS2_FPU_INSN (fsqrts, sqrtsf2, ssz) \
++ NIOS2_FPU_INSN (fcoss, cossf2, ssz) \
++ NIOS2_FPU_INSN (fsins, sinsf2, ssz) \
++ NIOS2_FPU_INSN (ftans, tansf2, ssz) \
++ NIOS2_FPU_INSN (fatans, atansf2, ssz) \
++ NIOS2_FPU_INSN (fexps, expsf2, ssz) \
++ NIOS2_FPU_INSN (flogs, logsf2, ssz) \
++ NIOS2_FPU_INSN (fcmplts, nios2_sltsf, iss) \
++ NIOS2_FPU_INSN (fcmples, nios2_slesf, iss) \
++ NIOS2_FPU_INSN (fcmpgts, nios2_sgtsf, iss) \
++ NIOS2_FPU_INSN (fcmpges, nios2_sgesf, iss) \
++ NIOS2_FPU_INSN (fcmpeqs, nios2_seqsf, iss) \
++ NIOS2_FPU_INSN (fcmpnes, nios2_snesf, iss) \
++\
++ NIOS2_FPU_INSN (faddd, adddf3, ddd) \
++ NIOS2_FPU_INSN (fsubd, subdf3, ddd) \
++ NIOS2_FPU_INSN (fmuld, muldf3, ddd) \
++ NIOS2_FPU_INSN (fdivd, divdf3, ddd) \
++ NIOS2_FPU_INSN (fmind, mindf3, ddd) \
++ NIOS2_FPU_INSN (fmaxd, maxdf3, ddd) \
++ NIOS2_FPU_INSN (fnegd, negdf2, ddz) \
++ NIOS2_FPU_INSN (fabsd, absdf2, ddz) \
++ NIOS2_FPU_INSN (fsqrtd, sqrtdf2, ddz) \
++ NIOS2_FPU_INSN (fcosd, cosdf2, ddz) \
++ NIOS2_FPU_INSN (fsind, sindf2, ddz) \
++ NIOS2_FPU_INSN (ftand, tandf2, ddz) \
++ NIOS2_FPU_INSN (fatand, atandf2, ddz) \
++ NIOS2_FPU_INSN (fexpd, expdf2, ddz) \
++ NIOS2_FPU_INSN (flogd, logdf2, ddz) \
++ NIOS2_FPU_INSN (fcmpltd, nios2_sltdf, idd) \
++ NIOS2_FPU_INSN (fcmpled, nios2_sledf, idd) \
++ NIOS2_FPU_INSN (fcmpgtd, nios2_sgtdf, idd) \
++ NIOS2_FPU_INSN (fcmpged, nios2_sgedf, idd) \
++ NIOS2_FPU_INSN (fcmpeqd, nios2_seqdf, idd) \
++ NIOS2_FPU_INSN (fcmpned, nios2_snedf, idd) \
++\
++ NIOS2_FPU_INSN (floatis, floatsisf2, siz) \
++ NIOS2_FPU_INSN (floatus, floatunssisf2, suz) \
++ NIOS2_FPU_INSN (floatid, floatsidf2, diz) \
++ NIOS2_FPU_INSN (floatud, floatunssidf2, duz) \
++ NIOS2_FPU_INSN (fixsi, fixsfsi2, isz) \
++ NIOS2_FPU_INSN (fixsu, fixunssfsi2, usz) \
++ NIOS2_FPU_INSN (fixdi, fixdfsi2, idz) \
++ NIOS2_FPU_INSN (fixdu, fixunsdfsi2, udz) \
++ NIOS2_FPU_INSN (fextsd, extendsfdf2, dsz) \
++ NIOS2_FPU_INSN (ftruncds, truncdfsf2, sdz)
++
++enum
++{
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ NIOS2_CONCAT (nios2_fpu_, insn),
++NIOS2_FOR_ALL_FPU_INSNS
++ nios2_fpu_max_insn
++};
++
++struct cpp_reader;
++typedef const char * (*nios2_outputfn) (rtx);
++typedef void (*nios2_pragmafn) (struct cpp_reader *);
++
++typedef struct
++{
++ const char *option; /* name of switch, e.g. fadds */
++ const char *insnnm; /* name of gcc insn, e.g. addsf3 */
++ const char *args; /* args to gcc insn, e.g. sss */
++ int N; /* value of switch as an integer, -1 = not used */
++ nios2_outputfn output; /* output function for use in .md file */
++ const char *pname; /* name of corresponding #pragma custom- */
++ nios2_pragmafn pragma; /* pragma function for register_target_pragmas */
++ const char *nopname; /* name of corresponding #pragma no-custom- */
++ nios2_pragmafn nopragma; /* pragma function for register_target_pragmas */
++ int is_double; /* does this insn have any double operands */
++ int needed_by_double; /* is this insn needed if doubles are used? */
++ int needs_unsafe; /* does this insn require
++ -funsafe-math-optimizations to work? */
++ int needs_finite; /* does this insn require
++ -ffinite-math-only to work? */
++ int pragma_seen; /* have we seen the corresponding #pragma? */
++ int* pN; /* pointer to the value as set by the option
++ parser. */
++} nios2_fpu_info;
++
++extern nios2_fpu_info nios2_fpu_insns[nios2_fpu_max_insn];
++extern const char *nios2_custom_fpu_cfg_string;
++
++#undef NIOS2_FPU_INSN
++#define NIOS2_FPU_INSN(opt, insn, args) \
++ extern int NIOS2_CONCAT (nios2_custom_, opt);
++NIOS2_FOR_ALL_FPU_INSNS
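++/* For each table entry the line above expands to a declaration such as
++ "extern int nios2_custom_fadds;". The variable holds the opcode given
++ to the corresponding -mcustom-<name> switch, or -1 when the custom
++ instruction is not enabled (see the N field of nios2_fpu_info above). */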
++
++/* We default to little endian unless TARGET_ENDIAN_DEFAULT is
++ overridden to select big endian. */
++#ifndef TARGET_ENDIAN_DEFAULT
++# define TARGET_ENDIAN_DEFAULT 0
++#endif
++
++/* Default target_flags if no switches specified. */
++#ifndef TARGET_DEFAULT
++# define TARGET_DEFAULT (MASK_HAS_MUL | \
++ TARGET_ENDIAN_DEFAULT)
++#endif
++
++/* Switch Recognition by gcc.c. Add -G xx support */
++#undef SWITCH_TAKES_ARG
++#define SWITCH_TAKES_ARG(CHAR) \
++ (DEFAULT_SWITCH_TAKES_ARG (CHAR) || (CHAR) == 'G')
++
++#define OVERRIDE_OPTIONS override_options ()
++#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) optimization_options (LEVEL, SIZE)
++#define CAN_DEBUG_WITHOUT_FP
++
++#define CC1_SPEC "\
++%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}}"
++
++#if TARGET_ENDIAN_DEFAULT == 0
++# define ASM_SPEC "\
++%{!EB:%{!meb:-EL}} %{EB|meb:-EB}"
++# define LINK_SPEC_ENDIAN "\
++%{!EB:%{!meb:-EL}} %{EB|meb:-EB}"
++# define MULTILIB_DEFAULTS { "EL" }
++#else
++# define ASM_SPEC "\
++%{!EL:%{!mel:-EB}} %{EL|mel:-EL}"
++# define LINK_SPEC_ENDIAN "\
++%{!EL:%{!mel:-EB}} %{EL|mel:-EL}"
++# define MULTILIB_DEFAULTS { "EB" }
++#endif
++
++#define LINK_SPEC LINK_SPEC_ENDIAN \
++ " %{shared:-shared} \
++ %{static:-Bstatic}"
++
++#undef LIB_SPEC
++#define LIB_SPEC \
++"--start-group -lc -lgcc \
++ -lnosys -lstack \
++ --end-group \
++"
++
++
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC \
++"crt0%O%s"
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC \
++ ""
++
++
++/***********************
++ * Storage Layout
++ ***********************/
++
++#define DEFAULT_SIGNED_CHAR 1
++#define BITS_BIG_ENDIAN 0
++#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
++#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
++#if defined(__nios2_big_endian__)
++#define LIBGCC2_WORDS_BIG_ENDIAN 1
++#else
++#define LIBGCC2_WORDS_BIG_ENDIAN 0
++#endif
++#define BITS_PER_UNIT 8
++#define BITS_PER_WORD 32
++#define UNITS_PER_WORD 4
++#define POINTER_SIZE 32
++#define BIGGEST_ALIGNMENT 32
++#define STRICT_ALIGNMENT 1
++#define FUNCTION_BOUNDARY 32
++#define PARM_BOUNDARY 32
++#define STACK_BOUNDARY 32
++#define PREFERRED_STACK_BOUNDARY 32
++#define MAX_FIXED_MODE_SIZE 64
++
++#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
++ ((TREE_CODE (EXP) == STRING_CST) \
++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
++
++
++/**********************
++ * Layout of Source Language Data Types
++ **********************/
++
++#define INT_TYPE_SIZE 32
++#define SHORT_TYPE_SIZE 16
++#define LONG_TYPE_SIZE 32
++#define LONG_LONG_TYPE_SIZE 64
++#define FLOAT_TYPE_SIZE 32
++#define DOUBLE_TYPE_SIZE 64
++#define LONG_DOUBLE_TYPE_SIZE DOUBLE_TYPE_SIZE
++
++
++/*************************
++ * Condition Code Status
++ ************************/
++
++/* comparison type */
++/* ??? Currently CMP_DI is unused. CMP_SF and CMP_DF are only used if
++ the corresponding -mcustom-<opcode> switches are present. */
++enum cmp_type {
++ CMP_SI, /* compare four byte integers */
++ CMP_DI, /* compare eight byte integers */
++ CMP_SF, /* compare single precision floats */
++ CMP_DF, /* compare double precision floats */
++ CMP_MAX /* max comparison type */
++};
++
++extern GTY(()) rtx branch_cmp[2]; /* operands for compare */
++extern enum cmp_type branch_type; /* what type of branch to use */
++
++/**********************
++ * Register Usage
++ **********************/
++
++/* ---------------------------------- *
++ * Basic Characteristics of Registers
++ * ---------------------------------- */
++
++/*
++Register Number
++ Register Name
++ Alternate Name
++ Purpose
++0 r0 zero always zero
++1 r1 at Assembler Temporary
++2-3 r2-r3 Return Location
++4-7 r4-r7 Register Arguments
++8-15 r8-r15 Caller Saved Registers
++16-22 r16-r22 Callee Saved Registers
++21 r21 sc Static Chain (Callee Saved)
++ ??? Does $sc want to be caller or callee
++ saved. If caller, 15, else 21.
++22 r22 Global Offset Table pointer
++23 r23 Thread pointer
++24 r24 et Exception Temporary
++25 r25 bt Breakpoint Temporary
++26 r26 gp Global Pointer
++27 r27 sp Stack Pointer
++28 r28 fp Frame Pointer
++29 r29 ea Exception Return Address
++30 r30 ba Breakpoint Return Address
++31 r31 ra Return Address
++
++32 ctl0 status
++33 ctl1 estatus STATUS saved by exception ?
++34 ctl2 bstatus STATUS saved by break ?
++35 ctl3 ipri Interrupt Priority Mask ?
++36 ctl4 ecause Exception Cause ?
++
++37 pc Not an actual register
++
++38 fake_fp Fake Frame Pointer which will always be eliminated.
++39 fake_ap Fake Argument Pointer which will always be eliminated.
++
++40 First Pseudo Register
++
++
++The definitions for all the hard register numbers
++are located in nios2.md.
++*/
++
++#define ET_REGNO (24)
++#define GP_REGNO (26)
++#define SP_REGNO (27)
++#define FP_REGNO (28)
++#define EA_REGNO (29)
++#define RA_REGNO (31)
++#define FIRST_RETVAL_REGNO (2)
++#define LAST_RETVAL_REGNO (3)
++#define FIRST_ARG_REGNO (4)
++#define LAST_ARG_REGNO (7)
++#define SC_REGNO (21)
++#define PC_REGNO (37)
++#define FAKE_FP_REGNO (38)
++#define FAKE_AP_REGNO (39)
++
++#define FIRST_PSEUDO_REGISTER 40
++#define NUM_ARG_REGS (LAST_ARG_REGNO - FIRST_ARG_REGNO + 1)
++
++
++
++#define FIXED_REGISTERS \
++ { \
++/* +0 1 2 3 4 5 6 7 8 9 */ \
++/* 0 */ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, \
++/* 10 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++/* 20 */ 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, \
++/* 30 */ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, \
++ }
++
++/* call used is the same as caller saved
++ + fixed regs + args + ret vals */
++#define CALL_USED_REGISTERS \
++ { \
++/* +0 1 2 3 4 5 6 7 8 9 */ \
++/* 0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
++/* 10 */ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, \
++/* 20 */ 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, \
++/* 30 */ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, \
++ }
++
++#define THREAD_POINTER_REGNUM 23
++
++#define HARD_REGNO_NREGS(REGNO, MODE) \
++ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
++ / UNITS_PER_WORD)
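++/* E.g. SImode and SFmode values occupy a single register, while DImode
++ and DFmode values occupy two consecutive registers. */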
++
++/* --------------------------- *
++ * How Values Fit in Registers
++ * --------------------------- */
++
++#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
++
++#define MODES_TIEABLE_P(MODE1, MODE2) 1
++
++
++/*************************
++ * Register Classes
++ *************************/
++
++enum reg_class
++{
++ NO_REGS,
++ D00_REG,
++ D01_REG,
++ D02_REG,
++ D03_REG,
++ D04_REG,
++ D05_REG,
++ D06_REG,
++ D07_REG,
++ D08_REG,
++ D09_REG,
++ D10_REG,
++ D11_REG,
++ D12_REG,
++ D13_REG,
++ D14_REG,
++ D15_REG,
++ D16_REG,
++ D17_REG,
++ D18_REG,
++ D19_REG,
++ D20_REG,
++ D21_REG,
++ D22_REG,
++ D23_REG,
++ D24_REG,
++ D25_REG,
++ D26_REG,
++ D27_REG,
++ D28_REG,
++ D29_REG,
++ D30_REG,
++ D31_REG,
++ SIB_REGS,
++ GP_REGS,
++ ALL_REGS,
++ LIM_REG_CLASSES
++};
++
++#define N_REG_CLASSES (int) LIM_REG_CLASSES
++
++#define REG_CLASS_NAMES \
++ {"NO_REGS", \
++ "D00_REG", \
++ "D01_REG", \
++ "D02_REG", \
++ "D03_REG", \
++ "D04_REG", \
++ "D05_REG", \
++ "D06_REG", \
++ "D07_REG", \
++ "D08_REG", \
++ "D09_REG", \
++ "D10_REG", \
++ "D11_REG", \
++ "D12_REG", \
++ "D13_REG", \
++ "D14_REG", \
++ "D15_REG", \
++ "D16_REG", \
++ "D17_REG", \
++ "D18_REG", \
++ "D19_REG", \
++ "D20_REG", \
++ "D21_REG", \
++ "D22_REG", \
++ "D23_REG", \
++ "D24_REG", \
++ "D25_REG", \
++ "D26_REG", \
++ "D27_REG", \
++ "D28_REG", \
++ "D29_REG", \
++ "D30_REG", \
++ "D31_REG", \
++ "SIB_REGS", \
++ "GP_REGS", \
++ "ALL_REGS"}
++
++#define GENERAL_REGS ALL_REGS
++
++#define REG_CLASS_CONTENTS \
++/* NO_REGS */ {{ 0, 0}, \
++/* D00_REG */ { 1 << 0, 0}, \
++/* D01_REG */ { 1 << 1, 0}, \
++/* D02_REG */ { 1 << 2, 0}, \
++/* D03_REG */ { 1 << 3, 0}, \
++/* D04_REG */ { 1 << 4, 0}, \
++/* D05_REG */ { 1 << 5, 0}, \
++/* D06_REG */ { 1 << 6, 0}, \
++/* D07_REG */ { 1 << 7, 0}, \
++/* D08_REG */ { 1 << 8, 0}, \
++/* D09_REG */ { 1 << 9, 0}, \
++/* D10_REG */ { 1 << 10, 0}, \
++/* D11_REG */ { 1 << 11, 0}, \
++/* D12_REG */ { 1 << 12, 0}, \
++/* D13_REG */ { 1 << 13, 0}, \
++/* D14_REG */ { 1 << 14, 0}, \
++/* D15_REG */ { 1 << 15, 0}, \
++/* D16_REG */ { 1 << 16, 0}, \
++/* D17_REG */ { 1 << 17, 0}, \
++/* D18_REG */ { 1 << 18, 0}, \
++/* D19_REG */ { 1 << 19, 0}, \
++/* D20_REG */ { 1 << 20, 0}, \
++/* D21_REG */ { 1 << 21, 0}, \
++/* D22_REG */ { 1 << 22, 0}, \
++/* D23_REG */ { 1 << 23, 0}, \
++/* D24_REG */ { 1 << 24, 0}, \
++/* D25_REG */ { 1 << 25, 0}, \
++/* D26_REG */ { 1 << 26, 0}, \
++/* D27_REG */ { 1 << 27, 0}, \
++/* D28_REG */ { 1 << 28, 0}, \
++/* D29_REG */ { 1 << 29, 0}, \
++/* D30_REG */ { 1 << 30, 0}, \
++/* D31_REG */ { 1 << 31, 0}, \
++/* SIB_REGS */ { 0xfe0c, 0}, \
++/* GP_REGS */ {~0, 0}, \
++/* ALL_REGS */ {~0,~0}} \
++
++#define GP_REGNO_P(REGNO) ((REGNO) < 32)
++#define REGNO_REG_CLASS(REGNO) (GP_REGNO_P (REGNO) ? GP_REGS : ALL_REGS)
++
++#define BASE_REG_CLASS ALL_REGS
++#define INDEX_REG_CLASS ALL_REGS
++
++/* 'r' is handled automatically */
++#define REG_CLASS_FROM_LETTER(C) ((C) == 'j' ? SIB_REGS : NO_REGS)
++
++#define REG_CLASS_FROM_CONSTRAINT(CHAR, STR) \
++ ((CHAR) == 'j' ? SIB_REGS : \
++ reg_class_from_constraint ((CHAR), (STR)))
++
++
++#define REGNO_OK_FOR_BASE_P2(REGNO, STRICT) \
++ ((STRICT) \
++ ? (REGNO) < FIRST_PSEUDO_REGISTER \
++ : (REGNO) < FIRST_PSEUDO_REGISTER || \
++ (reg_renumber && reg_renumber[REGNO] < FIRST_PSEUDO_REGISTER))
++
++#define REGNO_OK_FOR_INDEX_P2(REGNO, STRICT) \
++ (REGNO_OK_FOR_BASE_P2 (REGNO, STRICT))
++
++#define REGNO_OK_FOR_BASE_P(REGNO) \
++ (REGNO_OK_FOR_BASE_P2 (REGNO, 1))
++
++#define REGNO_OK_FOR_INDEX_P(REGNO) \
++ (REGNO_OK_FOR_INDEX_P2 (REGNO, 1))
++
++#define REG_OK_FOR_BASE_P2(X, STRICT) \
++ (STRICT \
++ ? REGNO_OK_FOR_BASE_P2 (REGNO (X), 1) \
++ : REGNO_OK_FOR_BASE_P2 (REGNO (X), 1) || \
++ REGNO(X) >= FIRST_PSEUDO_REGISTER)
++
++#define REG_OK_FOR_INDEX_P2(X, STRICT) \
++ (STRICT \
++ ? REGNO_OK_FOR_INDEX_P2 (REGNO (X), 1) \
++ : REGNO_OK_FOR_INDEX_P2 (REGNO (X), 1) || \
++ REGNO(X) >= FIRST_PSEUDO_REGISTER)
++
++#define CLASS_MAX_NREGS(CLASS, MODE) \
++ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
++ / UNITS_PER_WORD)
++
++
++#define SMALL_INT(X) ((unsigned HOST_WIDE_INT) ((X) + 0x8000) < 0x10000)
++#define SMALL_INT_UNSIGNED(X) ((unsigned HOST_WIDE_INT) (X) < 0x10000)
++#define UPPER16_INT(X) (((X) & 0xffff) == 0)
++#define SHIFT_INT(X) ((X) >= 0 && (X) <= 31)
++#define RDWRCTL_INT(X) ((X) >= 0 && (X) <= 31)
++#define CUSTOM_INSN_OPCODE(X) ((X) >= 0 && (X) <= 255)
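++/* Ranges accepted by the constant constraints of nios2.md, used by
++ CONST_OK_FOR_LETTER_P below: SMALL_INT matches signed 16-bit
++ immediates (-32768 ... 32767), SMALL_INT_UNSIGNED matches unsigned
++ 16-bit immediates (0 ... 65535) and UPPER16_INT matches constants
++ whose low 16 bits are all zero. */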
++
++#define CONST_OK_FOR_LETTER_P(VALUE, C) \
++ ( \
++ (C) == 'I' ? SMALL_INT (VALUE) : \
++ (C) == 'J' ? SMALL_INT_UNSIGNED (VALUE) : \
++ (C) == 'K' ? UPPER16_INT (VALUE) : \
++ (C) == 'L' ? SHIFT_INT (VALUE) : \
++ (C) == 'M' ? (VALUE) == 0 : \
++ (C) == 'N' ? CUSTOM_INSN_OPCODE (VALUE) : \
++ (C) == 'O' ? RDWRCTL_INT (VALUE) : \
++ 0)
++
++#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) 0
++
++#define PREFERRED_RELOAD_CLASS(X, CLASS) \
++ ((CLASS) == NO_REGS ? GENERAL_REGS : (CLASS))
++
++#define CONSTRAINT_LEN(C, STR) \
++ ((C) == 'D' ? 3 : DEFAULT_CONSTRAINT_LEN ((C), (STR)))
++
++/* 'S' matches immediates which are in small data
++ and therefore can be added to gp to create a
++ 32-bit value. */
++#define EXTRA_CONSTRAINT(VALUE, C) \
++ ((C) == 'S' \
++ && (GET_CODE (VALUE) == SYMBOL_REF) \
++ && SYMBOL_REF_IN_NIOS2_SMALL_DATA_P (VALUE))
++
++
++
++
++/* Say that the epilogue uses the return address register. Note that
++ in the case of sibcalls, the values "used by the epilogue" are
++ considered live at the start of the called function. */
++#define EPILOGUE_USES(REGNO) ((REGNO) == RA_REGNO)
++
++
++/**********************************
++ * Trampolines for Nested Functions
++ ***********************************/
++
++#define TRAMPOLINE_TEMPLATE(FILE) \
++ error ("trampolines not yet implemented")
++#define TRAMPOLINE_SIZE 20
++#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
++ error ("trampolines not yet implemented")
++
++/***************************
++ * Stack Layout and Calling Conventions
++ ***************************/
++
++/* ------------------ *
++ * Basic Stack Layout
++ * ------------------ */
++
++/* The downward variants are used by the compiler,
++ the upward ones serve as documentation */
++#define STACK_GROWS_DOWNWARD
++#define FRAME_GROWS_UPWARD
++#define ARGS_GROW_UPWARD
++
++#define STARTING_FRAME_OFFSET 0
++#define FIRST_PARM_OFFSET(FUNDECL) 0
++
++/* Before the prologue, RA lives in r31. */
++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RA_REGNO)
++#define RETURN_ADDR_RTX(C,F) nios2_get_return_address (C)
++
++/* -------------------------------------- *
++ * Registers That Address the Stack Frame
++ * -------------------------------------- */
++
++#define STACK_POINTER_REGNUM SP_REGNO
++#define STATIC_CHAIN_REGNUM SC_REGNO
++#define PC_REGNUM PC_REGNO
++#define DWARF_FRAME_RETURN_COLUMN RA_REGNO
++
++/* The DWARF 2 CFA column which tracks the return address from a
++ signal handler context. */
++#define SIGNAL_UNWIND_RETURN_COLUMN EA_REGNO
++
++/* Base register for access to local variables of the function. We
++ pretend that the frame pointer is a non-existent hard register, and
++ then eliminate it to HARD_FRAME_POINTER_REGNUM. */
++#define FRAME_POINTER_REGNUM FAKE_FP_REGNO
++
++#define HARD_FRAME_POINTER_REGNUM FP_REGNO
++/* The argument pointer always needs to be eliminated,
++ so it is set to a fake hard register. */
++#define ARG_POINTER_REGNUM FAKE_AP_REGNO
++
++/* The CFA includes the pretend args */
++#define ARG_POINTER_CFA_OFFSET(FNDECL) \
++ (gcc_assert ((FNDECL) == current_function_decl), \
++ FIRST_PARM_OFFSET (FNDECL) + current_function_pretend_args_size)
++
++/* ----------------------------------------- *
++ * Eliminating Frame Pointer and Arg Pointer
++ * ----------------------------------------- */
++
++#define FRAME_POINTER_REQUIRED 0
++
++#define ELIMINABLE_REGS \
++{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
++
++#define CAN_ELIMINATE(FROM, TO) \
++ ((TO) == STACK_POINTER_REGNUM ? ! frame_pointer_needed : 1)
++
++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
++ (OFFSET) = nios2_initial_elimination_offset ((FROM), (TO))
++
++/* Treat LOC as a byte offset from the stack pointer and round it up
++ to the next fully-aligned offset. */
++#define STACK_ALIGN(LOC) \
++ (((LOC) + ((PREFERRED_STACK_BOUNDARY / 8) - 1)) & \
++ ~((PREFERRED_STACK_BOUNDARY / 8) - 1))
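++/* With PREFERRED_STACK_BOUNDARY set to 32 bits this rounds up to a
++ multiple of 4 bytes, e.g. STACK_ALIGN (13) == 16. */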
++
++
++/* ------------------------------ *
++ * Passing Arguments in Registers
++ * ------------------------------ */
++
++/* see nios2.c */
++#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
++ (function_arg (&CUM, MODE, TYPE, NAMED))
++
++#define FUNCTION_ARG_CALLEE_COPIES(CUM, MODE, TYPE, NAMED) 0
++
++typedef struct nios2_args
++{
++ int regs_used;
++} CUMULATIVE_ARGS;
++
++/* This is to initialize the above unused CUM data type */
++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
++ (init_cumulative_args (&CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS))
++
++#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
++ (function_arg_advance (&CUM, MODE, TYPE, NAMED))
++
++#define FUNCTION_ARG_PADDING(MODE, TYPE) \
++ (nios2_function_arg_padding_upward ((MODE), (TYPE)) ? upward : downward)
++
++#define PAD_VARARGS_DOWN \
++ (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
++
++#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
++ (nios2_block_reg_padding_upward ((MODE), (TYPE), (FIRST)) ? upward : \
++ downward)
++
++#define FUNCTION_ARG_REGNO_P(REGNO) \
++ ((REGNO) >= FIRST_ARG_REGNO && (REGNO) <= LAST_ARG_REGNO)
++
++/* ----------------------------- *
++ * Generating Code for Profiling
++ * ----------------------------- */
++
++
++#define PROFILE_BEFORE_PROLOGUE
++#define NO_PROFILE_COUNTERS 1
++#define FUNCTION_PROFILER(FILE, LABELNO) \
++ function_profiler ((FILE), (LABELNO))
++
++/* --------------------------------------- *
++ * Passing Function Arguments on the Stack
++ * --------------------------------------- */
++
++#define PUSH_ARGS 0
++#define ACCUMULATE_OUTGOING_ARGS 1
++
++#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACKSIZE) 0
++
++/* --------------------------------------- *
++ * How Scalar Function Values Are Returned
++ * --------------------------------------- */
++
++#define FUNCTION_VALUE(VALTYPE, FUNC) \
++ gen_rtx_REG(TYPE_MODE(VALTYPE), FIRST_RETVAL_REGNO)
++
++#define LIBCALL_VALUE(MODE) \
++ gen_rtx_REG(MODE, FIRST_RETVAL_REGNO)
++
++#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == FIRST_RETVAL_REGNO)
++
++/* ----------------------------- *
++ * How Large Values Are Returned
++ * ----------------------------- */
++
++
++#define RETURN_IN_MEMORY(TYPE) \
++ nios2_return_in_memory (TYPE)
++
++
++#define STRUCT_VALUE 0
++
++#define DEFAULT_PCC_STRUCT_RETURN 0
++
++/*******************
++ * Addressing Modes
++ *******************/
++
++
++#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
++ do \
++ { \
++ X = nios2_legitimize_address (X, OLDX, MODE); \
++ if (memory_address_p (MODE, X)) \
++ goto WIN; \
++ } \
++ while (0)
++
++#define CONSTANT_ADDRESS_P(X) \
++ (CONSTANT_P (X) && nios2_legitimate_address (X, Pmode, 0))
++
++#define MAX_REGS_PER_ADDRESS 1
++
++/* Go to ADDR if X is a valid address. */
++#ifndef REG_OK_STRICT
++#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
++ { \
++ if (nios2_legitimate_address ((X), (MODE), 0)) \
++ goto ADDR; \
++ }
++#else
++#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
++ { \
++ if (nios2_legitimate_address ((X), (MODE), 1)) \
++ goto ADDR; \
++ }
++#endif
++
++#ifndef REG_OK_STRICT
++#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P2 (REGNO (X), 0)
++#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P2 (REGNO (X), 0)
++#else
++#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P2 (REGNO (X), 1)
++#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P2 (REGNO (X), 1)
++#endif
++
++#define LEGITIMATE_CONSTANT_P(X) nios2_legitimate_constant (X)
++
++/* Nios II has no mode dependent addresses. */
++#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL)
++
++/* Set if this has a weak declaration */
++#define SYMBOL_FLAG_WEAK_DECL (1 << SYMBOL_FLAG_MACH_DEP_SHIFT)
++#define SYMBOL_REF_WEAK_DECL_P(RTX) \
++ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_WEAK_DECL) != 0)
++
++
++/* True if a symbol is both small and not weak; in that case gp-relative
++ access can be used. gp-relative access cannot be used in position
++ independent code, nor for externally defined symbols, because the
++ compilation unit which defines the symbol may place it in a section
++ that cannot be reached from gp. */
++#define SYMBOL_REF_IN_NIOS2_SMALL_DATA_P(RTX) \
++ (!flag_pic && SYMBOL_REF_SMALL_P(RTX) && !SYMBOL_REF_WEAK_DECL_P(RTX) && \
++ !SYMBOL_REF_EXTERNAL_P(RTX) && SYMBOL_REF_TLS_MODEL(RTX)==0)
++
++/*****************
++ * Describing Relative Costs of Operations
++ *****************/
++
++#define SLOW_BYTE_ACCESS 1
++
++/* It is as good to call a constant function address as to call an address
++ kept in a register.
++ ??? Not really true any more: now that call cannot address the full
++ range of memory, callr may need to be used. */
++
++#define NO_FUNCTION_CSE
++
++/***************************
++ * Position Independent Code
++ ***************************/
++
++#define PIC_OFFSET_TABLE_REGNUM 22
++
++#define LEGITIMATE_PIC_OPERAND_P(X) nios2_legitimate_pic_operand_p (X)
++
++/*****************************************
++ * Defining the Output Assembler Language
++ *****************************************/
++
++/* ------------------------------------------ *
++ * The Overall Framework of an Assembler File
++ * ------------------------------------------ */
++
++#define ASM_APP_ON "#APP\n"
++#define ASM_APP_OFF "#NO_APP\n"
++
++#define ASM_COMMENT_START "# "
++
++/* ------------------------------- *
++ * Output and Generation of Labels
++ * ------------------------------- */
++
++#define GLOBAL_ASM_OP "\t.global\t"
++
++
++/* -------------- *
++ * Output of Data
++ * -------------- */
++
++/* -------------------------------- *
++ * Assembler Commands for Alignment
++ * -------------------------------- */
++
++#define ASM_OUTPUT_ALIGN(FILE, LOG) \
++ do { \
++ fprintf ((FILE), "%s%d\n", ALIGN_ASM_OP, (LOG)); \
++ } while (0)
++
++
++/* -------------------------------- *
++ * Output of Assembler Instructions
++ * -------------------------------- */
++
++#define REGISTER_NAMES \
++{ \
++ "zero", \
++ "at", \
++ "r2", \
++ "r3", \
++ "r4", \
++ "r5", \
++ "r6", \
++ "r7", \
++ "r8", \
++ "r9", \
++ "r10", \
++ "r11", \
++ "r12", \
++ "r13", \
++ "r14", \
++ "r15", \
++ "r16", \
++ "r17", \
++ "r18", \
++ "r19", \
++ "r20", \
++ "r21", \
++ "r22", \
++ "r23", \
++ "et", \
++ "bt", \
++ "gp", \
++ "sp", \
++ "fp", \
++ "ea", \
++ "ba", \
++ "ra", \
++ "status", \
++ "estatus", \
++ "bstatus", \
++ "ipri", \
++ "ecause", \
++ "pc", \
++ "fake_fp", \
++ "fake_ap", \
++}
++
++#define ADDITIONAL_REGISTER_NAMES \
++{ \
++ {"r0", 0}, \
++ {"r1", 1}, \
++ {"r24", 24}, \
++ {"r25", 25}, \
++ {"r26", 26}, \
++ {"r27", 27}, \
++ {"r28", 28}, \
++ {"r29", 29}, \
++ {"r30", 30}, \
++ {"r31", 31} \
++}
++
++
++#define ASM_OUTPUT_OPCODE(STREAM, PTR)\
++ (PTR) = asm_output_opcode (STREAM, PTR)
++
++#define PRINT_OPERAND(STREAM, X, CODE) \
++ nios2_print_operand (STREAM, X, CODE)
++
++#define PRINT_OPERAND_ADDRESS(STREAM, X) \
++ nios2_print_operand_address (STREAM, X)
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
++do { fputs (integer_asm_op (POINTER_SIZE / BITS_PER_UNIT, TRUE), FILE); \
++ fprintf (FILE, ".L%u\n", (unsigned) (VALUE)); \
++ } while (0)
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)\
++do { fputs (integer_asm_op (POINTER_SIZE / BITS_PER_UNIT, TRUE), STREAM); \
++ fprintf (STREAM, ".L%u-.L%u\n", (unsigned) (VALUE), (unsigned) (REL)); \
++ } while (0)
++
++
++/* ------------ *
++ * Label Output
++ * ------------ */
++
++
++/* ---------------------------------------------------- *
++ * Dividing the Output into Sections (Texts, Data, ...)
++ * ---------------------------------------------------- */
++
++/* Output before read-only data. */
++#define TEXT_SECTION_ASM_OP "\t.section\t.text"
++
++/* Output before writable data. */
++#define DATA_SECTION_ASM_OP "\t.section\t.data"
++
++
++/* Default the definition of "small data" to 8 bytes. */
++/* ??? How come I can't use HOST_WIDE_INT here? */
++extern unsigned long nios2_section_threshold;
++#define NIOS2_DEFAULT_GVALUE 8
++
++
++
++/* This says how to output assembler code to declare an
++ uninitialized external linkage data object. Under SVR4,
++ the linker seems to want the alignment of data objects
++ to depend on their types. We do exactly that here. */
++
++#undef COMMON_ASM_OP
++#define COMMON_ASM_OP "\t.comm\t"
++
++#undef ASM_OUTPUT_ALIGNED_COMMON
++#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
++do \
++{ \
++ fprintf ((FILE), "%s", COMMON_ASM_OP); \
++ assemble_name ((FILE), (NAME)); \
++ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", (SIZE), \
++ (ALIGN) / BITS_PER_UNIT); \
++} \
++while (0)
++
++
++/* This says how to output assembler code to declare an
++ uninitialized internal linkage data object. Under SVR4,
++ the linker seems to want the alignment of data objects
++ to depend on their types. We do exactly that here. */
++
++#undef ASM_OUTPUT_ALIGNED_LOCAL
++#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
++do { \
++ if ((SIZE) <= nios2_section_threshold) \
++ named_section (0, ".sbss", 0); \
++ else \
++ named_section (0, ".bss", 0); \
++ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
++ if (!flag_inhibit_size_directive) \
++ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
++ ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \
++ ASM_OUTPUT_LABEL(FILE, NAME); \
++ ASM_OUTPUT_SKIP((FILE), (SIZE) ? (SIZE) : 1); \
++} while (0)
++
++/* Put the jump tables in .text because when using position independent code,
++ Nios II elf has no relocation that can represent arbitrary differences
++ between symbols in different sections. */
++#define JUMP_TABLES_IN_TEXT_SECTION 1
++
++/* Exception Handling */
++
++/* Describe __builtin_eh_return */
++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, LAST_RETVAL_REGNO)
++#define EH_RETURN_DATA_REGNO(N) ((N) <= (LAST_ARG_REGNO - FIRST_ARG_REGNO) \
++ ? (N) + FIRST_ARG_REGNO : INVALID_REGNUM)
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
++ (!flag_pic ? DW_EH_PE_sdata4 \
++ /* FIXME: These get expanded to dynamic relocs, which is wrong */ \
++ /* : !(GLOBAL) ? DW_EH_PE_pcrel | DW_EH_PE_sdata4 */ \
++ : DW_EH_PE_aligned)
++
++/***************************
++ * Miscellaneous Parameters
++ ***************************/
++
++#define MOVE_MAX 4
++
++#define STORE_FLAG_VALUE 1
++#define Pmode SImode
++#define FUNCTION_MODE QImode
++
++#define REGISTER_TARGET_PRAGMAS() nios2_register_target_pragmas ()
++
++#define CASE_VECTOR_MODE Pmode
++
++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
++
++#define LOAD_EXTEND_OP(MODE) (ZERO_EXTEND)
++
++#define WORD_REGISTER_OPERATIONS
+Index: gcc-4.1.2/gcc/config/nios2/nios2.md
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/nios2.md 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,3012 @@
++;; NOT ASSIGNED TO FSF. COPYRIGHT ALTERA.
++;;
++;; Machine Description for Altera NIOS 2G NIOS2 version.
++;; Copyright (C) 2005 Altera
++;; Contributed by Jonah Graham (jgraham@altera.com) and
++;; Will Reece (wreece@altera.com).
++;;
++;; This file is part of GNU CC.
++;;
++;; GNU CC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 2, or (at your option)
++;; any later version.
++;;
++;; GNU CC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GNU CC; see the file COPYING. If not, write to
++;; the Free Software Foundation, 59 Temple Place - Suite 330,
++;; Boston, MA 02111-1307, USA. */
++
++
++
++
++;*****************************************************************************
++;*
++;* constraint strings
++;*
++;*****************************************************************************
++;
++; We use the following constraint letters for constants
++;
++; I: -32768 to 32767
++; J: 0 to 65535
++; K: $nnnn0000 for some nnnn
++; L: 0 to 31 (for shift counts)
++; M: 0
++; N: 0 to 255 (for custom instruction numbers)
++; O: 0 to 31 (for control register numbers)
++;
++; We use the following built-in constraints:
++;
++; r: general purpose register (r0..r31)
++; m: memory operand
++;
++; Plus, we define the following constraint strings:
++;
++; S: symbol that is in the "small data" area
++; Dnn: Dnn_REG (just rnn)
++;
++
++
++
++;*****************************************************************************
++;*
++;* constants
++;*
++;*****************************************************************************
++(define_constants [
++ (UNSPEC_BLOCKAGE 0)
++ (UNSPEC_LDBIO 1)
++ (UNSPEC_LDBUIO 2)
++ (UNSPEC_LDHIO 3)
++ (UNSPEC_LDHUIO 4)
++ (UNSPEC_LDWIO 5)
++ (UNSPEC_STBIO 6)
++ (UNSPEC_STHIO 7)
++ (UNSPEC_STWIO 8)
++ (UNSPEC_SYNC 9)
++ (UNSPEC_WRCTL 10)
++ (UNSPEC_RDCTL 11)
++ (UNSPEC_TRAP 12)
++ (UNSPEC_STACK_OVERFLOW_DETECT_AND_TRAP 13)
++ (UNSPEC_FCOSS 14)
++ (UNSPEC_FCOSD 15)
++ (UNSPEC_FSINS 16)
++ (UNSPEC_FSIND 17)
++ (UNSPEC_FTANS 18)
++ (UNSPEC_FTAND 19)
++ (UNSPEC_FATANS 20)
++ (UNSPEC_FATAND 21)
++ (UNSPEC_FEXPS 22)
++ (UNSPEC_FEXPD 23)
++ (UNSPEC_FLOGS 24)
++ (UNSPEC_FLOGD 25)
++ (UNSPEC_FWRX 26)
++ (UNSPEC_FWRY 27)
++ (UNSPEC_FRDXLO 28)
++ (UNSPEC_FRDXHI 29)
++ (UNSPEC_FRDY 30)
++ (UNSPEC_LOAD_GOT_REGISTER 31)
++ (UNSPEC_PIC_SYM 32)
++ (UNSPEC_PIC_CALL_SYM 33)
++ (UNSPEC_TLS 34)
++ (UNSPEC_TLS_LDM 35)
++ (UNSPEC_LOAD_TLS_IE 36)
++ (UNSPEC_ADD_TLS_LE 37)
++ (UNSPEC_ADD_TLS_GD 38)
++ (UNSPEC_ADD_TLS_LDM 39)
++ (UNSPEC_ADD_TLS_LDO 40)
++
++ (UNSPEC_EH_RETURN 41)
++
++ ;; Note that values 100..151 are used by custom instructions, see below.
++])
++
++
++
++;*****************************************************************************
++;*
++;* instruction scheduler
++;*
++;*****************************************************************************
++
++; No scheduling information is currently available, so we assume that no
++; instruction can use the result of the previous instruction without
++; incurring a stall.
++
++; length of an instruction (in bytes)
++(define_attr "length" "" (const_int 4))
++(define_attr "type"
++ "unknown,complex,control,alu,cond_alu,st,ld,shift,mul,div,custom"
++ (const_string "complex"))
++
++(define_asm_attributes
++ [(set_attr "length" "4")
++ (set_attr "type" "complex")])
++
++(define_automaton "nios2")
++(automata_option "v")
++;(automata_option "no-minimization")
++(automata_option "ndfa")
++
++; The nios2 pipeline is fairly straightforward for the fast model.
++; Every alu operation is pipelined so that an instruction can
++; be issued every cycle. However, there are still potential
++; stalls which this description tries to deal with.
++
++(define_cpu_unit "cpu" "nios2")
++
++(define_insn_reservation "complex" 1
++ (eq_attr "type" "complex")
++ "cpu")
++
++(define_insn_reservation "control" 1
++ (eq_attr "type" "control")
++ "cpu")
++
++(define_insn_reservation "alu" 1
++ (eq_attr "type" "alu")
++ "cpu")
++
++(define_insn_reservation "cond_alu" 1
++ (eq_attr "type" "cond_alu")
++ "cpu")
++
++(define_insn_reservation "st" 1
++ (eq_attr "type" "st")
++ "cpu")
++
++(define_insn_reservation "custom" 1
++ (eq_attr "type" "custom")
++ "cpu")
++
++; shifts, muls and lds have three cycle latency
++(define_insn_reservation "ld" 3
++ (eq_attr "type" "ld")
++ "cpu")
++
++(define_insn_reservation "shift" 3
++ (eq_attr "type" "shift")
++ "cpu")
++
++(define_insn_reservation "mul" 3
++ (eq_attr "type" "mul")
++ "cpu")
++
++(define_insn_reservation "div" 1
++ (eq_attr "type" "div")
++ "cpu")
++
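++; In this model loads, shifts and multiplies have a latency of three
++; cycles while everything else completes in one, so the scheduler will
++; try to place independent instructions between such an operation and
++; the first use of its result.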
++
++;*****************************************************************************
++;*
++;* MOV Instructions
++;*
++;*****************************************************************************
++
++(define_expand "movqi"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "")
++ (match_operand:QI 1 "general_operand" ""))]
++ ""
++{
++ if (nios2_emit_move_sequence (operands, QImode))
++ DONE;
++})
++
++(define_insn "movqi_internal"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "=m, r,r, r")
++ (match_operand:QI 1 "general_operand" "rM,m,rM,I"))]
++ "(register_operand (operands[0], QImode)
++ || reg_or_0_operand (operands[1], QImode))"
++ "@
++ stb%o0\\t%z1, %0
++ ldbu%o1\\t%0, %1
++ mov\\t%0, %z1
++ movi\\t%0, %1"
++ [(set_attr "type" "st,ld,alu,alu")])
++
++(define_insn "ldbio"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_LDBIO))
++ (use (match_operand:SI 1 "memory_operand" "m"))]
++ ""
++ "ldbio\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++(define_insn "ldbuio"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_LDBUIO))
++ (use (match_operand:SI 1 "memory_operand" "m"))]
++ ""
++ "ldbuio\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++(define_insn "stbio"
++ [(set (match_operand:SI 0 "memory_operand" "=m")
++ (match_operand:SI 1 "reg_or_0_operand" "rM"))
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_STBIO)]
++ ""
++ "stbio\\t%z1, %0"
++ [(set_attr "type" "st")])
++
++
++(define_expand "movhi"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "")
++ (match_operand:HI 1 "general_operand" ""))]
++ ""
++{
++ if (nios2_emit_move_sequence (operands, HImode))
++ DONE;
++})
++
++(define_insn "movhi_internal"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "=m, r,r, r,r")
++ (match_operand:HI 1 "general_operand" "rM,m,rM,I,J"))]
++ "(register_operand (operands[0], HImode)
++ || reg_or_0_operand (operands[1], HImode))"
++ "@
++ sth%o0\\t%z1, %0
++ ldhu%o1\\t%0, %1
++ mov\\t%0, %z1
++ movi\\t%0, %1
++ movui\\t%0, %1"
++ [(set_attr "type" "st,ld,alu,alu,alu")])
++
++(define_insn "ldhio"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_LDHIO))
++ (use (match_operand:SI 1 "memory_operand" "m"))]
++ ""
++ "ldhio\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++(define_insn "ldhuio"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_LDHUIO))
++ (use (match_operand:SI 1 "memory_operand" "m"))]
++ ""
++ "ldhuio\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++(define_insn "sthio"
++ [(set (match_operand:SI 0 "memory_operand" "=m")
++ (match_operand:SI 1 "reg_or_0_operand" "rM"))
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_STHIO)]
++ ""
++ "sthio\\t%z1, %0"
++ [(set_attr "type" "st")])
++
++(define_expand "movsi"
++ [(set (match_operand:SI 0 "nonimmediate_operand" "")
++ (match_operand:SI 1 "general_operand" ""))]
++ ""
++{
++ if (nios2_emit_move_sequence (operands, SImode))
++ DONE;
++})
++
++(define_insn "movsi_internal"
++ [(set (match_operand:SI 0 "nonimmediate_operand" "=m, r,r, r,r,r,r")
++ (match_operand:SI 1 "general_operand" "rM,m,rM,I,J,S,i"))]
++ "(register_operand (operands[0], SImode)
++ || reg_or_0_operand (operands[1], SImode))"
++ "@
++ stw%o0\\t%z1, %0
++ ldw%o1\\t%0, %1
++ mov\\t%0, %z1
++ movi\\t%0, %1
++ movui\\t%0, %1
++ addi\\t%0, gp, %%gprel(%1)
++ movhi\\t%0, %H1\;addi\\t%0, %0, %L1"
++ [(set_attr "type" "st,ld,alu,alu,alu,alu,alu")])
++
++(define_insn "ldwio"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_LDWIO))
++ (use (match_operand:SI 1 "memory_operand" "m"))]
++ ""
++ "ldwio\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++(define_insn "stwio"
++ [(set (match_operand:SI 0 "memory_operand" "=m")
++ (match_operand:SI 1 "reg_or_0_operand" "rM"))
++ (unspec_volatile:SI [(const_int 0)] UNSPEC_STWIO)]
++ ""
++ "stwio\\t%z1, %0"
++ [(set_attr "type" "st")])
++
++
++
++;*****************************************************************************
++;*
++;* zero extension
++;*
++;*****************************************************************************
++
++
++(define_insn "zero_extendhisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ andi\\t%0, %1, 0xffff
++ ldhu%o1\\t%0, %1"
++ [(set_attr "type" "alu,ld")])
++
++(define_insn "zero_extendqihi2"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ andi\\t%0, %1, 0xff
++ ldbu%o1\\t%0, %1"
++ [(set_attr "type" "alu,ld")])
++
++(define_insn "zero_extendqisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ andi\\t%0, %1, 0xff
++ ldbu%o1\\t%0, %1"
++ [(set_attr "type" "alu,ld")])
++
++
++
++;*****************************************************************************
++;*
++;* sign extension
++;*
++;*****************************************************************************
++(define_expand "extendhisi2"
++ [(set (match_operand:SI 0 "register_operand" "")
++ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
++ ""
++{
++})
++
++(define_insn "*extendhisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extend:SI (match_operand:HI 1 "register_operand" "r")))]
++ ""
++ "#")
++
++(define_split
++ [(set (match_operand:SI 0 "register_operand" "")
++ (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
++ "reload_completed"
++ [(set (match_dup 0)
++ (and:SI (match_dup 1) (const_int 65535)))
++ (set (match_dup 0)
++ (xor:SI (match_dup 0) (const_int 32768)))
++ (set (match_dup 0)
++ (plus:SI (match_dup 0) (const_int -32768)))]
++ "operands[1] = gen_lowpart (SImode, operands[1]);")
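++;; The split above (and the analogous QImode splits below) implements
++;; sign extension without a dedicated instruction, using the identity
++;; sign_ext16 (x) = ((x & 0xffff) ^ 0x8000) - 0x8000: the mask keeps the
++;; low 16 bits and the xor/add pair propagates bit 15 as the sign.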
++
++(define_insn "extendhisi2_internal"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
++ ""
++ "ldh%o1\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++
++(define_expand "extendqihi2"
++ [(set (match_operand:HI 0 "register_operand" "")
++ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))]
++ ""
++{
++})
++
++(define_insn "*extendqihi2"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (sign_extend:HI (match_operand:QI 1 "register_operand" "r")))]
++ ""
++ "#")
++
++(define_split
++ [(set (match_operand:HI 0 "register_operand" "")
++ (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
++ "reload_completed"
++ [(set (match_dup 0)
++ (and:SI (match_dup 1) (const_int 255)))
++ (set (match_dup 0)
++ (xor:SI (match_dup 0) (const_int 128)))
++ (set (match_dup 0)
++ (plus:SI (match_dup 0) (const_int -128)))]
++ "operands[0] = gen_lowpart (SImode, operands[0]);
++ operands[1] = gen_lowpart (SImode, operands[1]);")
++
++
++
++(define_insn "extendqihi2_internal"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
++ ""
++ "ldb%o1\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++
++(define_expand "extendqisi2"
++ [(set (match_operand:SI 0 "register_operand" "")
++ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
++ ""
++{
++})
++
++(define_insn "*extendqisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extend:SI (match_operand:QI 1 "register_operand" "r")))]
++ ""
++ "#")
++
++(define_split
++ [(set (match_operand:SI 0 "register_operand" "")
++ (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
++ "reload_completed"
++ [(set (match_dup 0)
++ (and:SI (match_dup 1) (const_int 255)))
++ (set (match_dup 0)
++ (xor:SI (match_dup 0) (const_int 128)))
++ (set (match_dup 0)
++ (plus:SI (match_dup 0) (const_int -128)))]
++ "operands[1] = gen_lowpart (SImode, operands[1]);")
++
++(define_insn "extendqisi2_insn"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
++ ""
++ "ldb%o1\\t%0, %1"
++ [(set_attr "type" "ld")])
++
++
++
++
++;*****************************************************************************
++;*
++;* Arithmetic Operations
++;*
++;*****************************************************************************
++
++(define_insn "addsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
++ (match_operand:SI 2 "arith_operand" "r,I")))]
++ ""
++ "add%i2\\t%0, %1, %z2"
++ [(set_attr "type" "alu")])
++
++(define_insn "addsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (plus:SF (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_addsf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_addsf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "adddf3"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (plus:DF (match_operand:DF 1 "register_operand" "%r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_adddf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_adddf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "subsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "register_operand" "r")))]
++ ""
++ "sub\\t%0, %z1, %2"
++ [(set_attr "type" "alu")])
++
++(define_insn "subsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (minus:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_subsf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_subsf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "subdf3"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (minus:DF (match_operand:DF 1 "register_operand" "r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_subdf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_subdf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "mulsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (mult:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "arith_operand" "r,I")))]
++ "TARGET_HAS_MUL"
++ "mul%i2\\t%0, %1, %z2"
++ [(set_attr "type" "mul")])
++
++(define_insn "mulsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (mult:SF (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_mulsf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_mulsf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "muldf3"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (mult:DF (match_operand:DF 1 "register_operand" "%r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_muldf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_muldf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_expand "divsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ ""
++{
++ if (!TARGET_HAS_DIV)
++ {
++ if (!TARGET_FAST_SW_DIV)
++ FAIL;
++ else
++ {
++ if (nios2_emit_expensive_div (operands, SImode))
++ DONE;
++ }
++ }
++})
++
++(define_insn "divsi3_insn"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_HAS_DIV"
++ "div\\t%0, %1, %2"
++ [(set_attr "type" "div")])
++
++(define_insn "divsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (div:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_divsf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_divsf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "divdf3"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (div:DF (match_operand:DF 1 "register_operand" "r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_divdf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_divdf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "udivsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (udiv:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_HAS_DIV"
++ "divu\\t%0, %1, %2"
++ [(set_attr "type" "div")])
++
++(define_insn "smulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_HAS_MULX"
++ "mulxss\\t%0, %1, %2"
++ [(set_attr "type" "mul")])
++
++(define_insn "umulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
++ (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_HAS_MULX"
++ "mulxuu\\t%0, %1, %2"
++ [(set_attr "type" "mul")])
++
++
++(define_expand "mulsidi3_little_endian"
++ [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 0)
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (subreg:SI (match_dup 0) 4)
++ (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
++ (sign_extend:DI (match_dup 2)))
++ (const_int 32))))]
++ "TARGET_HAS_MULX && !WORDS_BIG_ENDIAN"
++ "")
++
++(define_expand "mulsidi3_big_endian"
++ [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (subreg:SI (match_dup 0) 0)
++ (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
++ (sign_extend:DI (match_dup 2)))
++ (const_int 32))))]
++ "TARGET_HAS_MULX && WORDS_BIG_ENDIAN"
++ "")
++
++(define_expand "mulsidi3"
++ [(match_operand:DI 0 "register_operand" "")
++ (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")]
++ "TARGET_HAS_MULX"
++ {
++ if (WORDS_BIG_ENDIAN)
++ {
++ emit_insn (gen_mulsidi3_big_endian (operands[0],
++ operands[1],
++ operands[2]));
++ }
++ else
++ {
++ emit_insn (gen_mulsidi3_little_endian (operands[0],
++ operands[1],
++ operands[2]));
++ }
++ DONE;
++ })
++
++(define_expand "umulsidi3_little_endian"
++ [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 0)
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (subreg:SI (match_dup 0) 4)
++ (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
++ (zero_extend:DI (match_dup 2)))
++ (const_int 32))))]
++ "TARGET_HAS_MULX && !WORDS_BIG_ENDIAN"
++ "")
++
++(define_expand "umulsidi3_big_endian"
++ [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (subreg:SI (match_dup 0) 0)
++ (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
++ (zero_extend:DI (match_dup 2)))
++ (const_int 32))))]
++ "TARGET_HAS_MULX && WORDS_BIG_ENDIAN"
++ "")
++
++(define_expand "umulsidi3"
++ [(match_operand:DI 0 "register_operand" "")
++ (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")]
++ "TARGET_HAS_MULX"
++ {
++ if (WORDS_BIG_ENDIAN)
++ {
++ emit_insn (gen_umulsidi3_big_endian (operands[0],
++ operands[1],
++ operands[2]));
++ }
++ else
++ {
++ emit_insn (gen_umulsidi3_little_endian (operands[0],
++ operands[1],
++ operands[2]));
++ }
++ DONE;
++ })
++
++
++;*****************************************************************************
++;*
++;* Negate and ones complement
++;*
++;*****************************************************************************
++
++(define_insn "negsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (neg:SI (match_operand:SI 1 "register_operand" "r")))]
++ ""
++{
++ operands[2] = const0_rtx;
++ return "sub\\t%0, %z2, %1";
++}
++ [(set_attr "type" "alu")])
++
++(define_insn "negsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (neg:SF (match_operand:SF 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_negsf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_negsf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "negdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (neg:DF (match_operand:DF 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_negdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_negdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "one_cmplsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (not:SI (match_operand:SI 1 "register_operand" "r")))]
++ ""
++{
++ operands[2] = const0_rtx;
++ return "nor\\t%0, %z2, %1";
++}
++ [(set_attr "type" "alu")])
++
++
++;*****************************************************************************
++;*
++;* Miscellaneous floating point
++;*
++;*****************************************************************************
++(define_insn "nios2_fwrx"
++ [(unspec_volatile [(match_operand:DF 0 "register_operand" "r")] UNSPEC_FWRX)]
++ "nios2_fpu_insns[nios2_fpu_nios2_fwrx].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_fwrx].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "nios2_fwry"
++ [(unspec_volatile [(match_operand:SF 0 "register_operand" "r")] UNSPEC_FWRY)]
++ "nios2_fpu_insns[nios2_fpu_nios2_fwry].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_fwry].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "nios2_frdxlo"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(const_int 0)] UNSPEC_FRDXLO))]
++ "nios2_fpu_insns[nios2_fpu_nios2_frdxlo].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_frdxlo].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "nios2_frdxhi"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(const_int 0)] UNSPEC_FRDXHI))]
++ "nios2_fpu_insns[nios2_fpu_nios2_frdxhi].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_frdxhi].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "nios2_frdy"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(const_int 0)] UNSPEC_FRDY))]
++ "nios2_fpu_insns[nios2_fpu_nios2_frdy].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_frdy].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "minsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (if_then_else:SF (lt:SF (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r"))
++ (match_dup 1)
++ (match_dup 2)))]
++ "nios2_fpu_insns[nios2_fpu_minsf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_minsf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "mindf3"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (if_then_else:DF (lt:DF (match_operand:DF 1 "register_operand" "%r")
++ (match_operand:DF 2 "register_operand" "r"))
++ (match_dup 1)
++ (match_dup 2)))]
++ "nios2_fpu_insns[nios2_fpu_mindf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_mindf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "maxsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (if_then_else:SF (lt:SF (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r"))
++ (match_dup 2)
++ (match_dup 1)))]
++ "nios2_fpu_insns[nios2_fpu_maxsf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_maxsf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "maxdf3"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (if_then_else:DF (lt:DF (match_operand:DF 1 "register_operand" "%r")
++ (match_operand:DF 2 "register_operand" "r"))
++ (match_dup 2)
++ (match_dup 1)))]
++ "nios2_fpu_insns[nios2_fpu_maxdf3].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_maxdf3].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "abssf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (abs:SF (match_operand:SF 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_abssf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_abssf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "absdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (abs:DF (match_operand:DF 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_absdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_absdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "sqrtsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (sqrt:SF (match_operand:SF 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_sqrtsf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_sqrtsf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "sqrtdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (sqrt:DF (match_operand:DF 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_sqrtdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_sqrtdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "cossf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FCOSS))]
++ "nios2_fpu_insns[nios2_fpu_cossf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_cossf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "cosdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unspec:DF [(match_operand:DF 1 "register_operand" "r")] UNSPEC_FCOSD))]
++ "nios2_fpu_insns[nios2_fpu_cosdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_cosdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "sinsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FSINS))]
++ "nios2_fpu_insns[nios2_fpu_sinsf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_sinsf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "sindf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unspec:DF [(match_operand:DF 1 "register_operand" "r")] UNSPEC_FSIND))]
++ "nios2_fpu_insns[nios2_fpu_sindf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_sindf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "tansf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FTANS))]
++ "nios2_fpu_insns[nios2_fpu_tansf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_tansf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "tandf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unspec:DF [(match_operand:DF 1 "register_operand" "r")] UNSPEC_FTAND))]
++ "nios2_fpu_insns[nios2_fpu_tandf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_tandf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "atansf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FATANS))]
++ "nios2_fpu_insns[nios2_fpu_atansf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_atansf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "atandf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unspec:DF [(match_operand:DF 1 "register_operand" "r")] UNSPEC_FATAND))]
++ "nios2_fpu_insns[nios2_fpu_atandf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_atandf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "expsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FEXPS))]
++ "nios2_fpu_insns[nios2_fpu_expsf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_expsf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "expdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unspec:DF [(match_operand:DF 1 "register_operand" "r")] UNSPEC_FEXPD))]
++ "nios2_fpu_insns[nios2_fpu_expdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_expdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "logsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FLOGS))]
++ "nios2_fpu_insns[nios2_fpu_logsf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_logsf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "logdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unspec:DF [(match_operand:DF 1 "register_operand" "r")] UNSPEC_FLOGD))]
++ "nios2_fpu_insns[nios2_fpu_logdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_logdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++;*****************************************************************************
++;*
++;* Logical Operations
++;*
++;*****************************************************************************
++
++(define_insn "andsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r, r,r")
++ (and:SI (match_operand:SI 1 "register_operand" "%r, r,r")
++ (match_operand:SI 2 "logical_operand" "rM,J,K")))]
++ ""
++ "@
++ and\\t%0, %1, %z2
++ and%i2\\t%0, %1, %2
++ andh%i2\\t%0, %1, %U2"
++ [(set_attr "type" "alu")])
++
++(define_insn "iorsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r, r,r")
++ (ior:SI (match_operand:SI 1 "register_operand" "%r, r,r")
++ (match_operand:SI 2 "logical_operand" "rM,J,K")))]
++ ""
++ "@
++ or\\t%0, %1, %z2
++ or%i2\\t%0, %1, %2
++ orh%i2\\t%0, %1, %U2"
++ [(set_attr "type" "alu")])
++
++(define_insn "*norsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (and:SI (not:SI (match_operand:SI 1 "register_operand" "%r"))
++ (not:SI (match_operand:SI 2 "reg_or_0_operand" "rM"))))]
++ ""
++ "nor\\t%0, %1, %z2"
++ [(set_attr "type" "alu")])
++
++(define_insn "xorsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r, r,r")
++ (xor:SI (match_operand:SI 1 "register_operand" "%r, r,r")
++ (match_operand:SI 2 "logical_operand" "rM,J,K")))]
++ ""
++ "@
++ xor\\t%0, %1, %z2
++ xor%i2\\t%0, %1, %2
++ xorh%i2\\t%0, %1, %U2"
++ [(set_attr "type" "alu")])
++
++
++
++;*****************************************************************************
++;*
++;* Shifts
++;*
++;*****************************************************************************
++
++(define_insn "ashlsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (ashift:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "shift_operand" "r,L")))]
++ ""
++
++{
++  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 1)
++ return "add\t%0,%1,%1";
++ return "sll%i2\t%0,%1,%z2";
++}
++ [(set_attr "type" "shift")])
++
++(define_insn "ashrsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "shift_operand" "r,L")))]
++ ""
++ "sra%i2\\t%0, %1, %z2"
++ [(set_attr "type" "shift")])
++
++(define_insn "lshrsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "shift_operand" "r,L")))]
++ ""
++ "srl%i2\\t%0, %1, %z2"
++ [(set_attr "type" "shift")])
++
++(define_insn "rotlsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (rotate:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "shift_operand" "r,L")))]
++ ""
++ "rol%i2\\t%0, %1, %z2"
++ [(set_attr "type" "shift")])
++
++(define_insn "rotrsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (rotatert:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "register_operand" "r,r")))]
++ ""
++ "ror\\t%0, %1, %2"
++ [(set_attr "type" "shift")])
++
++(define_insn "*shift_mul_constants"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ashift:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "const_int_operand" "I"))
++ (match_operand:SI 3 "const_int_operand" "I")))]
++ "TARGET_HAS_MUL && SMALL_INT (INTVAL (operands[2]) << INTVAL (operands[3]))"
++{
++ HOST_WIDE_INT mul = INTVAL (operands[2]) << INTVAL (operands[3]);
++ rtx ops[3];
++
++ ops[0] = operands[0];
++ ops[1] = operands[1];
++ ops[2] = GEN_INT (mul);
++
++ output_asm_insn ("muli\t%0, %1, %2", ops);
++ return "";
++}
++ [(set_attr "type" "mul")])
++
++
++
++
++;*****************************************************************************
++;*
++;* Converting between floating point and fixed point
++;*
++;*****************************************************************************
++(define_insn "floatsisf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (float:SF (match_operand:SI 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_floatsisf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_floatsisf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "floatsidf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (float:DF (match_operand:SI 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_floatsidf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_floatsidf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "floatunssisf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unsigned_float:SF (match_operand:SI 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_floatunssisf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_floatunssisf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "floatunssidf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (unsigned_float:DF (match_operand:SI 1 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_floatunssidf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_floatunssidf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "fixsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:SF 1 "general_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_fixsfsi2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_fixsfsi2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "fixdfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:DF 1 "general_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_fixdfsi2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_fixdfsi2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "fixunssfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:SF 1 "general_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_fixunssfsi2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_fixunssfsi2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "fixunsdfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:DF 1 "general_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_fixunsdfsi2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_fixunsdfsi2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "extendsfdf2"
++ [(set (match_operand:DF 0 "register_operand" "=r")
++ (float_extend:DF (match_operand:SF 1 "general_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_extendsfdf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_extendsfdf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++(define_insn "truncdfsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (float_truncate:SF (match_operand:DF 1 "general_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_truncdfsf2].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_truncdfsf2].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++
++
++
++
++
++
++;*****************************************************************************
++;*
++;* Prologue, Epilogue and Return
++;*
++;*****************************************************************************
++
++(define_expand "prologue"
++ [(const_int 1)]
++ ""
++{
++ expand_prologue ();
++ DONE;
++})
++
++(define_expand "epilogue"
++ [(return)]
++ ""
++{
++ expand_epilogue (false);
++ DONE;
++})
++
++(define_expand "sibcall_epilogue"
++ [(return)]
++ ""
++{
++ expand_epilogue (true);
++ DONE;
++})
++
++(define_insn "return"
++ [(return)]
++ "reload_completed && nios2_can_use_return_insn ()"
++ "ret\\t"
++)
++
++(define_insn "return_from_epilogue"
++ [(use (match_operand 0 "pmode_register_operand" ""))
++ (return)]
++ "reload_completed"
++ "ret\\t"
++)
++
++;; Block any insns from being moved before this point, since the
++;; profiling call to mcount can use various registers that aren't
++;; saved or used to pass arguments.
++
++(define_insn "blockage"
++ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
++ ""
++ ""
++ [(set_attr "type" "unknown")
++ (set_attr "length" "0")])
++
++;; This is used in compiling the unwind routines.
++(define_expand "eh_return"
++ [(use (match_operand 0 "general_operand"))]
++ ""
++{
++ if (GET_MODE (operands[0]) != Pmode)
++ operands[0] = convert_to_mode (Pmode, operands[0], 0);
++ emit_insn (gen_eh_set_ra (operands[0]));
++
++ DONE;
++})
++
++;; Clobber the return address on the stack. We can't expand this
++;; until we know where it will be put in the stack frame.
++
++(define_insn "eh_set_ra"
++ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:SI 1 "=&r"))]
++ ""
++ "#")
++
++(define_split
++ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch 1))]
++ "reload_completed"
++ [(const_int 0)]
++{
++ nios2_set_return_address (operands[0], operands[1]);
++ DONE;
++})
++
++
++;*****************************************************************************
++;*
++;* Jumps and Calls
++;*
++;*****************************************************************************
++
++(define_insn "indirect_jump"
++ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
++ ""
++ "jmp\\t%0"
++ [(set_attr "type" "control")])
++
++(define_insn "jump"
++ [(set (pc)
++ (label_ref (match_operand 0 "" "")))]
++ ""
++ "br\\t%0"
++ [(set_attr "type" "control")])
++
++
++(define_expand "call"
++ [(parallel [(call (match_operand 0 "" "")
++ (match_operand 1 "" ""))
++ (clobber (reg:SI 31))])]
++ ""
++ {
++ nios2_adjust_call_address (&XEXP (operands[0], 0));
++ }
++)
++
++(define_expand "call_value"
++ [(parallel [(set (match_operand 0 "" "")
++ (call (match_operand 1 "" "")
++ (match_operand 2 "" "")))
++ (clobber (reg:SI 31))])]
++ ""
++ {
++ nios2_adjust_call_address (&XEXP (operands[1], 0));
++ }
++)
++
++(define_insn "*call"
++ [(call (mem:QI (match_operand:SI 0 "call_operand" "i,r"))
++ (match_operand 1 "" ""))
++ (clobber (reg:SI 31))]
++ ""
++ "@
++ call\\t%0
++ callr\\t%0"
++ [(set_attr "type" "control,control")])
++
++(define_insn "*call_value"
++ [(set (match_operand 0 "" "")
++ (call (mem:QI (match_operand:SI 1 "call_operand" "i,r"))
++ (match_operand 2 "" "")))
++ (clobber (reg:SI 31))]
++ ""
++ "@
++ call\\t%1
++ callr\\t%1"
++ [(set_attr "type" "control,control")])
++
++(define_expand "sibcall"
++ [(parallel [(call (match_operand 0 "" "")
++ (match_operand 1 "" ""))
++ (return)])]
++ ""
++ {
++ nios2_adjust_call_address (&XEXP (operands[0], 0));
++ }
++)
++
++(define_expand "sibcall_value"
++ [(parallel [(set (match_operand 0 "" "")
++ (call (match_operand 1 "" "")
++ (match_operand 2 "" "")))
++ (return)])]
++ ""
++ {
++ nios2_adjust_call_address (&XEXP (operands[1], 0));
++ }
++)
++
++(define_insn "*sibcall"
++ [(call (mem:QI (match_operand:SI 0 "call_operand" "i,j"))
++ (match_operand 1 "" ""))
++ (return)]
++ ""
++ "@
++ jmpi\\t%0
++ jmp\\t%0"
++)
++
++(define_insn "*sibcall_value"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:QI (match_operand:SI 1 "call_operand" "i,j"))
++ (match_operand 2 "" "")))
++ (return)]
++ ""
++ "@
++ jmpi\\t%1
++ jmp\\t%1"
++)
++
++
++
++
++(define_expand "tablejump"
++ [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
++ (use (label_ref (match_operand 1 "" "")))])]
++ ""
++ {
++ if (flag_pic)
++ {
++ /* Hopefully, CSE will eliminate this copy. */
++ rtx reg1 = copy_addr_to_reg (gen_rtx_LABEL_REF (Pmode, operands[1]));
++ rtx reg2 = gen_reg_rtx (SImode);
++
++ emit_insn (gen_addsi3 (reg2, operands[0], reg1));
++ operands[0] = reg2;
++ }
++ }
++)
++
++(define_insn "*tablejump"
++ [(set (pc)
++ (match_operand:SI 0 "register_operand" "r"))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++ "jmp\\t%0"
++ [(set_attr "type" "control")])
++
++
++
++;*****************************************************************************
++;*
++;* Comparisons
++;*
++;*****************************************************************************
++;; Flow here is rather complex (based on MIPS):
++;;
++;; 1) The cmp{si,di,sf,df} routine is called. It deposits the
++;; arguments into the branch_cmp array, and the type into
++;; branch_type. No RTL is generated.
++;;
++;; 2) The appropriate branch define_expand is called, which then
++;; creates the appropriate RTL for the comparison and branch.
++;; Different CC modes are used, based on what type of branch is
++;; done, so that we can constrain things appropriately. There
++;; are assumptions in the rest of GCC that break if we fold the
++;; operands into the branches for integer operations, and use cc0
++;; for floating point, so we use the fp status register instead.
++;; If needed, an appropriate temporary is created to hold the result
++;; of the integer compare.
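++;;
++;; Illustrative sketch (an added note, not from the original sources): for
++;; a source fragment like "if (a < b) goto L;", step 1 runs the "cmpsi"
++;; expander, which only records a and b in branch_cmp[] and sets
++;; branch_type to CMP_SI; step 2 then runs the "blt" expander, whose call
++;; to gen_int_relational (LT, NULL_RTX, branch_cmp[0], branch_cmp[1], L)
++;; emits the compare-and-branch RTL matched by the "*cbranch" pattern
++;; defined further down.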
++
++(define_expand "cmpsi"
++ [(set (cc0)
++ (compare:CC (match_operand:SI 0 "register_operand" "")
++ (match_operand:SI 1 "arith_operand" "")))]
++ ""
++{
++ branch_cmp[0] = operands[0];
++ branch_cmp[1] = operands[1];
++ branch_type = CMP_SI;
++ DONE;
++})
++
++(define_expand "tstsi"
++ [(set (cc0)
++ (match_operand:SI 0 "register_operand" ""))]
++ ""
++{
++ branch_cmp[0] = operands[0];
++ branch_cmp[1] = const0_rtx;
++ branch_type = CMP_SI;
++ DONE;
++})
++
++(define_expand "cmpsf"
++ [(set (cc0)
++ (compare:CC (match_operand:SF 0 "register_operand" "")
++ (match_operand:SF 1 "register_operand" "")))]
++ "(nios2_fpu_insns[nios2_fpu_nios2_sltsf].N >= 0
++ || nios2_fpu_insns[nios2_fpu_nios2_sgtsf].N >= 0)
++ && (nios2_fpu_insns[nios2_fpu_nios2_sgesf].N >= 0
++ || nios2_fpu_insns[nios2_fpu_nios2_slesf].N >= 0)
++ && nios2_fpu_insns[nios2_fpu_nios2_seqsf].N >= 0
++ && nios2_fpu_insns[nios2_fpu_nios2_snesf].N >= 0"
++{
++ branch_cmp[0] = operands[0];
++ branch_cmp[1] = operands[1];
++ branch_type = CMP_SF;
++ DONE;
++})
++
++(define_expand "cmpdf"
++ [(set (cc0)
++ (compare:CC (match_operand:DF 0 "register_operand" "")
++ (match_operand:DF 1 "register_operand" "")))]
++ "(nios2_fpu_insns[nios2_fpu_nios2_sltdf].N >= 0
++ || nios2_fpu_insns[nios2_fpu_nios2_sgtdf].N >= 0)
++ && (nios2_fpu_insns[nios2_fpu_nios2_sgedf].N >= 0
++ || nios2_fpu_insns[nios2_fpu_nios2_sledf].N >= 0)
++ && nios2_fpu_insns[nios2_fpu_nios2_seqdf].N >= 0
++ && nios2_fpu_insns[nios2_fpu_nios2_snedf].N >= 0"
++{
++ branch_cmp[0] = operands[0];
++ branch_cmp[1] = operands[1];
++ branch_type = CMP_DF;
++ DONE;
++})
++
++
++;*****************************************************************************
++;*
++;* setting a register from a comparison
++;*
++;*****************************************************************************
++
++(define_expand "seq"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (eq:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI && branch_type != CMP_SF && branch_type != CMP_DF)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (EQ, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*seq"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (eq:SI (match_operand:SI 1 "reg_or_0_operand" "%rM")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++ "cmpeq%i2\\t%0, %z1, %z2"
++ [(set_attr "type" "alu")])
++
++
++(define_insn "nios2_seqsf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (eq:SI (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_seqsf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_seqsf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_seqdf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (eq:SI (match_operand:DF 1 "register_operand" "%r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_seqdf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_seqdf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "sne"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ne:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI && branch_type != CMP_SF && branch_type != CMP_DF)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (NE, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sne"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ne:SI (match_operand:SI 1 "reg_or_0_operand" "%rM")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++ "cmpne%i2\\t%0, %z1, %z2"
++ [(set_attr "type" "alu")])
++
++
++(define_insn "nios2_snesf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ne:SI (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_snesf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_snesf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_snedf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ne:SI (match_operand:DF 1 "register_operand" "%r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_snedf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_snedf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "sgt"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (gt:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI && branch_type != CMP_SF && branch_type != CMP_DF)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (GT, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sgt"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (gt:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "reg_or_0_operand" "rM")))]
++ ""
++ "cmplt\\t%0, %z2, %z1"
++ [(set_attr "type" "alu")])
++
++
++(define_insn "nios2_sgtsf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (gt:SI (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sgtsf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sgtsf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_sgtdf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (gt:SI (match_operand:DF 1 "register_operand" "r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sgtdf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sgtdf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "sge"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ge:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI && branch_type != CMP_SF && branch_type != CMP_DF)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (GE, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sge"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ge:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++ "cmpge%i2\\t%0, %z1, %z2"
++ [(set_attr "type" "alu")])
++
++
++(define_insn "nios2_sgesf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ge:SI (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sgesf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sgesf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_sgedf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ge:SI (match_operand:DF 1 "register_operand" "r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sgedf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sgedf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "sle"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (le:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI && branch_type != CMP_SF && branch_type != CMP_DF)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (LE, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sle"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (le:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "reg_or_0_operand" "rM")))]
++ ""
++ "cmpge\\t%0, %z2, %z1"
++ [(set_attr "type" "alu")])
++
++
++(define_insn "nios2_slesf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (le:SI (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_slesf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_slesf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_sledf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (le:SI (match_operand:DF 1 "register_operand" "r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sledf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sledf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "slt"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (lt:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI && branch_type != CMP_SF && branch_type != CMP_DF)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (LT, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*slt"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (lt:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++ "cmplt%i2\\t%0, %z1, %z2"
++ [(set_attr "type" "alu")])
++
++
++(define_insn "nios2_sltsf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (lt:SI (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sltsf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sltsf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_sltdf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (lt:SI (match_operand:DF 1 "register_operand" "r")
++ (match_operand:DF 2 "register_operand" "r")))]
++ "nios2_fpu_insns[nios2_fpu_nios2_sltdf].N >= 0"
++ {
++ return (*nios2_fpu_insns[nios2_fpu_nios2_sltdf].output) (insn);
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "sgtu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (gtu:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (GTU, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sgtu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (gtu:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "reg_or_0_operand" "rM")))]
++ ""
++ "cmpltu\\t%0, %z2, %z1"
++ [(set_attr "type" "alu")])
++
++
++(define_expand "sgeu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (geu:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (GEU, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sgeu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (geu:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "uns_arith_operand" "rJ")))]
++ ""
++ "cmpgeu%i2\\t%0, %z1, %z2"
++ [(set_attr "type" "alu")])
++
++(define_expand "sleu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (leu:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (LEU, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sleu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (leu:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "reg_or_0_operand" "rM")))]
++ ""
++ "cmpgeu\\t%0, %z2, %z1"
++ [(set_attr "type" "alu")])
++
++
++(define_expand "sltu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ltu:SI (match_dup 1)
++ (match_dup 2)))]
++ ""
++{
++ if (branch_type != CMP_SI)
++ FAIL;
++
++ /* set up operands from compare. */
++ operands[1] = branch_cmp[0];
++ operands[2] = branch_cmp[1];
++
++ gen_int_relational (LTU, operands[0], operands[1], operands[2], NULL_RTX);
++ DONE;
++})
++
++
++(define_insn "*sltu"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ltu:SI (match_operand:SI 1 "reg_or_0_operand" "rM")
++ (match_operand:SI 2 "uns_arith_operand" "rJ")))]
++ ""
++ "cmpltu%i2\\t%0, %z1, %z2"
++ [(set_attr "type" "alu")])
++
++
++
++
++;*****************************************************************************
++;*
++;* branches
++;*
++;*****************************************************************************
++
++(define_insn "*cbranch"
++ [(set (pc)
++ (if_then_else
++ (match_operator:SI 0 "comparison_operator"
++ [(match_operand:SI 2 "reg_or_0_operand" "rM")
++ (match_operand:SI 3 "reg_or_0_operand" "rM")])
++ (label_ref (match_operand 1 "" ""))
++ (pc)))]
++ ""
++ "b%0\\t%z2, %z3, %l1"
++ [(set_attr "type" "control")])
++
++
++(define_insn "nios2_cbranch_sf"
++ [(set (pc)
++ (if_then_else
++ (match_operator:SI 0 "comparison_operator"
++ [(match_operand:SF 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")])
++ (label_ref (match_operand 1 "" ""))
++ (pc)))]
++ ""
++ {
++ return nios2_output_fpu_insn_cmps (insn, GET_CODE (operands[0]));
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_insn "nios2_cbranch_df"
++ [(set (pc)
++ (if_then_else
++ (match_operator:SI 0 "comparison_operator"
++ [(match_operand:DF 2 "register_operand" "r")
++ (match_operand:DF 3 "register_operand" "r")])
++ (label_ref (match_operand 1 "" ""))
++ (pc)))]
++ ""
++ {
++ return nios2_output_fpu_insn_cmpd (insn, GET_CODE (operands[0]));
++ }
++ [(set_attr "type" "custom")])
++
++
++(define_expand "beq"
++ [(set (pc)
++ (if_then_else (eq:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (EQ, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++
++(define_expand "bne"
++ [(set (pc)
++ (if_then_else (ne:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (NE, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++
++(define_expand "bgt"
++ [(set (pc)
++ (if_then_else (gt:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (GT, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++(define_expand "bge"
++ [(set (pc)
++ (if_then_else (ge:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (GE, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++(define_expand "ble"
++ [(set (pc)
++ (if_then_else (le:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (LE, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++(define_expand "blt"
++ [(set (pc)
++ (if_then_else (lt:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (LT, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++
++(define_expand "bgtu"
++ [(set (pc)
++ (if_then_else (gtu:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (GTU, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++(define_expand "bgeu"
++ [(set (pc)
++ (if_then_else (geu:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (GEU, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++(define_expand "bleu"
++ [(set (pc)
++ (if_then_else (leu:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (LEU, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++(define_expand "bltu"
++ [(set (pc)
++ (if_then_else (ltu:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ gen_int_relational (LTU, NULL_RTX, branch_cmp[0], branch_cmp[1], operands[0]);
++ DONE;
++})
++
++
++;*****************************************************************************
++;*
++;* String and Block Operations
++;*
++;*****************************************************************************
++
++; ??? This is all really a hack to get Dhrystone to work as fast as possible
++; things to be fixed:
++; * let the compiler core handle all of this; for that to work, the extra
++; aliasing needs to be addressed.
++; * we use three temporary registers for loading and storing to ensure no
++;   ld use stalls; this is excessive, because after the first ld/st only
++; two are needed. Only two would be needed all the way through if
++; we could schedule with other code. Consider:
++; 1 ld $1, 0($src)
++; 2 ld $2, 4($src)
++; 3 ld $3, 8($src)
++; 4 st $1, 0($dest)
++; 5 ld $1, 12($src)
++; 6 st $2, 4($src)
++; 7 etc.
++;   The first store has to wait until 4. If it does not, there will be one
++; cycle of stalling. However, if any other instruction could be placed
++; between 1 and 4, $3 would not be needed.
++; * on the small core we probably don't want to ever do this ourselves because there
++;   is no ld use stall.
++
++(define_expand "movstrsi"
++ [(parallel [(set (match_operand:BLK 0 "general_operand" "")
++ (match_operand:BLK 1 "general_operand" ""))
++ (use (match_operand:SI 2 "const_int_operand" ""))
++ (use (match_operand:SI 3 "const_int_operand" ""))
++ (clobber (match_scratch:SI 4 "=&r"))
++ (clobber (match_scratch:SI 5 "=&r"))
++ (clobber (match_scratch:SI 6 "=&r"))])]
++ "TARGET_INLINE_MEMCPY"
++{
++ rtx ld_addr_reg, st_addr_reg;
++
++  /* If the predicate for op2 fails in expr.c:emit_block_move_via_movstr,
++     it tries to copy to a register, but does not re-try the predicate.
++     ??? Instead of fixing expr.c, I fix it here.  */
++ if (!const_int_operand (operands[2], SImode))
++ FAIL;
++
++  /* ??? There are some magic numbers which need to be sorted out here;
++     they are chosen to avoid increasing code size hugely or going
++     out of range of offset addressing.  */
++ if (INTVAL (operands[3]) < 4)
++ FAIL;
++ if (!optimize
++ || (optimize_size && INTVAL (operands[2]) > 12)
++ || (optimize < 3 && INTVAL (operands[2]) > 100)
++ || INTVAL (operands[2]) > 200)
++ FAIL;
++
++ st_addr_reg
++ = replace_equiv_address (operands[0],
++ copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
++ ld_addr_reg
++ = replace_equiv_address (operands[1],
++ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
++ emit_insn (gen_movstrsi_internal (st_addr_reg, ld_addr_reg,
++ operands[2], operands[3]));
++
++ DONE;
++})
++
++
++(define_insn "movstrsi_internal"
++ [(set (match_operand:BLK 0 "memory_operand" "=o")
++ (match_operand:BLK 1 "memory_operand" "o"))
++ (use (match_operand:SI 2 "const_int_operand" "i"))
++ (use (match_operand:SI 3 "const_int_operand" "i"))
++ (clobber (match_scratch:SI 4 "=&r"))
++ (clobber (match_scratch:SI 5 "=&r"))
++ (clobber (match_scratch:SI 6 "=&r"))]
++ "TARGET_INLINE_MEMCPY"
++{
++ int ld_offset = INTVAL (operands[2]);
++ int ld_len = INTVAL (operands[2]);
++ int ld_reg = 0;
++ rtx ld_addr_reg = XEXP (operands[1], 0);
++ int st_offset = INTVAL (operands[2]);
++ int st_len = INTVAL (operands[2]);
++ int st_reg = 0;
++ rtx st_addr_reg = XEXP (operands[0], 0);
++ int delay_count = 0;
++
++ /* ops[0] is the address used by the insn
++ ops[1] is the register being loaded or stored */
++ rtx ops[2];
++
++ gcc_assert (INTVAL (operands[3]) >= 4);
++
++ while (ld_offset >= 4)
++ {
++ /* if the load use delay has been met, I can start
++ storing */
++ if (delay_count >= 3)
++ {
++ ops[0] = gen_rtx_MEM (SImode,
++ plus_constant (st_addr_reg, st_len - st_offset));
++ ops[1] = operands[st_reg + 4];
++ output_asm_insn ("stw\t%1, %0", ops);
++
++ st_reg = (st_reg + 1) % 3;
++ st_offset -= 4;
++ }
++
++ ops[0] = gen_rtx_MEM (SImode,
++ plus_constant (ld_addr_reg, ld_len - ld_offset));
++ ops[1] = operands[ld_reg + 4];
++ output_asm_insn ("ldw\t%1, %0", ops);
++
++ ld_reg = (ld_reg + 1) % 3;
++ ld_offset -= 4;
++ delay_count++;
++ }
++
++ if (ld_offset >= 2)
++ {
++ /* if the load use delay has been met, I can start
++ storing */
++ if (delay_count >= 3)
++ {
++ ops[0] = gen_rtx_MEM (SImode,
++ plus_constant (st_addr_reg, st_len - st_offset));
++ ops[1] = operands[st_reg + 4];
++ output_asm_insn ("stw\t%1, %0", ops);
++
++ st_reg = (st_reg + 1) % 3;
++ st_offset -= 4;
++ }
++
++ ops[0] = gen_rtx_MEM (HImode,
++ plus_constant (ld_addr_reg, ld_len - ld_offset));
++ ops[1] = operands[ld_reg + 4];
++ output_asm_insn ("ldh\t%1, %0", ops);
++
++ ld_reg = (ld_reg + 1) % 3;
++ ld_offset -= 2;
++ delay_count++;
++ }
++
++ if (ld_offset >= 1)
++ {
++ /* if the load use delay has been met, I can start
++ storing */
++ if (delay_count >= 3)
++ {
++ ops[0] = gen_rtx_MEM (SImode,
++ plus_constant (st_addr_reg, st_len - st_offset));
++ ops[1] = operands[st_reg + 4];
++ output_asm_insn ("stw\t%1, %0", ops);
++
++ st_reg = (st_reg + 1) % 3;
++ st_offset -= 4;
++ }
++
++ ops[0] = gen_rtx_MEM (QImode,
++ plus_constant (ld_addr_reg, ld_len - ld_offset));
++ ops[1] = operands[ld_reg + 4];
++ output_asm_insn ("ldb\t%1, %0", ops);
++
++ ld_reg = (ld_reg + 1) % 3;
++ ld_offset -= 1;
++ delay_count++;
++ }
++
++ while (st_offset >= 4)
++ {
++ ops[0] = gen_rtx_MEM (SImode,
++ plus_constant (st_addr_reg, st_len - st_offset));
++ ops[1] = operands[st_reg + 4];
++ output_asm_insn ("stw\t%1, %0", ops);
++
++ st_reg = (st_reg + 1) % 3;
++ st_offset -= 4;
++ }
++
++ while (st_offset >= 2)
++ {
++ ops[0] = gen_rtx_MEM (HImode,
++ plus_constant (st_addr_reg, st_len - st_offset));
++ ops[1] = operands[st_reg + 4];
++ output_asm_insn ("sth\t%1, %0", ops);
++
++ st_reg = (st_reg + 1) % 3;
++ st_offset -= 2;
++ }
++
++ while (st_offset >= 1)
++ {
++ ops[0] = gen_rtx_MEM (QImode,
++ plus_constant (st_addr_reg, st_len - st_offset));
++ ops[1] = operands[st_reg + 4];
++ output_asm_insn ("stb\t%1, %0", ops);
++
++ st_reg = (st_reg + 1) % 3;
++ st_offset -= 1;
++ }
++
++ return "";
++}
++; ??? lengths are not being used yet, but I will probably forget
++; to update this once I am using lengths, so set it to something
++; definitely big enough to cover it. 400 allows for 200 bytes
++; of motion.
++ [(set_attr "length" "400")])
++
++
++
++;*****************************************************************************
++;*
++;* Custom instructions
++;*
++;*****************************************************************************
++
++(define_constants [
++ (CUSTOM_N 100)
++ (CUSTOM_NI 101)
++ (CUSTOM_NF 102)
++ (CUSTOM_NP 103)
++ (CUSTOM_NII 104)
++ (CUSTOM_NIF 105)
++ (CUSTOM_NIP 106)
++ (CUSTOM_NFI 107)
++ (CUSTOM_NFF 108)
++ (CUSTOM_NFP 109)
++ (CUSTOM_NPI 110)
++ (CUSTOM_NPF 111)
++ (CUSTOM_NPP 112)
++ (CUSTOM_IN 113)
++ (CUSTOM_INI 114)
++ (CUSTOM_INF 115)
++ (CUSTOM_INP 116)
++ (CUSTOM_INII 117)
++ (CUSTOM_INIF 118)
++ (CUSTOM_INIP 119)
++ (CUSTOM_INFI 120)
++ (CUSTOM_INFF 121)
++ (CUSTOM_INFP 122)
++ (CUSTOM_INPI 123)
++ (CUSTOM_INPF 124)
++ (CUSTOM_INPP 125)
++ (CUSTOM_FN 126)
++ (CUSTOM_FNI 127)
++ (CUSTOM_FNF 128)
++ (CUSTOM_FNP 129)
++ (CUSTOM_FNII 130)
++ (CUSTOM_FNIF 131)
++ (CUSTOM_FNIP 132)
++ (CUSTOM_FNFI 133)
++ (CUSTOM_FNFF 134)
++ (CUSTOM_FNFP 135)
++ (CUSTOM_FNPI 136)
++ (CUSTOM_FNPF 137)
++ (CUSTOM_FNPP 138)
++ (CUSTOM_PN 139)
++ (CUSTOM_PNI 140)
++ (CUSTOM_PNF 141)
++ (CUSTOM_PNP 142)
++ (CUSTOM_PNII 143)
++ (CUSTOM_PNIF 144)
++ (CUSTOM_PNIP 145)
++ (CUSTOM_PNFI 146)
++ (CUSTOM_PNFF 147)
++ (CUSTOM_PNFP 148)
++ (CUSTOM_PNPI 149)
++ (CUSTOM_PNPF 150)
++ (CUSTOM_PNPP 151)
++])
++
++
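++;; Reader's note (added here, not in the original sources): in the pattern
++;; names below, 'n' stands for the opcode operand that every custom
++;; instruction takes; an optional leading 'i', 'f' or 'p' gives the result
++;; type (int, float, pointer) and any trailing 'i'/'f'/'p' letters give the
++;; data operand types.  For example, "custom_pnif" returns a pointer and
++;; takes the opcode plus an int and a float.  These patterns are what the
++;; __builtin_custom_* functions expand to, e.g. (illustratively)
++;; "int y = __builtin_custom_ini (OPC, x);" maps to "custom_ini".
++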
++(define_insn "custom_n"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")] CUSTOM_N)]
++ ""
++ "custom\\t%0, zero, zero, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_ni"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")] CUSTOM_NI)]
++ ""
++ "custom\\t%0, zero, %1, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nf"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SF 1 "register_operand" "r")] CUSTOM_NF)]
++ ""
++ "custom\\t%0, zero, %1, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_np"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")] CUSTOM_NP)]
++ ""
++ "custom\\t%0, zero, %1, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nii"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")] CUSTOM_NII)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nif"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")] CUSTOM_NIF)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nip"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")] CUSTOM_NIP)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nfi"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")] CUSTOM_NFI)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nff"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")] CUSTOM_NFF)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_nfp"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")] CUSTOM_NFP)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_npi"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")] CUSTOM_NPI)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_npf"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")] CUSTOM_NPF)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_npp"
++ [(unspec_volatile [(match_operand:SI 0 "custom_insn_opcode" "N")
++ (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")] CUSTOM_NPP)]
++ ""
++ "custom\\t%0, zero, %1, %2"
++ [(set_attr "type" "custom")])
++
++
++
++(define_insn "custom_in"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")]
++ CUSTOM_IN))]
++ ""
++ "custom\\t%1, %0, zero, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_ini"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")]
++ CUSTOM_INI))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")]
++ CUSTOM_INF))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inp"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")]
++ CUSTOM_INP))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inii"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_INII))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inif"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_INIF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inip"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_INIP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_infi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_INFI))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inff"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_INFF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_infp"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_INFP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inpi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_INPI))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inpf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_INPF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_inpp"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_INPP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++
++
++
++
++(define_insn "custom_fn"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")]
++ CUSTOM_FN))]
++ ""
++ "custom\\t%1, %0, zero, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fni"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")]
++ CUSTOM_FNI))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnf"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")]
++ CUSTOM_FNF))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnp"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")]
++ CUSTOM_FNP))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnii"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_FNII))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnif"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_FNIF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnip"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_FNIP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnfi"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_FNFI))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnff"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_FNFF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnfp"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_FNFP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnpi"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_FNPI))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnpf"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_FNPF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_fnpp"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec_volatile:SF [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_FNPP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++
++
++(define_insn "custom_pn"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")]
++ CUSTOM_PN))]
++ ""
++ "custom\\t%1, %0, zero, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pni"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")]
++ CUSTOM_PNI))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")]
++ CUSTOM_PNF))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnp"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")]
++ CUSTOM_PNP))]
++ ""
++ "custom\\t%1, %0, %2, zero"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnii"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_PNII))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnif"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_PNIF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnip"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_PNIP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnfi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_PNFI))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnff"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_PNFF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnfp"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_PNFP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnpi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_PNPI))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnpf"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r")]
++ CUSTOM_PNPF))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++(define_insn "custom_pnpp"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "custom_insn_opcode" "N")
++ (match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "register_operand" "r")]
++ CUSTOM_PNPP))]
++ ""
++ "custom\\t%1, %0, %2, %3"
++ [(set_attr "type" "custom")])
++
++
++
++
++
++
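++;; The custom_* patterns above are normally reached through the
++;; __builtin_custom_* intrinsics: the leading letter of the suffix gives
++;; the result type (i = int, f = float, p = pointer, none = void), the
++;; 'n' stands for the constant opcode, and any trailing letters give the
++;; operand types.  A sketch in C, assuming a hypothetical opcode number 5
++;; wired up as a single-precision multiply:
++;;
++;;   /* SF result, opcode N, two SF operands -> matches custom_fnff.  */
++;;   float r = __builtin_custom_fnff (5, a, b);
++;;
++;; The opcode must be a compile-time constant in the range 0-255.
++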
++;*****************************************************************************
++;*
++;* Misc
++;*
++;*****************************************************************************
++
++(define_insn "nop"
++ [(const_int 0)]
++ ""
++ "nop\\t"
++ [(set_attr "type" "alu")])
++
++(define_insn "sync"
++ [(unspec_volatile [(const_int 0)] UNSPEC_SYNC)]
++ ""
++ "sync\\t"
++ [(set_attr "type" "control")])
++
++
++(define_insn "rdctl"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand:SI 1 "rdwrctl_operand" "O")]
++ UNSPEC_RDCTL))]
++ ""
++ "rdctl\\t%0, ctl%1"
++ [(set_attr "type" "control")])
++
++(define_insn "wrctl"
++ [(unspec_volatile:SI [(match_operand:SI 0 "rdwrctl_operand" "O")
++ (match_operand:SI 1 "reg_or_0_operand" "rM")]
++ UNSPEC_WRCTL)]
++ ""
++ "wrctl\\tctl%0, %z1"
++ [(set_attr "type" "control")])
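++;; For example, reading the ipending control register (ctl4) is expected
++;; to assemble to something like
++;;   rdctl   r2, ctl4
++;; and writing status (ctl0) to
++;;   wrctl   ctl0, r3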
++
++;Used to signal a stack overflow
++(define_insn "trap"
++ [(unspec_volatile [(const_int 0)] UNSPEC_TRAP)]
++ ""
++ "break\\t3"
++ [(set_attr "type" "control")])
++
++(define_insn "stack_overflow_detect_and_trap"
++ [(unspec_volatile [(const_int 0)] UNSPEC_STACK_OVERFLOW_DETECT_AND_TRAP)]
++ ""
++ "bgeu\\tsp, et, 1f\;break\\t3\;1:"
++ [(set_attr "type" "control")])
++
++;Load the GOT register.
++(define_insn "load_got_register"
++ [(set (match_operand:SI 0 "register_operand" "=&r")
++ (unspec:SI [(const_int 0)] UNSPEC_LOAD_GOT_REGISTER))
++ (set (match_operand:SI 1 "register_operand" "=r")
++ (unspec:SI [(const_int 0)] UNSPEC_LOAD_GOT_REGISTER))]
++ ""
++ "nextpc\\t%0
++\\t1:
++\\tmovhi\\t%1, %%hiadj(_GLOBAL_OFFSET_TABLE_ - 1b)
++\\taddi\\t%1, %1, %%lo(_GLOBAL_OFFSET_TABLE_ - 1b)"
++ [(set_attr "length" "12")])
++
++;; When generating pic, we need to load the symbol offset into a register.
++;; So that the optimizer does not confuse this with a normal symbol load
++;; we use an unspec. The offset will be loaded from a constant pool entry,
++;; since that is the only type of relocation we can use.
++
++;; The rather odd constraints on the following are to force reload to leave
++;; the insn alone, and to force the minipool generation pass to then move
++;; the GOT symbol to memory.
++
++;; FIXME:these uses look unnecessary.
++(define_insn "pic_load_addr"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_PIC_SYM))
++ (use (match_dup 1))]
++ "flag_pic"
++ "ldw\\t%0, %%got(%2)(%1)")
++
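++;; With the GOT pointer in, say, r22, pic_load_addr for a symbol `foo'
++;; is expected to assemble to something like
++;;   ldw     r2, %got(foo)(r22)
++;; while pic_load_call_addr below uses the %call() relocation instead.
++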
++(define_insn "pic_load_call_addr"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_PIC_CALL_SYM))
++ (use (match_dup 1))]
++ "flag_pic"
++ "ldw\\t%0, %%call(%2)(%1)")
++
++;; TLS support
++
++(define_insn "add_tls_gd"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_ADD_TLS_GD))]
++ ""
++ "addi\t%0, %1, %%tls_gd(%2)")
++
++(define_insn "load_tls_ie"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_LOAD_TLS_IE))]
++ ""
++ "ldw\t%0, %%tls_ie(%2)(%1)")
++
++(define_insn "add_tls_ldm"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_ADD_TLS_LDM))]
++ ""
++ "addi\t%0, %1, %%tls_ldm(%2)")
++
++(define_insn "add_tls_ldo"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_ADD_TLS_LDO))]
++ ""
++ "addi\t%0, %1, %%tls_ldo(%2)")
++
++(define_insn "add_tls_le"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "" "mX")] UNSPEC_ADD_TLS_LE))]
++ ""
++ "addi\t%0, %1, %%tls_le(%2)")
++
++
++;*****************************************************************************
++;*
++;* Peepholes
++;*
++;*****************************************************************************
++
++
++;; Local Variables:
++;; mode: lisp
++;; End:
+Index: gcc-4.1.2/gcc/config/nios2/nios2.opt
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/nios2.opt 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,519 @@
++; NOT ASSIGNED TO FSF. COPYRIGHT ALTERA.
++; Options for the Nios II port of the compiler.
++
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 2, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++; WARRANTY; without even the implied warranty of MERCHANTABILITY or
++; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++; for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING. If not, write to the Free
++; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
++; 02110-1301, USA.
++
++mhw-div
++Target Report Mask(HAS_DIV)
++Enable DIV, DIVU
++
++mhw-mul
++Target Report Mask(HAS_MUL)
++Enable MUL instructions
++
++mhw-mulx
++Target Report Mask(HAS_MULX)
++Enable MULX instructions, assume fast shifter
++
++mfast-sw-div
++Target Report Mask(FAST_SW_DIV)
++Use table based fast divide (default at -O3)
++
++minline-memory
++Target Report Mask(INLINE_MEMCPY)
++Inline small memcpy (default when optimizing)
++
++mbypass-cache
++Target Report Mask(BYPASS_CACHE)
++All ld/st instructions use io variants
++
++mstack-check
++Target Report Mask(STACK_CHECK)
++Enable stack limit checking
++
++mreverse-bitfields
++Target Report Mask(REVERSE_BITFIELDS)
++Reverse the order of bitfields in a struct
++
++meb
++Target Report RejectNegative Mask(BIG_ENDIAN)
++Use big-endian byte order
++
++mel
++Target Report RejectNegative InverseMask(BIG_ENDIAN)
++Use little-endian byte order
++
++mcustom-fpu-cfg=
++Target RejectNegative Joined Var(nios2_custom_fpu_cfg_string) VarExists
++Floating point custom instruction configuration name
++
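++; A named configuration such as -mcustom-fpu-cfg=60-1 is shorthand for a
++; fixed set of the individual -mcustom-<insn>= options below; the actual
++; expansion is performed in the target-specific option handling code, not
++; in this file.
++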
++mno-custom-ftruncds
++Target Report RejectNegative Var(nios2_custom_ftruncds, -1) VarExists
++Do not use the ftruncds custom instruction
++
++mcustom-ftruncds=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_ftruncds) VarExists
++Integer id (N) of ftruncds custom instruction
++
++mno-custom-fextsd
++Target Report RejectNegative Var(nios2_custom_fextsd, -1) VarExists
++Do not use the fextsd custom instruction
++
++mcustom-fextsd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fextsd) VarExists
++Integer id (N) of fextsd custom instruction
++
++mno-custom-fixdu
++Target Report RejectNegative Var(nios2_custom_fixdu, -1) VarExists
++Do not use the fixdu custom instruction
++
++mcustom-fixdu=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fixdu) VarExists
++Integer id (N) of fixdu custom instruction
++
++mno-custom-fixdi
++Target Report RejectNegative Var(nios2_custom_fixdi, -1) VarExists
++Do not use the fixdi custom instruction
++
++mcustom-fixdi=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fixdi) VarExists
++Integer id (N) of fixdi custom instruction
++
++mno-custom-fixsu
++Target Report RejectNegative Var(nios2_custom_fixsu, -1) VarExists
++Do not use the fixsu custom instruction
++
++mcustom-fixsu=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fixsu) VarExists
++Integer id (N) of fixsu custom instruction
++
++mno-custom-fixsi
++Target Report RejectNegative Var(nios2_custom_fixsi, -1) VarExists
++Do not use the fixsi custom instruction
++
++mcustom-fixsi=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fixsi) VarExists
++Integer id (N) of fixsi custom instruction
++
++mno-custom-floatud
++Target Report RejectNegative Var(nios2_custom_floatud, -1) VarExists
++Do not use the floatud custom instruction
++
++mcustom-floatud=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_floatud) VarExists
++Integer id (N) of floatud custom instruction
++
++mno-custom-floatid
++Target Report RejectNegative Var(nios2_custom_floatid, -1) VarExists
++Do not use the floatid custom instruction
++
++mcustom-floatid=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_floatid) VarExists
++Integer id (N) of floatid custom instruction
++
++mno-custom-floatus
++Target Report RejectNegative Var(nios2_custom_floatus, -1) VarExists
++Do not use the floatus custom instruction
++
++mcustom-floatus=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_floatus) VarExists
++Integer id (N) of floatus custom instruction
++
++mno-custom-floatis
++Target Report RejectNegative Var(nios2_custom_floatis, -1) VarExists
++Do not use the floatis custom instruction
++
++mcustom-floatis=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_floatis) VarExists
++Integer id (N) of floatis custom instruction
++
++mno-custom-fcmpned
++Target Report RejectNegative Var(nios2_custom_fcmpned, -1) VarExists
++Do not use the fcmpned custom instruction
++
++mcustom-fcmpned=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpned) VarExists
++Integer id (N) of fcmpned custom instruction
++
++mno-custom-fcmpeqd
++Target Report RejectNegative Var(nios2_custom_fcmpeqd, -1) VarExists
++Do not use the fcmpeqd custom instruction
++
++mcustom-fcmpeqd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpeqd) VarExists
++Integer id (N) of fcmpeqd custom instruction
++
++mno-custom-fcmpged
++Target Report RejectNegative Var(nios2_custom_fcmpged, -1) VarExists
++Do not use the fcmpged custom instruction
++
++mcustom-fcmpged=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpged) VarExists
++Integer id (N) of fcmpged custom instruction
++
++mno-custom-fcmpgtd
++Target Report RejectNegative Var(nios2_custom_fcmpgtd, -1) VarExists
++Do not use the fcmpgtd custom instruction
++
++mcustom-fcmpgtd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpgtd) VarExists
++Integer id (N) of fcmpgtd custom instruction
++
++mno-custom-fcmpled
++Target Report RejectNegative Var(nios2_custom_fcmpled, -1) VarExists
++Do not use the fcmpled custom instruction
++
++mcustom-fcmpled=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpled) VarExists
++Integer id (N) of fcmpled custom instruction
++
++mno-custom-fcmpltd
++Target Report RejectNegative Var(nios2_custom_fcmpltd, -1) VarExists
++Do not use the fcmpltd custom instruction
++
++mcustom-fcmpltd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpltd) VarExists
++Integer id (N) of fcmpltd custom instruction
++
++mno-custom-flogd
++Target Report RejectNegative Var(nios2_custom_flogd, -1) VarExists
++Do not use the flogd custom instruction
++
++mcustom-flogd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_flogd) VarExists
++Integer id (N) of flogd custom instruction
++
++mno-custom-fexpd
++Target Report RejectNegative Var(nios2_custom_fexpd, -1) VarExists
++Do not use the fexpd custom instruction
++
++mcustom-fexpd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fexpd) VarExists
++Integer id (N) of fexpd custom instruction
++
++mno-custom-fatand
++Target Report RejectNegative Var(nios2_custom_fatand, -1) VarExists
++Do not use the fatand custom instruction
++
++mcustom-fatand=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fatand) VarExists
++Integer id (N) of fatand custom instruction
++
++mno-custom-ftand
++Target Report RejectNegative Var(nios2_custom_ftand, -1) VarExists
++Do not use the ftand custom instruction
++
++mcustom-ftand=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_ftand) VarExists
++Integer id (N) of ftand custom instruction
++
++mno-custom-fsind
++Target Report RejectNegative Var(nios2_custom_fsind, -1) VarExists
++Do not use the fsind custom instruction
++
++mcustom-fsind=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fsind) VarExists
++Integer id (N) of fsind custom instruction
++
++mno-custom-fcosd
++Target Report RejectNegative Var(nios2_custom_fcosd, -1) VarExists
++Do not use the fcosd custom instruction
++
++mcustom-fcosd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcosd) VarExists
++Integer id (N) of fcosd custom instruction
++
++mno-custom-fsqrtd
++Target Report RejectNegative Var(nios2_custom_fsqrtd, -1) VarExists
++Do not use the fsqrtd custom instruction
++
++mcustom-fsqrtd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fsqrtd) VarExists
++Integer id (N) of fsqrtd custom instruction
++
++mno-custom-fabsd
++Target Report RejectNegative Var(nios2_custom_fabsd, -1) VarExists
++Do not use the fabsd custom instruction
++
++mcustom-fabsd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fabsd) VarExists
++Integer id (N) of fabsd custom instruction
++
++mno-custom-fnegd
++Target Report RejectNegative Var(nios2_custom_fnegd, -1) VarExists
++Do not use the fnegd custom instruction
++
++mcustom-fnegd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fnegd) VarExists
++Integer id (N) of fnegd custom instruction
++
++mno-custom-fmaxd
++Target Report RejectNegative Var(nios2_custom_fmaxd, -1) VarExists
++Do not use the fmaxd custom instruction
++
++mcustom-fmaxd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fmaxd) VarExists
++Integer id (N) of fmaxd custom instruction
++
++mno-custom-fmind
++Target Report RejectNegative Var(nios2_custom_fmind, -1) VarExists
++Do not use the fmind custom instruction
++
++mcustom-fmind=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fmind) VarExists
++Integer id (N) of fmind custom instruction
++
++mno-custom-fdivd
++Target Report RejectNegative Var(nios2_custom_fdivd, -1) VarExists
++Do not use the fdivd custom instruction
++
++mcustom-fdivd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fdivd) VarExists
++Integer id (N) of fdivd custom instruction
++
++mno-custom-fmuld
++Target Report RejectNegative Var(nios2_custom_fmuld, -1) VarExists
++Do not use the fmuld custom instruction
++
++mcustom-fmuld=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fmuld) VarExists
++Integer id (N) of fmuld custom instruction
++
++mno-custom-fsubd
++Target Report RejectNegative Var(nios2_custom_fsubd, -1) VarExists
++Do not use the fsubd custom instruction
++
++mcustom-fsubd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fsubd) VarExists
++Integer id (N) of fsubd custom instruction
++
++mno-custom-faddd
++Target Report RejectNegative Var(nios2_custom_faddd, -1) VarExists
++Do not use the faddd custom instruction
++
++mcustom-faddd=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_faddd) VarExists
++Integer id (N) of faddd custom instruction
++
++mno-custom-fcmpnes
++Target Report RejectNegative Var(nios2_custom_fcmpnes, -1) VarExists
++Do not use the fcmpnes custom instruction
++
++mcustom-fcmpnes=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpnes) VarExists
++Integer id (N) of fcmpnes custom instruction
++
++mno-custom-fcmpeqs
++Target Report RejectNegative Var(nios2_custom_fcmpeqs, -1) VarExists
++Do not use the fcmpeqs custom instruction
++
++mcustom-fcmpeqs=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpeqs) VarExists
++Integer id (N) of fcmpeqs custom instruction
++
++mno-custom-fcmpges
++Target Report RejectNegative Var(nios2_custom_fcmpges, -1) VarExists
++Do not use the fcmpges custom instruction
++
++mcustom-fcmpges=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpges) VarExists
++Integer id (N) of fcmpges custom instruction
++
++mno-custom-fcmpgts
++Target Report RejectNegative Var(nios2_custom_fcmpgts, -1) VarExists
++Do not use the fcmpgts custom instruction
++
++mcustom-fcmpgts=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmpgts) VarExists
++Integer id (N) of fcmpgts custom instruction
++
++mno-custom-fcmples
++Target Report RejectNegative Var(nios2_custom_fcmples, -1) VarExists
++Do not use the fcmples custom instruction
++
++mcustom-fcmples=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmples) VarExists
++Integer id (N) of fcmples custom instruction
++
++mno-custom-fcmplts
++Target Report RejectNegative Var(nios2_custom_fcmplts, -1) VarExists
++Do not use the fcmplts custom instruction
++
++mcustom-fcmplts=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcmplts) VarExists
++Integer id (N) of fcmplts custom instruction
++
++mno-custom-flogs
++Target Report RejectNegative Var(nios2_custom_flogs, -1) VarExists
++Do not use the flogs custom instruction
++
++mcustom-flogs=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_flogs) VarExists
++Integer id (N) of flogs custom instruction
++
++mno-custom-fexps
++Target Report RejectNegative Var(nios2_custom_fexps, -1) VarExists
++Do not use the fexps custom instruction
++
++mcustom-fexps=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fexps) VarExists
++Integer id (N) of fexps custom instruction
++
++mno-custom-fatans
++Target Report RejectNegative Var(nios2_custom_fatans, -1) VarExists
++Do not use the fatans custom instruction
++
++mcustom-fatans=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fatans) VarExists
++Integer id (N) of fatans custom instruction
++
++mno-custom-ftans
++Target Report RejectNegative Var(nios2_custom_ftans, -1) VarExists
++Do not use the ftans custom instruction
++
++mcustom-ftans=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_ftans) VarExists
++Integer id (N) of ftans custom instruction
++
++mno-custom-fsins
++Target Report RejectNegative Var(nios2_custom_fsins, -1) VarExists
++Do not use the fsins custom instruction
++
++mcustom-fsins=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fsins) VarExists
++Integer id (N) of fsins custom instruction
++
++mno-custom-fcoss
++Target Report RejectNegative Var(nios2_custom_fcoss, -1) VarExists
++Do not use the fcoss custom instruction
++
++mcustom-fcoss=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fcoss) VarExists
++Integer id (N) of fcoss custom instruction
++
++mno-custom-fsqrts
++Target Report RejectNegative Var(nios2_custom_fsqrts, -1) VarExists
++Do not use the fsqrts custom instruction
++
++mcustom-fsqrts=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fsqrts) VarExists
++Integer id (N) of fsqrts custom instruction
++
++mno-custom-fabss
++Target Report RejectNegative Var(nios2_custom_fabss, -1) VarExists
++Do not use the fabss custom instruction
++
++mcustom-fabss=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fabss) VarExists
++Integer id (N) of fabss custom instruction
++
++mno-custom-fnegs
++Target Report RejectNegative Var(nios2_custom_fnegs, -1) VarExists
++Do not use the fnegs custom instruction
++
++mcustom-fnegs=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fnegs) VarExists
++Integer id (N) of fnegs custom instruction
++
++mno-custom-fmaxs
++Target Report RejectNegative Var(nios2_custom_fmaxs, -1) VarExists
++Do not use the fmaxs custom instruction
++
++mcustom-fmaxs=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fmaxs) VarExists
++Integer id (N) of fmaxs custom instruction
++
++mno-custom-fmins
++Target Report RejectNegative Var(nios2_custom_fmins, -1) VarExists
++Do not use the fmins custom instruction
++
++mcustom-fmins=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fmins) VarExists
++Integer id (N) of fmins custom instruction
++
++mno-custom-fdivs
++Target Report RejectNegative Var(nios2_custom_fdivs, -1) VarExists
++Do not use the fdivs custom instruction
++
++mcustom-fdivs=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fdivs) VarExists
++Integer id (N) of fdivs custom instruction
++
++mno-custom-fmuls
++Target Report RejectNegative Var(nios2_custom_fmuls, -1) VarExists
++Do not use the fmuls custom instruction
++
++mcustom-fmuls=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fmuls) VarExists
++Integer id (N) of fmuls custom instruction
++
++mno-custom-fsubs
++Target Report RejectNegative Var(nios2_custom_fsubs, -1) VarExists
++Do not use the fsubs custom instruction
++
++mcustom-fsubs=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fsubs) VarExists
++Integer id (N) of fsubs custom instruction
++
++mno-custom-fadds
++Target Report RejectNegative Var(nios2_custom_fadds, -1) VarExists
++Do not use the fadds custom instruction
++
++mcustom-fadds=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fadds) VarExists
++Integer id (N) of fadds custom instruction
++
++mno-custom-frdy
++Target Report RejectNegative Var(nios2_custom_frdy, -1) VarExists
++Do not use the frdy custom instruction
++
++mcustom-frdy=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_frdy) VarExists
++Integer id (N) of frdy custom instruction
++
++mno-custom-frdxhi
++Target Report RejectNegative Var(nios2_custom_frdxhi, -1) VarExists
++Do not use the frdxhi custom instruction
++
++mcustom-frdxhi=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_frdxhi) VarExists
++Integer id (N) of frdxhi custom instruction
++
++mno-custom-frdxlo
++Target Report RejectNegative Var(nios2_custom_frdxlo, -1) VarExists
++Do not use the frdxlo custom instruction
++
++mcustom-frdxlo=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_frdxlo) VarExists
++Integer id (N) of frdxlo custom instruction
++
++mno-custom-fwry
++Target Report RejectNegative Var(nios2_custom_fwry, -1) VarExists
++Do not use the fwry custom instruction
++
++mcustom-fwry=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fwry) VarExists
++Integer id (N) of fwry custom instruction
++
++mno-custom-fwrx
++Target Report RejectNegative Var(nios2_custom_fwrx, -1) VarExists
++Do not use the fwrx custom instruction
++
++mcustom-fwrx=
++Target Report RejectNegative Joined UInteger Var(nios2_custom_fwrx) VarExists
++Integer id (N) of fwrx custom instruction
+Index: gcc-4.1.2/gcc/config/nios2/t-linux
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/t-linux 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,9 @@
++# Soft-float functions go in glibc only, to facilitate the possible
++# future addition of exception and rounding mode support integrated
++# with <fenv.h>.
++
++FPBIT=
++DPBIT=
++LIB2FUNCS_EXCLUDE = _floatdidf _floatdisf _fixunsdfsi _fixunssfsi \
++ _fixunsdfdi _fixdfdi _fixunssfdi _fixsfdi _floatundidf _floatundisf
++LIB2FUNCS_EXTRA += $(srcdir)/config/nios2/linux-atomic.c
+Index: gcc-4.1.2/gcc/config/nios2/t-nios2
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ gcc-4.1.2/gcc/config/nios2/t-nios2 2010-06-30 08:50:26.000000000 +0200
+@@ -0,0 +1,176 @@
++##
++## Compiler flags to use when compiling libgcc2.c.
++##
++## LIB2FUNCS_EXTRA
++## A list of source file names to be compiled or assembled and inserted into
++## libgcc.a.
++
++LIB2FUNCS_EXTRA=$(srcdir)/config/nios2/lib2-divmod.c \
++ $(srcdir)/config/nios2/lib2-divmod-hi.c \
++ $(srcdir)/config/nios2/lib2-divtable.c \
++ $(srcdir)/config/nios2/lib2-mul.c
++
++##
++## Floating Point Emulation
++## To have GCC include software floating point libraries in libgcc.a define FPBIT
++## and DPBIT along with a few rules as follows:
++##
++## # We want fine grained libraries, so use the new code
++## # to build the floating point emulation libraries.
++FPBIT=fp-bit.c
++DPBIT=dp-bit.c
++
++TARGET_LIBGCC2_CFLAGS = -O2 -fpic
++
++# FLOAT_ONLY - no doubles
++# SMALL_MACHINE - define only if QI/HI operations are faster than SI;
++# it makes fp-bit use chars and shorts instead of ints.  On Nios II,
++# 32-bit ints are at least as fast as chars and shorts, so SMALL_MACHINE
++# is not defined.
++# CMPtype - type returned by FP compare, i.e. INT (hard-coded in fp-bit - see code)
++
++$(FPBIT): $(srcdir)/config/fp-bit.c Makefile
++ echo '#define FLOAT' > ${FPBIT}
++ echo '#ifndef __nios2_big_endian__' >> ${FPBIT}
++ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> ${FPBIT}
++ echo '#endif' >> ${FPBIT}
++ cat $(srcdir)/config/fp-bit.c >> ${FPBIT}
++
++$(DPBIT): $(srcdir)/config/fp-bit.c Makefile
++ echo '' > ${DPBIT}
++ echo '#ifndef __nios2_big_endian__' >> ${DPBIT}
++ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> ${DPBIT}
++ echo '#endif' >> ${DPBIT}
++ cat $(srcdir)/config/fp-bit.c >> ${DPBIT}
++
++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o crtbeginS.o \
++ crtbeginT.o crtendS.o
++
++# Assemble startup files.
++$(T)crti.o: $(srcdir)/config/nios2/crti.asm $(GCC_PASSES)
++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
++ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/nios2/crti.asm
++
++$(T)crtn.o: $(srcdir)/config/nios2/crtn.asm $(GCC_PASSES)
++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
++ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/nios2/crtn.asm
++
++
++## You may need to provide additional #defines at the beginning of
++## fp-bit.c and dp-bit.c to control target endianness and other options
++##
++## CRTSTUFF_T_CFLAGS
++## Special flags used when compiling crtstuff.c. See Initialization.
++##
++## CRTSTUFF_T_CFLAGS_S
++## Special flags used when compiling crtstuff.c for shared linking. Used
++## if you use crtbeginS.o and crtendS.o in EXTRA-PARTS. See Initialization.
++##
++## MULTILIB_OPTIONS
++## For some targets, invoking GCC in different ways produces objects that
++## cannot be linked together. For example, for some targets GCC produces
++## both big and little endian code. For these targets, you must arrange
++## for multiple versions of libgcc.a to be compiled, one for each set of
++## incompatible options. When GCC invokes the linker, it arranges to link
++## in the right version of libgcc.a, based on the command line options
++## used.
++## The MULTILIB_OPTIONS macro lists the set of options for which special
++## versions of libgcc.a must be built. Write options that are mutually
++## incompatible side by side, separated by a slash. Write options that may
++## be used together separated by a space. The build procedure will build
++## all combinations of compatible options.
++##
++## For example, if you set MULTILIB_OPTIONS to m68000/m68020 msoft-float,
++## Makefile will build special versions of libgcc.a using the following
++## sets of options: -m68000, -m68020, -msoft-float, -m68000 -msoft-float,
++## and -m68020 -msoft-float.
++
++
++## The BUILD_BE_MULTILIB and BUILD_PG_MULTILIB variables allow the
++## makefile user to enable/disable the generation of the precompiled
++## big endian and profiling libraries. By default, the big endian
++## libraries are not created on a windows build and the profiling
++## libraries are not created on a Windows build and the profiling
++## combinations are created by default.
++
++# Uncomment to temporarily avoid building big endian and profiling libraries during a Windows build.
++#ifeq ($(DEV_HOST_OS), win32)
++#BUILD_BE_MULTILIB ?= 0
++#BUILD_PG_MULTILIB ?= 0
++#endif
++
++#By default, avoid building the profiling libraries during a Solaris build.
++ifeq ($(DEV_HOST_OS), solaris)
++BUILD_PG_MULTILIB ?= 0
++endif
++
++BUILD_BE_MULTILIB ?= 0
++BUILD_PG_MULTILIB ?= 1
++BUILD_MULTILIB ?= 0
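++# Since these default assignments use ?=, they can be overridden from the
++# make command line, e.g. (illustrative invocation):
++#   make BUILD_MULTILIB=1 BUILD_BE_MULTILIB=1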
++
++ifeq ($(BUILD_MULTILIB), 1)
++
++MULTILIB_OPTIONS = mno-hw-mul mhw-mulx mstack-check mcustom-fpu-cfg=60-1 mcustom-fpu-cfg=60-2
++
++#Add the profiling flag to the multilib variable if required
++ifeq ($(BUILD_PG_MULTILIB), 1)
++MULTILIB_OPTIONS += pg
++endif
++
++#Add the big endian flag to the multilib variable if required
++ifeq ($(BUILD_BE_MULTILIB), 1)
++MULTILIB_OPTIONS += EB/EL
++endif
++
++endif
++
++## MULTILIB_DIRNAMES
++## If MULTILIB_OPTIONS is used, this variable specifies the directory names
++## that should be used to hold the various libraries. Write one element in
++## MULTILIB_DIRNAMES for each element in MULTILIB_OPTIONS. If
++## MULTILIB_DIRNAMES is not used, the default value will be
++## MULTILIB_OPTIONS, with all slashes treated as spaces.
++## For example, if MULTILIB_OPTIONS is set to m68000/m68020 msoft-float,
++## then the default value of MULTILIB_DIRNAMES is m68000 m68020
++## msoft-float. You may specify a different value if you desire a
++## different set of directory names.
++
++# MULTILIB_DIRNAMES =
++
++## MULTILIB_MATCHES
++## Sometimes the same option may be written in two different ways. If an
++## option is listed in MULTILIB_OPTIONS, GCC needs to know about any
++## synonyms. In that case, set MULTILIB_MATCHES to a list of items of the
++## form option=option to describe all relevant synonyms. For example,
++## m68000=mc68000 m68020=mc68020.
++
++ifeq ($(BUILD_MULTILIB), 1)
++ifeq ($(BUILD_BE_MULTILIB), 1)
++MULTILIB_MATCHES = EL=mel EB=meb
++endif
++endif
++
++##
++## MULTILIB_EXCEPTIONS
++## Sometimes when there are multiple sets of MULTILIB_OPTIONS being
++## specified, there are combinations that should not be built. In that
++## case, set MULTILIB_EXCEPTIONS to be all of the switch exceptions in
++## shell case syntax that should not be built.
++## For example, in the PowerPC embedded ABI support, it is not desirable to
++## build libraries compiled with the -mcall-aix option and either of the
++## -fleading-underscore or -mlittle options at the same time. Therefore
++## MULTILIB_EXCEPTIONS is set to
++##
++## *mcall-aix/*fleading-underscore* *mlittle/*mcall-aix*
++##
++
++ifeq ($(BUILD_MULTILIB), 1)
++MULTILIB_EXCEPTIONS = *mno-hw-mul/*mhw-mulx* *mcustom-fpu-cfg=60-1/*mcustom-fpu-cfg=60-2*
++endif
++
++##
++## MULTILIB_EXTRA_OPTS Sometimes it is desirable that when building
++## multiple versions of libgcc.a certain options should always be passed on
++## to the compiler. In that case, set MULTILIB_EXTRA_OPTS to be the list
++## of options to be used for all builds.
++##
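++## For example, a port that wanted every multilib variant built as
++## position-independent code could add (illustrative, not used here):
++##   MULTILIB_EXTRA_OPTS = fpic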
diff --git a/recipes/gcc/gcc-4.1.2/nios2-lib-flags.patch b/recipes/gcc/gcc-4.1.2/nios2-lib-flags.patch
new file mode 100644
index 0000000000..e125ba0f6c
--- /dev/null
+++ b/recipes/gcc/gcc-4.1.2/nios2-lib-flags.patch
@@ -0,0 +1,10 @@
+Index: gcc-4.1.2/config/mt-ospace
+===================================================================
+--- gcc-4.1.2.orig/config/mt-ospace 2010-06-02 10:23:01.000000000 +0200
++++ gcc-4.1.2/config/mt-ospace 2010-06-02 10:23:28.000000000 +0200
+@@ -1,3 +1,2 @@
+-# Build libraries optimizing for space, not speed.
+- CFLAGS_FOR_TARGET = -g -Os
+- CXXFLAGS_FOR_TARGET = -g -Os
++ CFLAGS_FOR_TARGET = -g -O2
++ CXXFLAGS_FOR_TARGET = -g -O2