path: root/gnu/usr.bin/gcc/config/i960
author	Niklas Hallqvist <niklas@cvs.openbsd.org>	1995-12-20 01:06:22 +0000
committer	Niklas Hallqvist <niklas@cvs.openbsd.org>	1995-12-20 01:06:22 +0000
commit	c482518380683ee38d14024c1e362a0d681cf967 (patch)
tree	e69b4f6d3fee3aced20a41f3fdf543fc1c77fb5d /gnu/usr.bin/gcc/config/i960
parent	76a62188d0db49c65b696d474c855a799fd96dce (diff)
FSF GCC version 2.7.2
Diffstat (limited to 'gnu/usr.bin/gcc/config/i960')
-rw-r--r--	gnu/usr.bin/gcc/config/i960/i960-coff.h	99
-rw-r--r--	gnu/usr.bin/gcc/config/i960/i960.c	2593
-rw-r--r--	gnu/usr.bin/gcc/config/i960/i960.h	1527
-rw-r--r--	gnu/usr.bin/gcc/config/i960/i960.md	2645
-rw-r--r--	gnu/usr.bin/gcc/config/i960/t-960bare	20
-rw-r--r--	gnu/usr.bin/gcc/config/i960/t-vxworks960	23
-rw-r--r--	gnu/usr.bin/gcc/config/i960/vx960-coff.h	69
-rw-r--r--	gnu/usr.bin/gcc/config/i960/vx960.h	33
-rw-r--r--	gnu/usr.bin/gcc/config/i960/xm-i960.h	43
9 files changed, 7052 insertions, 0 deletions
diff --git a/gnu/usr.bin/gcc/config/i960/i960-coff.h b/gnu/usr.bin/gcc/config/i960/i960-coff.h
new file mode 100644
index 00000000000..15415e37ed9
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/i960-coff.h
@@ -0,0 +1,99 @@
+/* Definitions of target machine for GNU compiler, for "naked" Intel
+ 80960 using coff object format and coff debugging symbols.
+ Copyright (C) 1988, 1989, 1991 Intel Corp.
+ Contributed by Steven McGeady (mcg@omepd.intel.com)
+ Additional work by Glenn Colon-Bonet, Jonathan Shapiro, Andy Wilson
+ Converted to GCC 2.0 by Michael Tiemann, Cygnus Support.
+ */
+
+/*
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "i960/i960.h"
+
+/* Generate SDB_DEBUGGING_INFO by default. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE SDB_DEBUG
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ output_file_directive ((FILE), main_input_filename)
+
+/* Support the ctors and dtors sections for g++. */
+
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"x\""
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_ctors, in_dtors
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
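+
+/* For illustration: with a hypothetical constructor symbol _ctor_foo,
+   ASM_OUTPUT_CONSTRUCTOR above would emit roughly
+
+	.section	.ctors,"x"
+	.word	 _ctor_foo
+
+   and ASM_OUTPUT_DESTRUCTOR the matching entry in the .dtors section.  */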
+
+/* end of i960-coff.h */
diff --git a/gnu/usr.bin/gcc/config/i960/i960.c b/gnu/usr.bin/gcc/config/i960/i960.c
new file mode 100644
index 00000000000..fde2e43e786
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/i960.c
@@ -0,0 +1,2593 @@
+/* Subroutines used for code generation on intel 80960.
+ Copyright (C) 1992, 1995 Free Software Foundation, Inc.
+ Contributed by Steven McGeady, Intel Corp.
+ Additional Work by Glenn Colon-Bonet, Jonathan Shapiro, Andy Wilson
+ Converted to GCC 2.0 by Jim Wilson and Michael Tiemann, Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+
+#include "config.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "insn-codes.h"
+#include "assert.h"
+#include "expr.h"
+#include "function.h"
+#include "recog.h"
+#include <math.h>
+
+/* Save the operands last given to a compare for use when we
+ generate a scc or bcc insn. */
+
+rtx i960_compare_op0, i960_compare_op1;
+
+/* Used to implement #pragma align/noalign. Initialized by OVERRIDE_OPTIONS
+ macro in i960.h. */
+
+static int i960_maxbitalignment;
+static int i960_last_maxbitalignment;
+
+/* Used to implement switching between MEM and ALU insn types, for better
+ C series performance. */
+
+enum insn_types i960_last_insn_type;
+
+/* The leaf-procedure return register. Set only if this is a leaf routine. */
+
+static int i960_leaf_ret_reg;
+
+/* True if replacing tail calls with jumps is OK. */
+
+static int tail_call_ok;
+
+/* A string containing a list of insns to emit in the epilogue so as to
+ restore all registers saved by the prologue. Created by the prologue
+ code as it saves registers away. */
+
+char epilogue_string[1000];
+
+/* A unique number (per function) for return labels. */
+
+static int ret_label = 0;
+
+/* This is true if FNDECL is either a varargs or a stdarg function.
+ This is used to help identify functions that use an argument block. */
+
+#define VARARGS_STDARG_FUNCTION(FNDECL) \
+((TYPE_ARG_TYPES (TREE_TYPE (FNDECL)) != 0 \
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (TREE_TYPE (FNDECL)))) != void_type_node)) \
+ || current_function_varargs)
+
+/* Handle pragmas for compatibility with Intel's compilers. */
+
+/* ??? This is incomplete, since it does not handle all pragmas that the
+ intel compilers understand. */
+
+void
+process_pragma (finput)
+ FILE *finput;
+{
+ int c;
+ int i;
+
+ c = getc (finput);
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+
+ if (c == 'a'
+ && getc (finput) == 'l'
+ && getc (finput) == 'i'
+ && getc (finput) == 'g'
+ && getc (finput) == 'n'
+ && ((c = getc (finput)) == ' ' || c == '\t' || c == '\n'))
+ {
+ char buf[20];
+ char *s = buf;
+ int align;
+
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+ if (c == '(')
+ c = getc (finput);
+ while (c >= '0' && c <= '9')
+ {
+ if (s < buf + sizeof buf - 1)
+ *s++ = c;
+ c = getc (finput);
+ }
+ *s = '\0';
+
+ align = atoi (buf);
+ switch (align)
+ {
+ case 0:
+ /* Return to last alignment. */
+ align = i960_last_maxbitalignment / 8;
+ /* Fall through. */
+ case 16:
+ case 8:
+ case 4:
+ case 2:
+ case 1:
+ i960_last_maxbitalignment = i960_maxbitalignment;
+ i960_maxbitalignment = align * 8;
+ break;
+
+ default:
+ /* Silently ignore bad values. */
+ break;
+ }
+
+ /* NOTE: ic960 R3.0 pragma align definition:
+
+ #pragma align [(size)] | (identifier=size[,...])
+ #pragma noalign [(identifier)[,...]]
+
+ (all parens are optional)
+
+ - size is [1,2,4,8,16]
+ - noalign means size==1
+ - applies only to component elements of a struct (and union?)
+ - identifier applies to structure tag (only)
+ - missing identifier means next struct
+
+ - alignment rules for bitfields need more investigation */
+ }
+
+ /* Should be pragma 'far' or equivalent for callx/balx here. */
+
+ ungetc (c, finput);
+}
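+
+/* For illustration: with the ic960 syntax documented above, a source
+   file could request 2-byte packing for the next structure and then
+   restore the previous alignment (the struct here is hypothetical):
+
+	#pragma align (2)
+	struct pkt { char tag; short len; };
+	#pragma align (0)
+
+   The "0" form returns to the last alignment, as the switch above
+   implements.  */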
+
+/* Initialize variables before compiling any files. */
+
+void
+i960_initialize ()
+{
+ if (TARGET_IC_COMPAT2_0)
+ {
+ i960_maxbitalignment = 8;
+ i960_last_maxbitalignment = 128;
+ }
+ else
+ {
+ i960_maxbitalignment = 128;
+ i960_last_maxbitalignment = 8;
+ }
+}
+
+/* Return true if OP can be used as the source of an fp move insn. */
+
+int
+fpmove_src_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == CONST_DOUBLE || general_operand (op, mode));
+}
+
+#if 0
+/* Return true if OP is a register or zero. */
+
+int
+reg_or_zero_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return register_operand (op, mode) || op == const0_rtx;
+}
+#endif
+
+/* Return truth value of whether OP can be used as an operand in a
+ three-address arithmetic insn (such as add %o1,7,%l2) of mode MODE. */
+
+int
+arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode) || literal (op, mode));
+}
+
+/* Return true if OP is a register or a valid floating point literal. */
+
+int
+fp_arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode) || fp_literal (op, mode));
+}
+
+/* Return true if OP is a register or a valid signed integer literal. */
+
+int
+signed_arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode) || signed_literal (op, mode));
+}
+
+/* Return truth value of whether OP is an integer which fits the
+ range constraining immediate operands in three-address insns. */
+
+int
+literal (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT) && INTVAL(op) >= 0 && INTVAL(op) < 32);
+}
+
+/* Return true if OP is a float constant of 1. */
+
+int
+fp_literal_one (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (TARGET_NUMERICS && mode == GET_MODE (op) && op == CONST1_RTX (mode));
+}
+
+/* Return true if OP is a float constant of 0. */
+
+int
+fp_literal_zero (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (TARGET_NUMERICS && mode == GET_MODE (op) && op == CONST0_RTX (mode));
+}
+
+/* Return true if OP is a valid floating point literal. */
+
+int
+fp_literal(op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return fp_literal_zero (op, mode) || fp_literal_one (op, mode);
+}
+
+/* Return true if OP is a valid signed immediate constant. */
+
+int
+signed_literal(op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT) && INTVAL(op) > -32 && INTVAL(op) < 32);
+}
+
+/* Return truth value of statement that OP is a symbolic memory
+ operand of mode MODE. */
+
+int
+symbolic_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
+ || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
+}
+
+/* Return truth value of whether OP is EQ or NE. */
+
+int
+eq_or_neq (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
+}
+
+/* OP is an integer register or a constant. */
+
+int
+arith32_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+ return (CONSTANT_P (op));
+}
+
+/* Return true if OP is an integer constant which is a power of 2. */
+
+int
+power2_operand (op,mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+
+ return exact_log2 (INTVAL (op)) >= 0;
+}
+
+/* Return true if OP is an integer constant which is the complement of a
+ power of 2. */
+
+int
+cmplpower2_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+
+ return exact_log2 (~ INTVAL (op)) >= 0;
+}
+
+/* If VAL has only one bit set, return the index of that bit. Otherwise
+ return -1. */
+
+int
+bitpos (val)
+ unsigned int val;
+{
+ register int i;
+
+ for (i = 0; val != 0; i++, val >>= 1)
+ {
+ if (val & 1)
+ {
+ if (val != 1)
+ return -1;
+ return i;
+ }
+ }
+ return -1;
+}
+
+/* Return non-zero if OP is a mask, i.e. all one bits are consecutive.
+ The return value indicates how many consecutive non-zero bits exist
+ if this is a mask. This is the same as the next function, except that
+ it does not indicate what the start and stop bit positions are. */
+
+int
+is_mask (val)
+ unsigned int val;
+{
+ register int start, end, i;
+
+ start = -1;
+ for (i = 0; val != 0; val >>= 1, i++)
+ {
+ if (val & 1)
+ {
+ if (start < 0)
+ start = i;
+
+ end = i;
+ continue;
+ }
+ /* Still looking for the first bit. */
+ if (start < 0)
+ continue;
+
+ /* We've seen the start of a bit sequence, and now a zero. There
+ must be more one bits, otherwise we would have exited the loop.
+ Therefore, it is not a mask. */
+ if (val)
+ return 0;
+ }
+
+ /* The bit string has ones from START to END bit positions only. */
+ return end - start + 1;
+}
+
+/* If VAL is a mask, then return nonzero, with S set to the starting bit
+ position and E set to the ending bit position of the mask. The return
+ value indicates how many consecutive bits exist in the mask. This is
+ the same as the previous function, except that it also indicates the
+ start and end bit positions of the mask. */
+
+int
+bitstr (val, s, e)
+ unsigned int val;
+ int *s, *e;
+{
+ register int start, end, i;
+
+ start = -1;
+ end = -1;
+ for (i = 0; val != 0; val >>= 1, i++)
+ {
+ if (val & 1)
+ {
+ if (start < 0)
+ start = i;
+
+ end = i;
+ continue;
+ }
+
+ /* Still looking for the first bit. */
+ if (start < 0)
+ continue;
+
+ /* We've seen the start of a bit sequence, and now a zero. There
+ must be more one bits, otherwise we would have exited the loop.
+ Therefore, it is not a mask. */
+ if (val)
+ {
+ start = -1;
+ end = -1;
+ break;
+ }
+ }
+
+ /* The bit string has ones from START to END bit positions only. */
+ *s = start;
+ *e = end;
+ return ((start < 0) ? 0 : end - start + 1);
+}
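+
+/* Worked examples for bitpos, is_mask and bitstr (for illustration):
+
+	bitpos (0x8)  == 3	single bit, at position 3
+	bitpos (0x6)  == -1	more than one bit set
+	is_mask (0x3c) == 4	bits 2..5 form a run of four ones
+	is_mask (0x14) == 0	the one bits are not consecutive
+	bitstr (0x3c, &s, &e)	returns 4, with s == 2 and e == 5.  */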
+
+/* Return the machine mode to use for a comparison. */
+
+enum machine_mode
+select_cc_mode (op, x)
+ RTX_CODE op;
+ rtx x;
+{
+ if (op == GTU || op == LTU || op == GEU || op == LEU)
+ return CC_UNSmode;
+ return CCmode;
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 36 in the proper mode. */
+
+rtx
+gen_compare_reg (code, x, y)
+ enum rtx_code code;
+ rtx x, y;
+{
+ rtx cc_reg;
+ enum machine_mode ccmode = SELECT_CC_MODE (code, x, y);
+ enum machine_mode mode
+ = GET_MODE (x) == VOIDmode ? GET_MODE (y) : GET_MODE (x);
+
+ if (mode == SImode)
+ {
+ if (! arith_operand (x, mode))
+ x = force_reg (SImode, x);
+ if (! arith_operand (y, mode))
+ y = force_reg (SImode, y);
+ }
+
+ cc_reg = gen_rtx (REG, ccmode, 36);
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
+ gen_rtx (COMPARE, ccmode, x, y)));
+
+ return cc_reg;
+}
+
+/* For the i960, REG is cost 1, REG+immed CONST is cost 2, REG+REG is cost 2,
+ REG+nonimmed CONST is cost 4. REG+SYMBOL_REF, SYMBOL_REF, and similar
+ are 4. Indexed addresses are cost 6. */
+
+/* ??? Try using just RTX_COST, i.e. not defining ADDRESS_COST. */
+
+int
+i960_address_cost (x)
+ rtx x;
+{
+#if 0
+ /* Handled before calling here. */
+ if (GET_CODE (x) == REG)
+ return 1;
+#endif
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx base = XEXP (x, 0);
+ rtx offset = XEXP (x, 1);
+
+ if (GET_CODE (base) == SUBREG)
+ base = SUBREG_REG (base);
+ if (GET_CODE (offset) == SUBREG)
+ offset = SUBREG_REG (offset);
+
+ if (GET_CODE (base) == REG)
+ {
+ if (GET_CODE (offset) == REG)
+ return 2;
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ if ((unsigned)INTVAL (offset) < 2047)
+ return 2;
+ return 4;
+ }
+ if (CONSTANT_P (offset))
+ return 4;
+ }
+ if (GET_CODE (base) == PLUS || GET_CODE (base) == MULT)
+ return 6;
+
+ /* This is an invalid address. The return value doesn't matter, but
+ for convenience we make this more expensive than anything else. */
+ return 12;
+ }
+ if (GET_CODE (x) == MULT)
+ return 6;
+
+ /* Symbol_refs and other unrecognized addresses are cost 4. */
+ return 4;
+}
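+
+/* Cost examples (for illustration): (g0) costs 1, 8(g0) and (g0)[g1*1]
+   cost 2, a symbolic address such as _foo(g0) costs 4, and an indexed
+   form like 0x20(g0)[g1*4] costs 6.  */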
+
+/* Emit insns to move operands[1] into operands[0].
+
+ Return 1 if we have written out everything that needs to be done to
+ do the move. Otherwise, return 0 and the caller will emit the move
+ normally. */
+
+int
+emit_move_sequence (operands, mode)
+ rtx *operands;
+ enum machine_mode mode;
+{
+ /* We can only store registers to memory. */
+
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (mode, operands[1]);
+
+ /* Storing multi-word values in unaligned hard registers to memory may
+ require a scratch since we have to store them a register at a time and
+ adding 4 to the memory address may not yield a valid insn. */
+ /* ??? We don't always need the scratch, but that would complicate things.
+ Maybe later. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (operands[1]), mode))
+ {
+ emit_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operands[0], operands[1]),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, Pmode)))));
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Output assembler to move a double word value. */
+
+char *
+i960_output_move_double (dst, src)
+ rtx dst, src;
+{
+ rtx operands[5];
+
+ if (GET_CODE (dst) == REG
+ && GET_CODE (src) == REG)
+ {
+ if ((REGNO (src) & 1)
+ || (REGNO (dst) & 1))
+ {
+ /* We normally copy the low-numbered register first. However, if
+ the second source register is the same as the first destination
+ register, we must copy in the opposite order. */
+ if (REGNO (src) + 1 == REGNO (dst))
+ return "mov %D1,%D0\n\tmov %1,%0";
+ else
+ return "mov %1,%0\n\tmov %D1,%D0";
+ }
+ else
+ return "movl %1,%0";
+ }
+ else if (GET_CODE (dst) == REG
+ && GET_CODE (src) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
+ {
+ if (REGNO (dst) & 1)
+ return "mov %1,%0\n\tmov 0,%D0";
+ else
+ return "movl %1,%0";
+ }
+ else if (GET_CODE (dst) == REG
+ && GET_CODE (src) == MEM)
+ {
+ if (REGNO (dst) & 1)
+ {
+ /* One can optimize a few cases here, but you have to be
+ careful of clobbering registers used in the address and
+ edge conditions. */
+ operands[0] = dst;
+ operands[1] = src;
+ operands[2] = gen_rtx (REG, Pmode, REGNO (dst) + 1);
+ operands[3] = gen_rtx (MEM, word_mode, operands[2]);
+ operands[4] = adj_offsettable_operand (operands[3], UNITS_PER_WORD);
+ output_asm_insn ("lda %1,%2\n\tld %3,%0\n\tld %4,%D0", operands);
+ return "";
+ }
+ else
+ return "ldl %1,%0";
+ }
+ else if (GET_CODE (dst) == MEM
+ && GET_CODE (src) == REG)
+ {
+ if (REGNO (src) & 1)
+ {
+ /* This is handled by emit_move_sequence so we shouldn't get here. */
+ abort ();
+ }
+ return "stl %1,%0";
+ }
+ else
+ abort ();
+}
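+
+/* Example (for illustration): copying the unaligned pair g4:g5 into
+   g5:g6 must move g5 into g6 before g4 into g5, or the second source
+   word would be clobbered; that is the REGNO (src) + 1 == REGNO (dst)
+   case above.  An aligned copy such as g4:g5 into g6:g7 is a single
+   movl.  */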
+
+/* Output assembler to move a quad word value. */
+
+char *
+i960_output_move_quad (dst, src)
+ rtx dst, src;
+{
+ rtx operands[7];
+
+ if (GET_CODE (dst) == REG
+ && GET_CODE (src) == REG)
+ {
+ if ((REGNO (src) & 3)
+ || (REGNO (dst) & 3))
+ {
+ /* We normally copy starting with the low numbered register.
+ However, if there is an overlap such that the first dest reg
+ is <= the last source reg but not < the first source reg, we
+ must copy in the opposite order. */
+ if (REGNO (dst) <= REGNO (src) + 3
+ && REGNO (dst) >= REGNO (src))
+ return "mov %F1,%F0\n\tmov %E1,%E0\n\tmov %D1,%D0\n\tmov %1,%0";
+ else
+ return "mov %1,%0\n\tmov %D1,%D0\n\tmov %E1,%E0\n\tmov %F1,%F0";
+ }
+ else
+ return "movq %1,%0";
+ }
+ else if (GET_CODE (dst) == REG
+ && GET_CODE (src) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
+ {
+ if (REGNO (dst) & 3)
+ return "mov %1,%0\n\tmov 0,%D0\n\tmov 0,%E0\n\tmov 0,%F0";
+ else
+ return "movq %1,%0";
+ }
+ else if (GET_CODE (dst) == REG
+ && GET_CODE (src) == MEM)
+ {
+ if (REGNO (dst) & 3)
+ {
+ /* One can optimize a few cases here, but you have to be
+ careful of clobbering registers used in the address and
+ edge conditions. */
+ operands[0] = dst;
+ operands[1] = src;
+ operands[2] = gen_rtx (REG, Pmode, REGNO (dst) + 3);
+ operands[3] = gen_rtx (MEM, word_mode, operands[2]);
+ operands[4] = adj_offsettable_operand (operands[3], UNITS_PER_WORD);
+ operands[5] = adj_offsettable_operand (operands[4], UNITS_PER_WORD);
+ operands[6] = adj_offsettable_operand (operands[5], UNITS_PER_WORD);
+ output_asm_insn ("lda %1,%2\n\tld %3,%0\n\tld %4,%D0\n\tld %5,%E0\n\tld %6,%F0", operands);
+ return "";
+ }
+ else
+ return "ldq %1,%0";
+ }
+ else if (GET_CODE (dst) == MEM
+ && GET_CODE (src) == REG)
+ {
+ if (REGNO (src) & 3)
+ {
+ /* This is handled by emit_move_sequence so we shouldn't get here. */
+ abort ();
+ }
+ return "stq %1,%0";
+ }
+ else
+ abort ();
+}
+
+/* Emit insns to load a constant to non-floating point registers.
+ Uses several strategies to try to use as few insns as possible. */
+
+char *
+i960_output_ldconst (dst, src)
+ register rtx dst, src;
+{
+ register int rsrc1;
+ register unsigned rsrc2;
+ enum machine_mode mode = GET_MODE (dst);
+ rtx operands[4];
+
+ operands[0] = operands[2] = dst;
+ operands[1] = operands[3] = src;
+
+ /* Anything that isn't a compile time constant, such as a SYMBOL_REF,
+ must be a ldconst insn. */
+
+ if (GET_CODE (src) != CONST_INT && GET_CODE (src) != CONST_DOUBLE)
+ {
+ output_asm_insn ("ldconst %1,%0", operands);
+ return "";
+ }
+ else if (mode == XFmode)
+ {
+ REAL_VALUE_TYPE d;
+ long value_long[3];
+ int i;
+
+ if (fp_literal_zero (src, XFmode))
+ return "movt 0,%0";
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, src);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (d, value_long);
+
+ output_asm_insn ("# ldconst %1,%0",operands);
+
+ for (i = 0; i < 3; i++)
+ {
+ operands[0] = gen_rtx (REG, SImode, REGNO (dst) + i);
+ operands[1] = GEN_INT (value_long[i]);
+ output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
+ operands);
+ }
+
+ return "";
+ }
+ else if (mode == DFmode)
+ {
+ rtx first, second;
+
+ if (fp_literal_zero (src, DFmode))
+ return "movl 0,%0";
+
+ split_double (src, &first, &second);
+
+ output_asm_insn ("# ldconst %1,%0",operands);
+
+ operands[0] = gen_rtx (REG, SImode, REGNO (dst));
+ operands[1] = first;
+ output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
+ operands);
+ operands[0] = gen_rtx (REG, SImode, REGNO (dst) + 1);
+ operands[1] = second;
+ output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
+ operands);
+ return "";
+ }
+ else if (mode == SFmode)
+ {
+ REAL_VALUE_TYPE d;
+ long value;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, src);
+ REAL_VALUE_TO_TARGET_SINGLE (d, value);
+
+ output_asm_insn ("# ldconst %1,%0",operands);
+ operands[0] = gen_rtx (REG, SImode, REGNO (dst));
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, value);
+ output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
+ operands);
+ return "";
+ }
+ else if (mode == TImode)
+ {
+ /* ??? This is currently not handled at all. */
+ abort ();
+
+ /* Note: lowest order word goes in lowest numbered reg. */
+ rsrc1 = INTVAL (src);
+ if (rsrc1 >= 0 && rsrc1 < 32)
+ return "movq %1,%0";
+ else
+ output_asm_insn ("movq\t0,%0\t# ldconstq %1,%0",operands);
+ /* Go pick up the low-order word. */
+ }
+ else if (mode == DImode)
+ {
+ rtx upperhalf, lowerhalf, xoperands[2];
+
+ if (GET_CODE (src) == CONST_DOUBLE || GET_CODE (src) == CONST_INT)
+ split_double (src, &lowerhalf, &upperhalf);
+
+ else
+ abort ();
+
+ /* Note: lowest order word goes in lowest numbered reg. */
+ /* Numbers from 0 to 31 can be handled with a single insn. */
+ rsrc1 = INTVAL (lowerhalf);
+ if (upperhalf == const0_rtx && rsrc1 >= 0 && rsrc1 < 32)
+ return "movl %1,%0";
+
+ /* Output the upper half with a recursive call. */
+ xoperands[0] = gen_rtx (REG, SImode, REGNO (dst) + 1);
+ xoperands[1] = upperhalf;
+ output_asm_insn (i960_output_ldconst (xoperands[0], xoperands[1]),
+ xoperands);
+ /* The lower word is emitted normally. */
+ }
+ else
+ {
+ rsrc1 = INTVAL (src);
+ if (mode == QImode)
+ {
+ if (rsrc1 > 0xff)
+ rsrc1 &= 0xff;
+ }
+ else if (mode == HImode)
+ {
+ if (rsrc1 > 0xffff)
+ rsrc1 &= 0xffff;
+ }
+ }
+
+ if (rsrc1 >= 0)
+ {
+ /* ldconst 0..31,X -> mov 0..31,X */
+ if (rsrc1 < 32)
+ {
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ return "lda %1,%0";
+ return "mov %1,%0";
+ }
+
+ /* ldconst 32..63,X -> add 31,nn,X */
+ if (rsrc1 < 63)
+ {
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ return "lda %1,%0";
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, rsrc1 - 31);
+ output_asm_insn ("addo\t31,%1,%0\t# ldconst %3,%0", operands);
+ return "";
+ }
+ }
+ else if (rsrc1 < 0)
+ {
+ /* ldconst -1..-31 -> sub 0,0..31,X */
+ if (rsrc1 >= -31)
+ {
+ /* return 'sub -(%1),0,%0' */
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, - rsrc1);
+ output_asm_insn ("subo\t%1,0,%0\t# ldconst %3,%0", operands);
+ return "";
+ }
+
+ /* ldconst -32 -> not 31,X */
+ if (rsrc1 == -32)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, ~rsrc1);
+ output_asm_insn ("not\t%1,%0 # ldconst %3,%0", operands);
+ return "";
+ }
+ }
+
+ /* If const is a single bit. */
+ if (bitpos (rsrc1) >= 0)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, bitpos (rsrc1));
+ output_asm_insn ("setbit\t%1,0,%0\t# ldconst %3,%0", operands);
+ return "";
+ }
+
+ /* If const is a bit string of less than 6 bits (1..31 shifted). */
+ if (is_mask (rsrc1))
+ {
+ int s, e;
+
+ if (bitstr (rsrc1, &s, &e) < 6)
+ {
+ rsrc2 = ((unsigned int) rsrc1) >> s;
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, rsrc2);
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, s);
+ output_asm_insn ("shlo\t%2,%1,%0\t# ldconst %3,%0", operands);
+ return "";
+ }
+ }
+
+ /* Unimplemented cases:
+ const is in range 0..31 but rotated around end of word:
+ ror 31,3,g0 -> ldconst 0xe0000003,g0
+
+ and any 2 instruction cases that might be worthwhile */
+
+ output_asm_insn ("ldconst %1,%0", operands);
+ return "";
+}
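+
+/* Worked examples of the strategies above (for illustration; on the
+   C series some of these become lda instead):
+
+	ldconst 25,g5	->  mov    25,g5
+	ldconst 50,g5	->  addo   31,19,g5
+	ldconst -5,g5	->  subo   5,0,g5
+	ldconst -32,g5	->  not    31,g5
+	ldconst 256,g5	->  setbit 8,0,g5
+	ldconst 0xe0,g5	->  shlo   5,7,g5
+
+   Constants that match none of these patterns are emitted as a real
+   ldconst.  */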
+
+/* Determine if there is an opportunity for a bypass optimization.
+ Bypass succeeds on the 960K* if the destination of the previous
+ instruction is the second operand of the current instruction.
+ Bypass always succeeds on the C*.
+
+ Return 1 if the pattern should interchange the operands.
+
+ CMPBR_FLAG is true if this is for a compare-and-branch insn.
+ OP1 and OP2 are the two source operands of a 3 operand insn. */
+
+int
+i960_bypass (insn, op1, op2, cmpbr_flag)
+ register rtx insn, op1, op2;
+ int cmpbr_flag;
+{
+ register rtx prev_insn, prev_dest;
+
+ if (TARGET_C_SERIES)
+ return 0;
+
+ /* Can't do this if op1 isn't a register. */
+ if (! REG_P (op1))
+ return 0;
+
+ /* Can't do this for a compare-and-branch if both ops aren't regs. */
+ if (cmpbr_flag && ! REG_P (op2))
+ return 0;
+
+ prev_insn = prev_real_insn (insn);
+
+ if (prev_insn && GET_CODE (prev_insn) == INSN
+ && GET_CODE (PATTERN (prev_insn)) == SET)
+ {
+ prev_dest = SET_DEST (PATTERN (prev_insn));
+ if ((GET_CODE (prev_dest) == REG && REGNO (prev_dest) == REGNO (op1))
+ || (GET_CODE (prev_dest) == SUBREG
+ && GET_CODE (SUBREG_REG (prev_dest)) == REG
+ && REGNO (SUBREG_REG (prev_dest)) == REGNO (op1)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Output the code which declares the function name. This also handles
+ leaf routines, which have special requirements, and initializes some
+ global variables. */
+
+void
+i960_function_name_declare (file, name, fndecl)
+ FILE *file;
+ char *name;
+ tree fndecl;
+{
+ register int i, j;
+ int leaf_proc_ok;
+ rtx insn;
+
+ /* Increment global return label. */
+
+ ret_label++;
+
+ /* Compute whether tail calls and leaf routine optimizations can be performed
+ for this function. */
+
+ if (TARGET_TAILCALL)
+ tail_call_ok = 1;
+ else
+ tail_call_ok = 0;
+
+ if (TARGET_LEAFPROC)
+ leaf_proc_ok = 1;
+ else
+ leaf_proc_ok = 0;
+
+ /* Even if nobody uses extra parms, can't have leafproc or tail calls if
+ argblock, because argblock uses g14 implicitly. */
+
+ if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
+ {
+ tail_call_ok = 0;
+ leaf_proc_ok = 0;
+ }
+
+ /* See if caller passes in an address to return value. */
+
+ if (aggregate_value_p (DECL_RESULT (fndecl)))
+ {
+ tail_call_ok = 0;
+ leaf_proc_ok = 0;
+ }
+
+ /* Can not use tail calls or make this a leaf routine if there is a
+ nonzero frame size. */
+
+ if (get_frame_size () != 0)
+ leaf_proc_ok = 0;
+
+ /* I don't understand this condition, and do not think that it is correct.
+ Apparently this is just checking whether the frame pointer is used, and
+ we can't trust regs_ever_live[fp] since it is (almost?) always set. */
+
+ if (tail_call_ok)
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ && reg_mentioned_p (frame_pointer_rtx, insn))
+ {
+ tail_call_ok = 0;
+ break;
+ }
+
+ /* Check for CALL insns. Can not be a leaf routine if there are any. */
+
+ if (leaf_proc_ok)
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ leaf_proc_ok = 0;
+ break;
+ }
+
+ /* Can not be a leaf routine if any non-call clobbered registers are
+ used in this function. */
+
+ if (leaf_proc_ok)
+ for (i = 0, j = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i]
+ && ((! call_used_regs[i]) || (i > 7 && i < 12)))
+ {
+ /* Global registers. */
+ if (i < 16 && i > 7 && i != 13)
+ leaf_proc_ok = 0;
+ /* Local registers. */
+ else if (i < 32)
+ leaf_proc_ok = 0;
+ }
+
+ /* Now choose a leaf return register, if we can find one, and if it is
+ OK for this to be a leaf routine. */
+
+ i960_leaf_ret_reg = -1;
+
+ if (optimize && leaf_proc_ok)
+ {
+ for (i960_leaf_ret_reg = -1, i = 0; i < 8; i++)
+ if (regs_ever_live[i] == 0)
+ {
+ i960_leaf_ret_reg = i;
+ regs_ever_live[i] = 1;
+ break;
+ }
+ }
+
+ /* Do this after choosing the leaf return register, so it will be listed
+ if one was chosen. */
+
+ fprintf (file, "\t# Function '%s'\n", (name[0] == '*' ? &name[1] : name));
+ fprintf (file, "\t# Registers used: ");
+
+ for (i = 0, j = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (regs_ever_live[i])
+ {
+ fprintf (file, "%s%s ", reg_names[i], call_used_regs[i] ? "" : "*");
+
+ if (i > 15 && j == 0)
+ {
+ fprintf (file,"\n\t#\t\t ");
+ j++;
+ }
+ }
+ }
+
+ fprintf (file, "\n");
+
+ if (i960_leaf_ret_reg >= 0)
+ {
+ /* Make it a leaf procedure. */
+
+ if (TREE_PUBLIC (fndecl))
+ fprintf (file,"\t.globl\t%s.lf\n", (name[0] == '*' ? &name[1] : name));
+
+ fprintf (file, "\t.leafproc\t");
+ assemble_name (file, name);
+ fprintf (file, ",%s.lf\n", (name[0] == '*' ? &name[1] : name));
+ ASM_OUTPUT_LABEL (file, name);
+ fprintf (file, "\tlda LR%d,g14\n", ret_label);
+ fprintf (file, "%s.lf:\n", (name[0] == '*' ? &name[1] : name));
+ fprintf (file, "\tmov g14,g%d\n", i960_leaf_ret_reg);
+
+ if (TARGET_C_SERIES)
+ {
+ fprintf (file, "\tlda 0,g14\n");
+ i960_last_insn_type = I_TYPE_MEM;
+ }
+ else
+ {
+ fprintf (file, "\tmov 0,g14\n");
+ i960_last_insn_type = I_TYPE_REG;
+ }
+ }
+ else
+ {
+ ASM_OUTPUT_LABEL (file, name);
+ i960_last_insn_type = I_TYPE_CTRL;
+ }
+}
+
+/* Compute and return the frame size. */
+
+int
+compute_frame_size (size)
+ int size;
+{
+ int actual_fsize;
+ int outgoing_args_size = current_function_outgoing_args_size;
+
+ /* The STARTING_FRAME_OFFSET is totally hidden from us as far
+ as size is concerned. */
+ actual_fsize = (size + 15) & -16;
+ actual_fsize += (outgoing_args_size + 15) & -16;
+
+ return actual_fsize;
+}
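+
+/* Example (for illustration): with 20 bytes of locals and 8 bytes of
+   outgoing arguments, this returns ((20+15) & -16) + ((8+15) & -16)
+   = 32 + 16 = 48; each piece is rounded up to a multiple of 16
+   separately.  */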
+
+/* Output code for the function prologue. */
+
+void
+i960_function_prologue (file, size)
+ FILE *file;
+ unsigned int size;
+{
+ register int i, j, nr;
+ int n_iregs = 0;
+ int rsize = 0;
+ int actual_fsize, offset;
+ char tmpstr[1000];
+ /* -1 if reg must be saved on proc entry, 0 if available, 1 if saved
+ somewhere. */
+ int regs[FIRST_PSEUDO_REGISTER];
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i]
+ && ((! call_used_regs[i]) || (i > 7 && i < 12)))
+ {
+ regs[i] = -1;
+ /* Count global registers that need saving. */
+ if (i < 16)
+ n_iregs++;
+ }
+ else
+ regs[i] = 0;
+
+ epilogue_string[0] = '\0';
+
+ if (profile_flag || profile_block_flag)
+ {
+ /* When profiling, we may use registers 20 to 27 to save arguments, so
+ they can't be used here for saving globals. J is the number of
+ argument registers the mcount call will save. */
+ for (j = 7; j >= 0 && ! regs_ever_live[j]; j--)
+ ;
+
+ for (i = 20; i <= j + 20; i++)
+ regs[i] = -1;
+ }
+
+ /* First look for local registers to save globals in. */
+ for (i = 0; i < 16; i++)
+ {
+ if (regs[i] == 0)
+ continue;
+
+ /* Start at r4, not r3. */
+ for (j = 20; j < 32; j++)
+ {
+ if (regs[j] != 0)
+ continue;
+
+ regs[i] = 1;
+ regs[j] = -1;
+ regs_ever_live[j] = 1;
+ nr = 1;
+ if (i <= 14 && i % 2 == 0 && j <= 30 && j % 2 == 0
+ && regs[i+1] != 0 && regs[j+1] == 0)
+ {
+ nr = 2;
+ regs[i+1] = 1;
+ regs[j+1] = -1;
+ regs_ever_live[j+1] = 1;
+ }
+ if (nr == 2 && i <= 12 && i % 4 == 0 && j <= 28 && j % 4 == 0
+ && regs[i+2] != 0 && regs[j+2] == 0)
+ {
+ nr = 3;
+ regs[i+2] = 1;
+ regs[j+2] = -1;
+ regs_ever_live[j+2] = 1;
+ }
+ if (nr == 3 && regs[i+3] != 0 && regs[j+3] == 0)
+ {
+ nr = 4;
+ regs[i+3] = 1;
+ regs[j+3] = -1;
+ regs_ever_live[j+3] = 1;
+ }
+
+ fprintf (file, "\tmov%s %s,%s\n",
+ ((nr == 4) ? "q" :
+ (nr == 3) ? "t" :
+ (nr == 2) ? "l" : ""),
+ reg_names[i], reg_names[j]);
+ sprintf (tmpstr, "\tmov%s %s,%s\n",
+ ((nr == 4) ? "q" :
+ (nr == 3) ? "t" :
+ (nr == 2) ? "l" : ""),
+ reg_names[j], reg_names[i]);
+ strcat (epilogue_string, tmpstr);
+
+ n_iregs -= nr;
+ i += nr-1;
+ break;
+ }
+ }
+
+ /* N_iregs is now the number of global registers that haven't been saved
+ yet. */
+
+ rsize = (n_iregs * 4);
+ actual_fsize = compute_frame_size (size) + rsize;
+#if 0
+ /* ??? The 1.2.1 compiler does this also. This is meant to round the frame
+ size up to the nearest multiple of 16. I don't know whether this is
+ necessary, or even desirable.
+
+ The frame pointer must be aligned, but the call instruction takes care of
+ that. If we leave the stack pointer unaligned, we may save a little on
+ dynamic stack allocation. And we don't lose, at least according to the
+ i960CA manual. */
+ actual_fsize = (actual_fsize + 15) & ~0xF;
+#endif
+
+ /* Allocate space for register save and locals. */
+ if (actual_fsize > 0)
+ {
+ if (actual_fsize < 32)
+ fprintf (file, "\taddo %d,sp,sp\n", actual_fsize);
+ else
+ fprintf (file, "\tlda\t%d(sp),sp\n", actual_fsize);
+ }
+
+ /* Take hardware register save area created by the call instruction
+ into account, but store them before the argument block area. */
+ offset = 64 + actual_fsize - compute_frame_size (0) - rsize;
+ /* Save registers on stack if needed. */
+ for (i = 0, j = n_iregs; j > 0 && i < 16; i++)
+ {
+ if (regs[i] != -1)
+ continue;
+
+ nr = 1;
+
+ if (i <= 14 && i % 2 == 0 && regs[i+1] == -1 && offset % 2 == 0)
+ nr = 2;
+
+ if (nr == 2 && i <= 12 && i % 4 == 0 && regs[i+2] == -1
+ && offset % 4 == 0)
+ nr = 3;
+
+ if (nr == 3 && regs[i+3] == -1)
+ nr = 4;
+
+ fprintf (file,"\tst%s %s,%d(fp)\n",
+ ((nr == 4) ? "q" :
+ (nr == 3) ? "t" :
+ (nr == 2) ? "l" : ""),
+ reg_names[i], offset);
+ sprintf (tmpstr,"\tld%s %d(fp),%s\n",
+ ((nr == 4) ? "q" :
+ (nr == 3) ? "t" :
+ (nr == 2) ? "l" : ""),
+ offset, reg_names[i]);
+ strcat (epilogue_string, tmpstr);
+ i += nr-1;
+ j -= nr;
+ offset += nr * 4;
+ }
+
+ if (actual_fsize == 0 && size == 0 && rsize == 0)
+ return;
+
+ fprintf (file, "\t#Prologue stats:\n");
+ fprintf (file, "\t# Total Frame Size: %d bytes\n", actual_fsize);
+
+ if (size)
+ fprintf (file, "\t# Local Variable Size: %d bytes\n", size);
+ if (rsize)
+ fprintf (file, "\t# Register Save Size: %d regs, %d bytes\n",
+ n_iregs, rsize);
+ fprintf (file, "\t#End Prologue#\n");
+}
+
+/* Output code for the function profiler. */
+
+void
+output_function_profiler (file, labelno)
+ FILE *file;
+ int labelno;
+{
+ /* The last used parameter register. */
+ int last_parm_reg;
+ int i, j, increment;
+ int varargs_stdarg_function
+ = VARARGS_STDARG_FUNCTION (current_function_decl);
+
+ /* Figure out the last used parameter register. The proper thing to do
+ is to walk incoming args of the function. A function might have live
+ parameter registers even if it has no incoming args. Note that we
+ don't have to save parameter registers g8 to g11 because they are
+ call preserved. */
+
+ /* See also output_function_prologue, which tries to use local registers
+ for preserved call-saved global registers. */
+
+ for (last_parm_reg = 7;
+ last_parm_reg >= 0 && ! regs_ever_live[last_parm_reg];
+ last_parm_reg--)
+ ;
+
+ /* Save parameter registers in regs r4 (20) to r11 (27). */
+
+ for (i = 0, j = 4; i <= last_parm_reg; i += increment, j += increment)
+ {
+ if (i % 4 == 0 && (last_parm_reg - i) >= 3)
+ increment = 4;
+ else if (i % 4 == 0 && (last_parm_reg - i) >= 2)
+ increment = 3;
+ else if (i % 2 == 0 && (last_parm_reg - i) >= 1)
+ increment = 2;
+ else
+ increment = 1;
+
+ fprintf (file, "\tmov%s g%d,r%d\n",
+ (increment == 4 ? "q" : increment == 3 ? "t"
+ : increment == 2 ? "l": ""), i, j);
+ }
+
+ /* If this function uses the arg pointer, then save it in r3 and then
+ set it to zero. */
+
+ if (current_function_args_size != 0 || varargs_stdarg_function)
+ fprintf (file, "\tmov g14,r3\n\tmov 0,g14\n");
+
+ /* Load location address into g0 and call mcount. */
+
+ fprintf (file, "\tlda\tLP%d,g0\n\tcallx\tmcount\n", labelno);
+
+ /* If this function uses the arg pointer, restore it. */
+
+ if (current_function_args_size != 0 || varargs_stdarg_function)
+ fprintf (file, "\tmov r3,g14\n");
+
+ /* Restore parameter registers. */
+
+ for (i = 0, j = 4; i <= last_parm_reg; i += increment, j += increment)
+ {
+ if (i % 4 == 0 && (last_parm_reg - i) >= 3)
+ increment = 4;
+ else if (i % 4 == 0 && (last_parm_reg - i) >= 2)
+ increment = 3;
+ else if (i % 2 == 0 && (last_parm_reg - i) >= 1)
+ increment = 2;
+ else
+ increment = 1;
+
+ fprintf (file, "\tmov%s r%d,g%d\n",
+ (increment == 4 ? "q" : increment == 3 ? "t"
+ : increment == 2 ? "l": ""), j, i);
+ }
+}
+
+/* Output code for the function epilogue. */
+
+void
+i960_function_epilogue (file, size)
+ FILE *file;
+ unsigned int size;
+{
+ if (i960_leaf_ret_reg >= 0)
+ {
+ fprintf (file, "LR%d: ret\n", ret_label);
+ return;
+ }
+
+ if (*epilogue_string == 0)
+ {
+ register rtx tmp;
+
+ /* Emit a return insn, but only if control can fall through to here. */
+
+ tmp = get_last_insn ();
+ while (tmp)
+ {
+ if (GET_CODE (tmp) == BARRIER)
+ return;
+ if (GET_CODE (tmp) == CODE_LABEL)
+ break;
+ if (GET_CODE (tmp) == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (tmp)) == RETURN)
+ return;
+ break;
+ }
+ if (GET_CODE (tmp) == NOTE)
+ {
+ tmp = PREV_INSN (tmp);
+ continue;
+ }
+ break;
+ }
+ fprintf (file, "LR%d: ret\n", ret_label);
+ return;
+ }
+
+ fprintf (file, "LR%d:\n", ret_label);
+
+ fprintf (file, "\t#EPILOGUE#\n");
+
+ /* Output the string created by the prologue which will restore all
+ registers saved by the prologue. */
+
+ if (epilogue_string[0] != '\0')
+ fprintf (file, "%s", epilogue_string);
+
+ /* Must clear g14 on return if this function set it.
+ Only varargs/stdarg functions modify g14. */
+
+ if (VARARGS_STDARG_FUNCTION (current_function_decl))
+ fprintf (file, "\tmov 0,g14\n");
+
+ fprintf (file, "\tret\n");
+ fprintf (file, "\t#End Epilogue#\n");
+}
+
+/* Output code for a call insn. */
+
+char *
+i960_output_call_insn (target, argsize_rtx, arg_pointer, insn)
+ register rtx target, argsize_rtx, arg_pointer, insn;
+{
+ int argsize = INTVAL (argsize_rtx);
+ rtx nexti = next_real_insn (insn);
+ rtx operands[2];
+ int varargs_stdarg_function
+ = VARARGS_STDARG_FUNCTION (current_function_decl);
+
+ operands[0] = target;
+ operands[1] = arg_pointer;
+
+ if (current_function_args_size != 0 || varargs_stdarg_function)
+ output_asm_insn ("mov g14,r3", operands);
+
+ if (argsize > 48)
+ output_asm_insn ("lda %a1,g14", operands);
+ else if (current_function_args_size != 0 || varargs_stdarg_function)
+ output_asm_insn ("mov 0,g14", operands);
+
+ /* The code used to assume that calls to SYMBOL_REFs could not be more
+ than 24 bits away (b vs bx, callj vs callx). This is not true. This
+ feature is now implemented by relaxing in the GNU linker. It can convert
+ bx to b if in range, and callx to calls/call/balx/bal as appropriate. */
+
+ /* Nexti could be zero if the called routine is volatile. */
+ if (optimize && (*epilogue_string == 0) && argsize == 0 && tail_call_ok
+ && (nexti == 0 || GET_CODE (PATTERN (nexti)) == RETURN))
+ {
+ /* Delete following return insn. */
+ if (nexti && no_labels_between_p (insn, nexti))
+ delete_insn (nexti);
+ output_asm_insn ("bx %0", operands);
+ return "# notreached";
+ }
+
+ output_asm_insn ("callx %0", operands);
+
+ /* If the caller sets g14 to the address of the argblock, then the caller
+ must clear it after the return. */
+
+ if (current_function_args_size != 0 || varargs_stdarg_function)
+ output_asm_insn ("mov r3,g14", operands);
+ else if (argsize > 48)
+ output_asm_insn ("mov 0,g14", operands);
+
+ return "";
+}
+
+/* Output code for a return insn. */
+
+char *
+i960_output_ret_insn (insn)
+ register rtx insn;
+{
+ static char lbuf[20];
+
+ if (*epilogue_string != 0)
+ {
+ if (! TARGET_CODE_ALIGN && next_real_insn (insn) == 0)
+ return "";
+
+ sprintf (lbuf, "b LR%d", ret_label);
+ return lbuf;
+ }
+
+ /* Must clear g14 on return if this function set it.
+ Only varargs/stdarg functions modify g14. */
+
+ if (VARARGS_STDARG_FUNCTION (current_function_decl))
+ output_asm_insn ("mov 0,g14", 0);
+
+ if (i960_leaf_ret_reg >= 0)
+ {
+ sprintf (lbuf, "bx (%s)", reg_names[i960_leaf_ret_reg]);
+ return lbuf;
+ }
+ return "ret";
+}
+
+#if 0
+/* Return a character string representing the branch prediction
+ opcode to be tacked on an instruction. This must at least
+ return a null string. */
+
+char *
+i960_br_predict_opcode (lab_ref, insn)
+ rtx lab_ref, insn;
+{
+ if (TARGET_BRANCH_PREDICT)
+ {
+ unsigned long label_uid;
+
+ if (GET_CODE (lab_ref) == CODE_LABEL)
+ label_uid = INSN_UID (lab_ref);
+ else if (GET_CODE (lab_ref) == LABEL_REF)
+ label_uid = INSN_UID (XEXP (lab_ref, 0));
+ else
+ return ".f";
+
+ /* If not optimizing, then the insn_addresses array will not be
+ valid. In this case, always return ".t" since most branches
+ are taken. If optimizing, return .t for backward branches
+ and .f for forward branches. */
+ if (! optimize
+ || insn_addresses[label_uid] < insn_addresses[INSN_UID (insn)])
+ return ".t";
+ return ".f";
+ }
+
+ return "";
+}
+#endif
+
+/* Print the operand represented by rtx X formatted by code CODE. */
+
+void
+i960_print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ char code;
+{
+ enum rtx_code rtxcode = GET_CODE (x);
+
+ if (rtxcode == REG)
+ {
+ switch (code)
+ {
+ case 'D':
+ /* Second reg of a double or quad. */
+ fprintf (file, "%s", reg_names[REGNO (x)+1]);
+ break;
+
+ case 'E':
+ /* Third reg of a quad. */
+ fprintf (file, "%s", reg_names[REGNO (x)+2]);
+ break;
+
+ case 'F':
+ /* Fourth reg of a quad. */
+ fprintf (file, "%s", reg_names[REGNO (x)+3]);
+ break;
+
+ case 0:
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ break;
+
+ default:
+ abort ();
+ }
+ return;
+ }
+ else if (rtxcode == MEM)
+ {
+ output_address (XEXP (x, 0));
+ return;
+ }
+ else if (rtxcode == CONST_INT)
+ {
+ if (INTVAL (x) > 9999 || INTVAL (x) < -999)
+ fprintf (file, "0x%x", INTVAL (x));
+ else
+ fprintf (file, "%d", INTVAL (x));
+ return;
+ }
+ else if (rtxcode == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE d;
+ char dstr[30];
+
+ if (x == CONST0_RTX (GET_MODE (x)))
+ {
+ fprintf (file, "0f0.0");
+ return;
+ }
+ else if (x == CONST1_RTX (GET_MODE (x)))
+ {
+ fprintf (file, "0f1.0");
+ return;
+ }
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ REAL_VALUE_TO_DECIMAL (d, "%#g", dstr);
+ fprintf (file, "0f%s", dstr);
+ return;
+ }
+
+ switch(code)
+ {
+ case 'B':
+ /* Branch or jump, depending on assembler. */
+ if (TARGET_ASM_COMPAT)
+ fputs ("j", file);
+ else
+ fputs ("b", file);
+ break;
+
+ case 'S':
+ /* Sign of condition. */
+ if ((rtxcode == EQ) || (rtxcode == NE) || (rtxcode == GTU)
+ || (rtxcode == LTU) || (rtxcode == GEU) || (rtxcode == LEU))
+ fputs ("o", file);
+ else if ((rtxcode == GT) || (rtxcode == LT)
+ || (rtxcode == GE) || (rtxcode == LE))
+ fputs ("i", file);
+ else
+ abort();
+ break;
+
+ case 'I':
+ /* Inverted condition. */
+ rtxcode = reverse_condition (rtxcode);
+ goto normal;
+
+ case 'X':
+ /* Inverted condition w/ reversed operands. */
+ rtxcode = reverse_condition (rtxcode);
+ /* Fallthrough. */
+
+ case 'R':
+ /* Reversed operand condition. */
+ rtxcode = swap_condition (rtxcode);
+ /* Fallthrough. */
+
+ case 'C':
+ /* Normal condition. */
+ normal:
+ if (rtxcode == EQ) { fputs ("e", file); return; }
+ else if (rtxcode == NE) { fputs ("ne", file); return; }
+ else if (rtxcode == GT) { fputs ("g", file); return; }
+ else if (rtxcode == GTU) { fputs ("g", file); return; }
+ else if (rtxcode == LT) { fputs ("l", file); return; }
+ else if (rtxcode == LTU) { fputs ("l", file); return; }
+ else if (rtxcode == GE) { fputs ("ge", file); return; }
+ else if (rtxcode == GEU) { fputs ("ge", file); return; }
+ else if (rtxcode == LE) { fputs ("le", file); return; }
+ else if (rtxcode == LEU) { fputs ("le", file); return; }
+ else abort ();
+ break;
+
+ case 0:
+ output_addr_const (file, x);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return;
+}
+
+/* Print a memory address as an operand to reference that memory location.
+
+ This is exactly the same as legitimate_address_p, except that it
+ prints addresses instead of recognizing them. */
+
+void
+i960_print_operand_addr (file, addr)
+ FILE *file;
+ register rtx addr;
+{
+ rtx breg, ireg;
+ rtx scale, offset;
+
+ ireg = 0;
+ breg = 0;
+ offset = 0;
+ scale = const1_rtx;
+
+ if (GET_CODE (addr) == REG)
+ breg = addr;
+ else if (CONSTANT_P (addr))
+ offset = addr;
+ else if (GET_CODE (addr) == PLUS)
+ {
+ rtx op0, op1;
+
+ op0 = XEXP (addr, 0);
+ op1 = XEXP (addr, 1);
+
+ if (GET_CODE (op0) == REG)
+ {
+ breg = op0;
+ if (GET_CODE (op1) == REG)
+ ireg = op1;
+ else if (CONSTANT_P (op1))
+ offset = op1;
+ else
+ abort ();
+ }
+ else if (GET_CODE (op0) == PLUS)
+ {
+ if (GET_CODE (XEXP (op0, 0)) == MULT)
+ {
+ ireg = XEXP (XEXP (op0, 0), 0);
+ scale = XEXP (XEXP (op0, 0), 1);
+ if (GET_CODE (XEXP (op0, 1)) == REG)
+ {
+ breg = XEXP (op0, 1);
+ offset = op1;
+ }
+ else
+ abort ();
+ }
+ else if (GET_CODE (XEXP (op0, 0)) == REG)
+ {
+ breg = XEXP (op0, 0);
+ if (GET_CODE (XEXP (op0, 1)) == REG)
+ {
+ ireg = XEXP (op0, 1);
+ offset = op1;
+ }
+ else
+ abort ();
+ }
+ else
+ abort ();
+ }
+ else if (GET_CODE (op0) == MULT)
+ {
+ ireg = XEXP (op0, 0);
+ scale = XEXP (op0, 1);
+ if (GET_CODE (op1) == REG)
+ breg = op1;
+ else if (CONSTANT_P (op1))
+ offset = op1;
+ else
+ abort ();
+ }
+ else
+ abort ();
+ }
+ else if (GET_CODE (addr) == MULT)
+ {
+ ireg = XEXP (addr, 0);
+ scale = XEXP (addr, 1);
+ }
+ else
+ abort ();
+
+ if (offset)
+ output_addr_const (file, offset);
+ if (breg)
+ fprintf (file, "(%s)", reg_names[REGNO (breg)]);
+ if (ireg)
+ fprintf (file, "[%s*%d]", reg_names[REGNO (ireg)], INTVAL (scale));
+}
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On 80960, legitimate addresses are:
+ base ld (g0),r0
+ disp (12 or 32 bit) ld foo,r0
+ base + index ld (g0)[g1*1],r0
+ base + displ ld 0xf00(g0),r0
+ base + index*scale + displ ld 0xf00(g0)[g1*4],r0
+ index*scale + base ld (g0)[g1*4],r0
+ index*scale + displ ld 0xf00[g1*4],r0
+ index*scale ld [g1*4],r0
+ index + base + displ ld 0xf00(g0)[g1*1],r0
+
+ In each case, scale can be 1, 2, 4, 8, or 16. */
+
+/* This is exactly the same as i960_print_operand_addr, except that
+ it recognizes addresses instead of printing them.
+
+ It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
+ convert common non-canonical forms to canonical form so that they will
+ be recognized. */
+
+/* These two macros allow us to accept either a REG or a SUBREG anyplace
+ where a register is valid. */
+
+#define RTX_OK_FOR_BASE_P(X, STRICT) \
+ ((GET_CODE (X) == REG \
+ && (STRICT ? REG_OK_FOR_BASE_P_STRICT (X) : REG_OK_FOR_BASE_P (X))) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && (STRICT ? REG_OK_FOR_BASE_P_STRICT (SUBREG_REG (X)) \
+ : REG_OK_FOR_BASE_P (SUBREG_REG (X)))))
+
+#define RTX_OK_FOR_INDEX_P(X, STRICT) \
+ ((GET_CODE (X) == REG \
+ && (STRICT ? REG_OK_FOR_INDEX_P_STRICT (X) : REG_OK_FOR_INDEX_P (X)))\
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && (STRICT ? REG_OK_FOR_INDEX_P_STRICT (SUBREG_REG (X)) \
+ : REG_OK_FOR_INDEX_P (SUBREG_REG (X)))))
+
+int
+legitimate_address_p (mode, addr, strict)
+ enum machine_mode mode;
+ register rtx addr;
+ int strict;
+{
+ if (RTX_OK_FOR_BASE_P (addr, strict))
+ return 1;
+ else if (CONSTANT_P (addr))
+ return 1;
+ else if (GET_CODE (addr) == PLUS)
+ {
+ rtx op0, op1;
+
+ if (! TARGET_COMPLEX_ADDR && ! reload_completed)
+ return 0;
+
+ op0 = XEXP (addr, 0);
+ op1 = XEXP (addr, 1);
+
+ if (RTX_OK_FOR_BASE_P (op0, strict))
+ {
+ if (RTX_OK_FOR_INDEX_P (op1, strict))
+ return 1;
+ else if (CONSTANT_P (op1))
+ return 1;
+ else
+ return 0;
+ }
+ else if (GET_CODE (op0) == PLUS)
+ {
+ if (GET_CODE (XEXP (op0, 0)) == MULT)
+ {
+ if (! (RTX_OK_FOR_INDEX_P (XEXP (XEXP (op0, 0), 0), strict)
+ && SCALE_TERM_P (XEXP (XEXP (op0, 0), 1))))
+ return 0;
+
+ if (RTX_OK_FOR_BASE_P (XEXP (op0, 1), strict)
+ && CONSTANT_P (op1))
+ return 1;
+ else
+ return 0;
+ }
+ else if (RTX_OK_FOR_BASE_P (XEXP (op0, 0), strict))
+ {
+ if (RTX_OK_FOR_INDEX_P (XEXP (op0, 1), strict)
+ && CONSTANT_P (op1))
+ return 1;
+ else
+ return 0;
+ }
+ else
+ return 0;
+ }
+ else if (GET_CODE (op0) == MULT)
+ {
+ if (! (RTX_OK_FOR_INDEX_P (XEXP (op0, 0), strict)
+ && SCALE_TERM_P (XEXP (op0, 1))))
+ return 0;
+
+ if (RTX_OK_FOR_BASE_P (op1, strict))
+ return 1;
+ else if (CONSTANT_P (op1))
+ return 1;
+ else
+ return 0;
+ }
+ else
+ return 0;
+ }
+ else if (GET_CODE (addr) == MULT)
+ {
+ if (! TARGET_COMPLEX_ADDR && ! reload_completed)
+ return 0;
+
+ return (RTX_OK_FOR_INDEX_P (XEXP (addr, 0), strict)
+ && SCALE_TERM_P (XEXP (addr, 1)));
+ }
+ else
+ return 0;
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ This converts some non-canonical addresses to canonical form so they
+ can be recognized. */
+
+rtx
+legitimize_address (x, oldx, mode)
+ register rtx x;
+ register rtx oldx;
+ enum machine_mode mode;
+{
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ abort ();
+ x = copy_to_reg (x);
+ }
+
+ if (! TARGET_COMPLEX_ADDR && ! reload_completed)
+ return x;
+
+ /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
+ into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
+ created by virtual register instantiation, register elimination, and
+ similar optimizations. */
+ if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
+ && GET_CODE (XEXP (x, 1)) == PLUS)
+ x = gen_rtx (PLUS, Pmode,
+ gen_rtx (PLUS, Pmode, XEXP (x, 0), XEXP (XEXP (x, 1), 0)),
+ XEXP (XEXP (x, 1), 1));
+
+ /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
+ into (plus (plus (mult (reg) (const)) (reg)) (const)). */
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ rtx constant, other;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ constant = XEXP (x, 1);
+ other = XEXP (XEXP (XEXP (x, 0), 1), 1);
+ }
+ else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
+ {
+ constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
+ other = XEXP (x, 1);
+ }
+ else
+ constant = 0;
+
+ if (constant)
+ x = gen_rtx (PLUS, Pmode,
+ gen_rtx (PLUS, Pmode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (XEXP (x, 0), 1), 0)),
+ plus_constant (other, INTVAL (constant)));
+ }
+
+ return x;
+}
+
+#if 0
+/* Return the most stringent alignment that we are willing to consider
+ objects of size SIZE and known alignment ALIGN as having. */
+
+int
+i960_alignment (size, align)
+ int size;
+ int align;
+{
+ int i;
+
+ if (! TARGET_STRICT_ALIGN)
+ if (TARGET_IC_COMPAT2_0 || align >= 4)
+ {
+ i = i960_object_bytes_bitalign (size) / BITS_PER_UNIT;
+ if (i > align)
+ align = i;
+ }
+
+ return align;
+}
+#endif
+
+/* Modes for condition codes. */
+#define C_MODES \
+ ((1 << (int) CCmode) | (1 << (int) CC_UNSmode) | (1<< (int) CC_CHKmode))
+
+/* Modes for single-word (and smaller) quantities. */
+#define S_MODES \
+ (~C_MODES \
+ & ~ ((1 << (int) DImode) | (1 << (int) TImode) \
+ | (1 << (int) DFmode) | (1 << (int) XFmode)))
+
+/* Modes for double-word (and smaller) quantities. */
+#define D_MODES \
+ (~C_MODES \
+ & ~ ((1 << (int) TImode) | (1 << (int) XFmode)))
+
+/* Modes for quad-word quantities. */
+#define T_MODES (~C_MODES)
+
+/* Modes for single-float quantities. */
+#define SF_MODES ((1 << (int) SFmode))
+
+/* Modes for double-float quantities. */
+#define DF_MODES (SF_MODES | (1 << (int) DFmode) | (1 << (int) SCmode))
+
+/* Modes for quad-float quantities. */
+#define XF_MODES (DF_MODES | (1 << (int) XFmode) | (1 << (int) DCmode))
+
+unsigned int hard_regno_mode_ok[FIRST_PSEUDO_REGISTER] = {
+ T_MODES, S_MODES, D_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, D_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, D_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, D_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+
+ XF_MODES, XF_MODES, XF_MODES, XF_MODES, C_MODES};
+
+
+/* Return the minimum alignment of an expression rtx X in bytes. This takes
+ advantage of machine specific facts, such as knowing that the frame pointer
+ is always 16 byte aligned. */
+
+int
+i960_expr_alignment (x, size)
+ rtx x;
+ int size;
+{
+ int align = 1;
+
+ if (x == 0)
+ return 1;
+
+ switch (GET_CODE(x))
+ {
+ case CONST_INT:
+ align = INTVAL(x);
+
+ if ((align & 0xf) == 0)
+ align = 16;
+ else if ((align & 0x7) == 0)
+ align = 8;
+ else if ((align & 0x3) == 0)
+ align = 4;
+ else if ((align & 0x1) == 0)
+ align = 2;
+ else
+ align = 1;
+ break;
+
+ case PLUS:
+ align = MIN (i960_expr_alignment (XEXP (x, 0), size),
+ i960_expr_alignment (XEXP (x, 1), size));
+ break;
+
+ case SYMBOL_REF:
+ /* If this is a valid program, objects are guaranteed to be
+ correctly aligned for whatever size the reference actually is. */
+ align = i960_object_bytes_bitalign (size) / BITS_PER_UNIT;
+ break;
+
+ case REG:
+ if (REGNO (x) == FRAME_POINTER_REGNUM)
+ align = 16;
+ break;
+
+ case ASHIFT:
+ align = i960_expr_alignment (XEXP (x, 0), size);
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ align = align << INTVAL (XEXP (x, 1));
+ align = MIN (align, 16);
+ }
+ break;
+
+ case MULT:
+ align = (i960_expr_alignment (XEXP (x, 0), size) *
+ i960_expr_alignment (XEXP (x, 1), size));
+
+ align = MIN (align, 16);
+ break;
+ }
+
+ return align;
+}
+
+/* Return true if it is possible to reference both BASE and OFFSET, which
+ have alignment at least as great as 4 bytes, as if they had alignment
+ valid for an object of size SIZE. */
+
+int
+i960_improve_align (base, offset, size)
+ rtx base;
+ rtx offset;
+ int size;
+{
+ int i, j;
+
+ /* We have at least a word reference to the object, so we know it has to
+ be aligned at least to 4 bytes. */
+
+ i = MIN (i960_expr_alignment (base, 4),
+ i960_expr_alignment (offset, 4));
+
+ i = MAX (i, 4);
+
+ /* We know the size of the request. If strict align is not enabled, we
+ can guess that the alignment is OK for the requested size. */
+
+ if (! TARGET_STRICT_ALIGN)
+ if ((j = (i960_object_bytes_bitalign (size) / BITS_PER_UNIT)) > i)
+ i = j;
+
+ return (i >= size);
+}
+
+/* Return true if it is possible to access BASE and OFFSET, which have 4 byte
+ (SImode) alignment as if they had 16 byte (TImode) alignment. */
+
+int
+i960_si_ti (base, offset)
+ rtx base;
+ rtx offset;
+{
+ return i960_improve_align (base, offset, 16);
+}
+
+/* Return true if it is possible to access BASE and OFFSET, which have 4 byte
+ (SImode) alignment as if they had 8 byte (DImode) alignment. */
+
+int
+i960_si_di (base, offset)
+ rtx base;
+ rtx offset;
+{
+ return i960_improve_align (base, offset, 8);
+}
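+
+/* For example, with a frame pointer base and a constant offset that is a
+ multiple of 8, i960_si_di succeeds (MIN (16, 8) >= 8) while i960_si_ti
+ needs the offset to be a multiple of 16. When ! TARGET_STRICT_ALIGN, both
+ always succeed, because i960_improve_align then assumes the natural
+ alignment for the requested size. */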
+
+/* Return raw values of size and alignment (in words) for the data
+ type being accessed. These values will be rounded by the caller. */
+
+static void
+i960_arg_size_and_align (mode, type, size_out, align_out)
+ enum machine_mode mode;
+ tree type;
+ int *size_out;
+ int *align_out;
+{
+ int size, align;
+
+ /* Use formal alignment requirements of type being passed, except make
+ it at least a word. If we don't have a type, this is a library call,
+ and the parm has to be of scalar type. In this case, consider its
+ formal alignment requirement to be its size in words. */
+
+ if (mode == BLKmode)
+ size = (int_size_in_bytes (type) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ else if (mode == VOIDmode)
+ {
+ /* End of parm list. */
+ assert (type != 0 && TYPE_MODE (type) == VOIDmode);
+ size = 1;
+ }
+ else
+ size = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ if (type == 0)
+ {
+ /* ??? This is a hack to properly correct the alignment of XFmode
+ values without affecting anything else. */
+ if (size == 3)
+ align = 4;
+ else
+ align = size;
+ }
+ else if (TYPE_ALIGN (type) >= BITS_PER_WORD)
+ align = TYPE_ALIGN (type) / BITS_PER_WORD;
+ else
+ align = 1;
+
+ *size_out = size;
+ *align_out = align;
+}
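+
+/* For example, a DImode argument yields size 2 and alignment 2 (in words),
+ so it must start in an even parameter register. An XFmode libcall operand
+ (type == 0) yields size 3 but alignment 4, per the ??? hack above. */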
+
+/* On the 80960 the first 12 args are in registers and the rest are pushed.
+ Any arg that is bigger than 4 words is placed on the stack and all
+ subsequent arguments are placed on the stack.
+
+ Additionally, parameters with an alignment requirement stronger than
+ a word must be aligned appropriately. Note that this means that a
+ 64 bit object with a 32 bit alignment is not 64 bit aligned and may be
+ passed in an odd/even register pair. */
+
+/* Update CUM to advance past an argument described by MODE and TYPE. */
+
+void
+i960_function_arg_advance (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int size, align;
+
+ i960_arg_size_and_align (mode, type, &size, &align);
+
+ if (size > 4 || cum->ca_nstackparms != 0
+ || (size + ROUND_PARM (cum->ca_nregparms, align)) > NPARM_REGS
+ || MUST_PASS_IN_STACK (mode, type))
+ {
+ /* Indicate that all the registers are in use, even if all are not,
+ so va_start will compute the right value. */
+ cum->ca_nregparms = NPARM_REGS;
+ cum->ca_nstackparms = ROUND_PARM (cum->ca_nstackparms, align) + size;
+ }
+ else
+ cum->ca_nregparms = ROUND_PARM (cum->ca_nregparms, align) + size;
+}
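+
+/* For example, after advancing past two SImode arguments and one DImode
+ argument, ca_nstackparms is still 0 and ca_nregparms is 4: the DImode
+ argument was rounded up to an even register and occupies g2/g3. */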
+
+/* Return the register that the argument described by MODE and TYPE is
+ passed in, or else return 0 if it is passed on the stack. */
+
+rtx
+i960_function_arg (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ rtx ret;
+ int size, align;
+
+ i960_arg_size_and_align (mode, type, &size, &align);
+
+ if (size > 4 || cum->ca_nstackparms != 0
+ || (size + ROUND_PARM (cum->ca_nregparms, align)) > NPARM_REGS
+ || MUST_PASS_IN_STACK (mode, type))
+ {
+ cum->ca_nstackparms = ROUND_PARM (cum->ca_nstackparms, align);
+ ret = 0;
+ }
+ else
+ {
+ cum->ca_nregparms = ROUND_PARM (cum->ca_nregparms, align);
+ ret = gen_rtx (REG, mode, cum->ca_nregparms);
+ }
+
+ return ret;
+}
+
+/* Floating-point support. */
+
+void
+i960_output_long_double (file, value)
+ FILE *file;
+ REAL_VALUE_TYPE value;
+{
+ long value_long[3];
+ char dstr[30];
+
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (value, value_long);
+ REAL_VALUE_TO_DECIMAL (value, "%.20g", dstr);
+
+ fprintf (file,
+ "\t.word\t0x%08lx\t\t# %s\n\t.word\t0x%08lx\n\t.word\t0x%08lx\n",
+ value_long[0], dstr, value_long[1], value_long[2]);
+ fprintf (file, "\t.word\t0x0\n");
+}
+
+void
+i960_output_double (file, value)
+ FILE *file;
+ REAL_VALUE_TYPE value;
+{
+ long value_long[2];
+ char dstr[30];
+
+ REAL_VALUE_TO_TARGET_DOUBLE (value, value_long);
+ REAL_VALUE_TO_DECIMAL (value, "%.20g", dstr);
+
+ fprintf (file, "\t.word\t0x%08lx\t\t# %s\n\t.word\t0x%08lx\n",
+ value_long[0], dstr, value_long[1]);
+}
+
+void
+i960_output_float (file, value)
+ FILE *file;
+ REAL_VALUE_TYPE value;
+{
+ long value_long;
+ char dstr[30];
+
+ REAL_VALUE_TO_TARGET_SINGLE (value, value_long);
+ REAL_VALUE_TO_DECIMAL (value, "%.12g", dstr);
+
+ fprintf (file, "\t.word\t0x%08lx\t\t# %s (float)\n", value_long, dstr);
+}
+
+/* Return the number of bits that an object of size N bytes is aligned to. */
+
+int
+i960_object_bytes_bitalign (n)
+ int n;
+{
+ if (n > 8) n = 128;
+ else if (n > 4) n = 64;
+ else if (n > 2) n = 32;
+ else if (n > 1) n = 16;
+ else n = 8;
+
+ return n;
+}
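+
+/* For example, a 3 byte object is aligned to 32 bits, a 6 byte object to
+ 64 bits, and anything larger than 8 bytes to 128 bits. */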
+
+/* Compute the alignment for an aggregate type TSIZE.
+ Alignment is MAX (greatest member alignment,
+ MIN (pragma align, structure size alignment)). */
+
+int
+i960_round_align (align, tsize)
+ int align;
+ tree tsize;
+{
+ int new_align;
+
+ if (TREE_CODE (tsize) != INTEGER_CST)
+ return align;
+
+ new_align = i960_object_bytes_bitalign (TREE_INT_CST_LOW (tsize)
+ / BITS_PER_UNIT);
+ /* Handle #pragma align. */
+ if (new_align > i960_maxbitalignment)
+ new_align = i960_maxbitalignment;
+
+ if (align < new_align)
+ align = new_align;
+
+ return align;
+}
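+
+/* For example, a 6 byte structure whose members only require 16 bit
+ alignment is rounded up to 64 bit alignment, unless a #pragma align has
+ lowered i960_maxbitalignment below 64. */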
+
+/* Do any needed setup for a varargs function. For the i960, we must
+ create a register parameter block if one doesn't exist, and then copy
+ all register parameters to memory. */
+
+void
+i960_setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int *pretend_size;
+ int no_rtl;
+{
+ /* Note: for a varargs fn with only a va_alist argument, this is 0. */
+ int first_reg = cum->ca_nregparms;
+
+ /* Copy only unnamed register arguments to memory. If there are
+ any stack parms, there are no unnamed arguments in registers, and
+ an argument block was already allocated by the caller.
+ Remember that any arg bigger than 4 words is passed on the stack as
+ are all subsequent args.
+
+ If there are no stack arguments but there are exactly NPARM_REGS
+ registers, either there were no extra arguments or the caller
+ allocated an argument block. */
+
+ if (cum->ca_nstackparms == 0 && first_reg < NPARM_REGS && !no_rtl)
+ {
+ rtx label = gen_label_rtx ();
+ rtx regblock;
+
+ /* If arg_pointer_rtx == 0, no arguments were passed on the stack
+ and we need to allocate a chunk to save the registers (if any
+ arguments were passed on the stack the caller would allocate the
+ 48 bytes as well). We must allocate all 48 bytes (12*4) because
+ va_start assumes it. */
+ emit_insn (gen_cmpsi (arg_pointer_rtx, const0_rtx));
+ emit_jump_insn (gen_bne (label));
+ emit_insn (gen_rtx (SET, VOIDmode, arg_pointer_rtx,
+ stack_pointer_rtx));
+ emit_insn (gen_rtx (SET, VOIDmode, stack_pointer_rtx,
+ memory_address (SImode,
+ plus_constant (stack_pointer_rtx,
+ 48))));
+ emit_label (label);
+
+ /* ??? Note that we unnecessarily store one extra register for stdarg
+ fns. We could optimize this, but it's kept as is for now. */
+ regblock = gen_rtx (MEM, BLKmode,
+ plus_constant (arg_pointer_rtx,
+ first_reg * 4));
+ move_block_from_reg (first_reg, regblock,
+ NPARM_REGS - first_reg,
+ (NPARM_REGS - first_reg) * UNITS_PER_WORD);
+ }
+}
+
+/* Calculate the final size of the reg parm stack space for the current
+ function, based on how many bytes would be allocated on the stack. */
+
+int
+i960_final_reg_parm_stack_space (const_size, var_size)
+ int const_size;
+ tree var_size;
+{
+ if (var_size || const_size > 48)
+ return 48;
+ else
+ return 0;
+}
+
+/* Calculate the size of the reg parm stack space. This is a bit complicated
+ on the i960. */
+
+int
+i960_reg_parm_stack_space (fndecl)
+ tree fndecl;
+{
+ /* In this case, we are called from emit_library_call, and we don't need
+ to pretend we have more space for parameters than what's apparent. */
+ if (fndecl == 0)
+ return 0;
+
+ /* In this case, we are called from locate_and_pad_parms when we're
+ not IN_REGS, so we have an arg block. */
+ if (fndecl != current_function_decl)
+ return 48;
+
+ /* Otherwise, we have an arg block if the current function has more than
+ 48 bytes of parameters. */
+ if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
+ return 48;
+ else
+ return 0;
+}
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+
+enum reg_class
+secondary_reload_class (class, mode, in)
+ enum reg_class class;
+ enum machine_mode mode;
+ rtx in;
+{
+ int regno = -1;
+
+ if (GET_CODE (in) == REG || GET_CODE (in) == SUBREG)
+ regno = true_regnum (in);
+
+ /* We can place anything into LOCAL_OR_GLOBAL_REGS and can put
+ LOCAL_OR_GLOBAL_REGS into anything. */
+ if (class == LOCAL_OR_GLOBAL_REGS || class == LOCAL_REGS
+ || class == GLOBAL_REGS || (regno >= 0 && regno < 32))
+ return NO_REGS;
+
+ /* We can place any hard register, 0.0, and 1.0 into FP_REGS. */
+ if (class == FP_REGS
+ && ((regno >= 0 && regno < FIRST_PSEUDO_REGISTER)
+ || in == CONST0_RTX (mode) || in == CONST1_RTX (mode)))
+ return NO_REGS;
+
+ return LOCAL_OR_GLOBAL_REGS;
+}
+
+/* Look at the opcode P, and set i960_last_insn_type to indicate which
+ function unit it executed on. */
+
+/* ??? This would make more sense as an attribute. */
+
+void
+i960_scan_opcode (p)
+ char *p;
+{
+ switch (*p)
+ {
+ case 'a':
+ case 'd':
+ case 'e':
+ case 'm':
+ case 'n':
+ case 'o':
+ case 'r':
+ /* Ret is not actually of type REG, but it won't matter, because no
+ insn will ever follow it. */
+ case 'u':
+ case 'x':
+ i960_last_insn_type = I_TYPE_REG;
+ break;
+
+ case 'b':
+ if (p[1] == 'x' || p[3] == 'x')
+ i960_last_insn_type = I_TYPE_MEM;
+ else
+ i960_last_insn_type = I_TYPE_CTRL;
+ break;
+
+ case 'f':
+ case 't':
+ i960_last_insn_type = I_TYPE_CTRL;
+ break;
+
+ case 'c':
+ if (p[1] == 'a')
+ {
+ if (p[4] == 'x')
+ i960_last_insn_type = I_TYPE_MEM;
+ else
+ i960_last_insn_type = I_TYPE_CTRL;
+ }
+ else if (p[1] == 'm')
+ {
+ if (p[3] == 'd')
+ i960_last_insn_type = I_TYPE_REG;
+ else if (p[4] == 'b' || p[4] == 'j')
+ i960_last_insn_type = I_TYPE_CTRL;
+ else
+ i960_last_insn_type = I_TYPE_REG;
+ }
+ else
+ i960_last_insn_type = I_TYPE_REG;
+ break;
+
+ case 'l':
+ i960_last_insn_type = I_TYPE_MEM;
+ break;
+
+ case 's':
+ if (p[1] == 't')
+ i960_last_insn_type = I_TYPE_MEM;
+ else
+ i960_last_insn_type = I_TYPE_REG;
+ break;
+ }
+}
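+
+/* For example, "addo" and "mov" classify as I_TYPE_REG, "ld" and "st" as
+ I_TYPE_MEM, "b", "be" and "call" as I_TYPE_CTRL, and the indirect forms
+ "bx", "balx" and "callx" as I_TYPE_MEM. */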
diff --git a/gnu/usr.bin/gcc/config/i960/i960.h b/gnu/usr.bin/gcc/config/i960/i960.h
new file mode 100644
index 00000000000..7bde093a4c4
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/i960.h
@@ -0,0 +1,1527 @@
+/* Definitions of target machine for GNU compiler, for Intel 80960
+ Copyright (C) 1992, 1993, 1995 Free Software Foundation, Inc.
+ Contributed by Steven McGeady, Intel Corp.
+ Additional Work by Glenn Colon-Bonet, Jonathan Shapiro, Andy Wilson
+ Converted to GCC 2.0 by Jim Wilson and Michael Tiemann, Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Note that some other tm.h files may include this one and then override
+ many of the definitions that relate to assembler syntax. */
+
+/* Names to predefine in the preprocessor for this target machine. */
+#define CPP_PREDEFINES "-Di960 -Di80960 -DI960 -DI80960 -Acpu(i960) -Amachine(i960)"
+
+/* Name to predefine in the preprocessor for processor variations. */
+#define CPP_SPEC "%{mic*:-D__i960\
+ %{mka:-D__i960KA}%{mkb:-D__i960KB}\
+ %{msa:-D__i960SA}%{msb:-D__i960SB}\
+ %{mmc:-D__i960MC}\
+ %{mca:-D__i960CA}%{mcc:-D__i960CC}\
+ %{mcf:-D__i960CF}}\
+ %{mka:-D__i960KA__ -D__i960_KA__}\
+ %{mkb:-D__i960KB__ -D__i960_KB__}\
+ %{msa:-D__i960SA__ -D__i960_SA__}\
+ %{msb:-D__i960SB__ -D__i960_SB__}\
+ %{mmc:-D__i960MC__ -D__i960_MC__}\
+ %{mca:-D__i960CA__ -D__i960_CA__}\
+ %{mcc:-D__i960CC__ -D__i960_CC__}\
+ %{mcf:-D__i960CF__ -D__i960_CF__}\
+ %{!mka:%{!mkb:%{!msa:%{!msb:%{!mmc:%{!mca:\
+ %{!mcc:%{!mcf:-D__i960_KB -D__i960KB__ %{mic*:-D__i960KB}}}}}}}}}"
+
+/* -mic* options make characters signed by default. */
+/* Use #if rather than ?: because MIPS C compiler rejects ?: in
+ initializers. */
+#if DEFAULT_SIGNED_CHAR
+#define SIGNED_CHAR_SPEC "%{funsigned-char:-D__CHAR_UNSIGNED__}"
+#else
+#define SIGNED_CHAR_SPEC "%{!fsigned-char:%{!mic*:-D__CHAR_UNSIGNED__}}"
+#endif
+
+/* Specs for the compiler, to handle processor variations.
+ If the user gives an explicit -gstabs or -gcoff option, then do not
+ try to add an implicit one, as this will fail. */
+#define CC1_SPEC \
+ "%{!mka:%{!mkb:%{!msa:%{!msb:%{!mmc:%{!mca:%{!mcc:%{!mcf:-mkb}}}}}}}}\
+ %{!gs*:%{!gc*:%{mbout:%{g*:-gstabs}}\
+ %{mcoff:%{g*:-gcoff}}\
+ %{!mbout:%{!mcoff:%{g*:-gstabs}}}}}"
+
+/* Specs for the assembler, to handle processor variations.
+ For compatibility with Intel's gnu960 tool chain, pass -A options to
+ the assembler. */
+#define ASM_SPEC \
+ "%{mka:-AKA}%{mkb:-AKB}%{msa:-ASA}%{msb:-ASB}\
+ %{mmc:-AMC}%{mca:-ACA}%{mcc:-ACC}%{mcf:-ACF}\
+ %{!mka:%{!mkb:%{!msa:%{!msb:%{!mmc:%{!mca:%{!mcc:%{!mcf:-AKB}}}}}}}}\
+ %{mlink-relax:-linkrelax}"
+
+/* Specs for the linker, to handle processor variations.
+ For compatibility with Intel's gnu960 tool chain, pass -F and -A options
+ to the linker. */
+#define LINK_SPEC \
+ "%{mka:-AKA}%{mkb:-AKB}%{msa:-ASA}%{msb:-ASB}\
+ %{mmc:-AMC}%{mca:-ACA}%{mcc:-ACC}%{mcf:-ACF}\
+ %{!mka:%{!mkb:%{!msa:%{!msb:%{!mmc:%{!mca:%{!mcc:%{!mcf:-AKB}}}}}}}}\
+ %{mbout:-Fbout}%{mcoff:-Fcoff}\
+ %{mlink-relax:-relax}"
+
+/* Specs for the libraries to link with, to handle processor variations.
+ Compatible with Intel's gnu960 tool chain. */
+#define LIB_SPEC "%{!nostdlib:-lcg %{p:-lprof}%{pg:-lgprof}\
+ %{mka:-lfpg}%{msa:-lfpg}%{mca:-lfpg}%{mcf:-lfpg} -lgnu}"
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* Do leaf procedure and tail call optimizations for -O2 and higher. */
+#define OPTIMIZATION_OPTIONS(LEVEL) \
+{ \
+ if ((LEVEL) >= 2) \
+ { \
+ target_flags |= TARGET_FLAG_LEAFPROC; \
+ target_flags |= TARGET_FLAG_TAILCALL; \
+ } \
+}
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION fprintf (stderr," (intel 80960)");
+
+/* Generate DBX debugging information. */
+#define DBX_DEBUGGING_INFO
+
+/* Generate SDB style debugging information. */
+#define SDB_DEBUGGING_INFO
+
+/* Generate DBX_DEBUGGING_INFO by default. */
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* Redefine this to print in hex and adjust values like GNU960. The extra
+ bit is used to handle the type long double. Gcc does not support long
+ double in sdb output, but we do support the non-standard format. */
+#define PUT_SDB_TYPE(A) \
+ fprintf (asm_out_file, "\t.type\t0x%x;", (A & 0xf) + 2 * (A & ~0xf))
+
+/* Handle pragmas for compatibility with Intel's compilers. */
+#define HANDLE_PRAGMA(FILE) process_pragma (FILE)
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+/* 960 architecture with floating-point. */
+#define TARGET_FLAG_NUMERICS 0x01
+#define TARGET_NUMERICS (target_flags & TARGET_FLAG_NUMERICS)
+
+/* 960 architecture with memory management. */
+/* ??? Not used currently. */
+#define TARGET_FLAG_PROTECTED 0x02
+#define TARGET_PROTECTED (target_flags & TARGET_FLAG_PROTECTED)
+
+/* The following three are mainly used to provide a little sanity checking
+ against the -mARCH flags given. */
+
+/* Nonzero if we should generate code for the KA and similar processors.
+ No FPU, no microcode instructions. */
+#define TARGET_FLAG_K_SERIES 0x04
+#define TARGET_K_SERIES (target_flags & TARGET_FLAG_K_SERIES)
+
+/* Nonzero if we should generate code for the MC processor.
+ Not really different from KB for our purposes. */
+#define TARGET_FLAG_MC 0x08
+#define TARGET_MC (target_flags & TARGET_FLAG_MC)
+
+/* Nonzero if we should generate code for the CA processor.
+ Enables different optimization strategies. */
+#define TARGET_FLAG_C_SERIES 0x10
+#define TARGET_C_SERIES (target_flags & TARGET_FLAG_C_SERIES)
+
+/* Nonzero if we should generate leaf-procedures when we find them.
+ You may not want to do this because leaf-proc entries are
+ slower when not entered via BAL - this would be true when
+ a linker not supporting the optimization is used. */
+#define TARGET_FLAG_LEAFPROC 0x20
+#define TARGET_LEAFPROC (target_flags & TARGET_FLAG_LEAFPROC)
+
+/* Nonzero if we should perform tail-call optimizations when we find them.
+ You may not want to do this because the detection of cases where
+ this is not valid is not totally complete. */
+#define TARGET_FLAG_TAILCALL 0x40
+#define TARGET_TAILCALL (target_flags & TARGET_FLAG_TAILCALL)
+
+/* Nonzero if use of a complex addressing mode is a win on this implementation.
+ Complex addressing modes are probably not worthwhile on the K-series,
+ but they definitely are on the C-series. */
+#define TARGET_FLAG_COMPLEX_ADDR 0x80
+#define TARGET_COMPLEX_ADDR (target_flags & TARGET_FLAG_COMPLEX_ADDR)
+
+/* Align code to 8 byte boundaries for faster fetching. */
+#define TARGET_FLAG_CODE_ALIGN 0x100
+#define TARGET_CODE_ALIGN (target_flags & TARGET_FLAG_CODE_ALIGN)
+
+/* Append branch prediction suffixes to branch opcodes. */
+/* ??? Not used currently. */
+#define TARGET_FLAG_BRANCH_PREDICT 0x200
+#define TARGET_BRANCH_PREDICT (target_flags & TARGET_FLAG_BRANCH_PREDICT)
+
+/* Forces prototype and return promotions. */
+/* ??? This does not work. */
+#define TARGET_FLAG_CLEAN_LINKAGE 0x400
+#define TARGET_CLEAN_LINKAGE (target_flags & TARGET_FLAG_CLEAN_LINKAGE)
+
+/* For compatibility with iC960 v3.0. */
+#define TARGET_FLAG_IC_COMPAT3_0 0x800
+#define TARGET_IC_COMPAT3_0 (target_flags & TARGET_FLAG_IC_COMPAT3_0)
+
+/* For compatibility with iC960 v2.0. */
+#define TARGET_FLAG_IC_COMPAT2_0 0x1000
+#define TARGET_IC_COMPAT2_0 (target_flags & TARGET_FLAG_IC_COMPAT2_0)
+
+/* If no unaligned accesses are to be permitted. */
+#define TARGET_FLAG_STRICT_ALIGN 0x2000
+#define TARGET_STRICT_ALIGN (target_flags & TARGET_FLAG_STRICT_ALIGN)
+
+/* For compatibility with iC960 assembler. */
+#define TARGET_FLAG_ASM_COMPAT 0x4000
+#define TARGET_ASM_COMPAT (target_flags & TARGET_FLAG_ASM_COMPAT)
+
+/* For compatibility with the gcc960 v1.2 compiler. Use the old structure
+ alignment rules. Also, turns on STRICT_ALIGNMENT. */
+#define TARGET_FLAG_OLD_ALIGN 0x8000
+#define TARGET_OLD_ALIGN (target_flags & TARGET_FLAG_OLD_ALIGN)
+
+extern int target_flags;
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+/* ??? Not all ten of these architecture variations actually exist, but I
+ am not sure which are real and which aren't. */
+
+#define TARGET_SWITCHES \
+ { {"sa", (TARGET_FLAG_K_SERIES|TARGET_FLAG_COMPLEX_ADDR)},\
+ {"sb", (TARGET_FLAG_NUMERICS|TARGET_FLAG_K_SERIES| \
+ TARGET_FLAG_COMPLEX_ADDR)},\
+/* {"sc", (TARGET_FLAG_NUMERICS|TARGET_FLAG_PROTECTED|\
+ TARGET_FLAG_MC|TARGET_FLAG_COMPLEX_ADDR)},*/ \
+ {"ka", (TARGET_FLAG_K_SERIES|TARGET_FLAG_COMPLEX_ADDR)},\
+ {"kb", (TARGET_FLAG_NUMERICS|TARGET_FLAG_K_SERIES| \
+ TARGET_FLAG_COMPLEX_ADDR)},\
+/* {"kc", (TARGET_FLAG_NUMERICS|TARGET_FLAG_PROTECTED|\
+ TARGET_FLAG_MC|TARGET_FLAG_COMPLEX_ADDR)},*/ \
+ {"mc", (TARGET_FLAG_NUMERICS|TARGET_FLAG_PROTECTED|\
+ TARGET_FLAG_MC|TARGET_FLAG_COMPLEX_ADDR)},\
+ {"ca", (TARGET_FLAG_C_SERIES|TARGET_FLAG_BRANCH_PREDICT|\
+ TARGET_FLAG_CODE_ALIGN|TARGET_FLAG_COMPLEX_ADDR)},\
+/* {"cb", (TARGET_FLAG_NUMERICS|TARGET_FLAG_C_SERIES|\
+ TARGET_FLAG_BRANCH_PREDICT|TARGET_FLAG_CODE_ALIGN)},\
+ {"cc", (TARGET_FLAG_NUMERICS|TARGET_FLAG_PROTECTED|\
+ TARGET_FLAG_C_SERIES|TARGET_FLAG_BRANCH_PREDICT|\
+ TARGET_FLAG_CODE_ALIGN)}, */ \
+ {"cf", (TARGET_FLAG_C_SERIES|TARGET_FLAG_BRANCH_PREDICT|\
+ TARGET_FLAG_CODE_ALIGN|TARGET_FLAG_COMPLEX_ADDR)},\
+ {"numerics", (TARGET_FLAG_NUMERICS)}, \
+ {"soft-float", -(TARGET_FLAG_NUMERICS)}, \
+ {"leaf-procedures", TARGET_FLAG_LEAFPROC}, \
+ {"no-leaf-procedures",-(TARGET_FLAG_LEAFPROC)}, \
+ {"tail-call",TARGET_FLAG_TAILCALL}, \
+ {"no-tail-call",-(TARGET_FLAG_TAILCALL)}, \
+ {"complex-addr",TARGET_FLAG_COMPLEX_ADDR}, \
+ {"no-complex-addr",-(TARGET_FLAG_COMPLEX_ADDR)}, \
+ {"code-align",TARGET_FLAG_CODE_ALIGN}, \
+ {"no-code-align",-(TARGET_FLAG_CODE_ALIGN)}, \
+ {"clean-linkage", (TARGET_FLAG_CLEAN_LINKAGE)}, \
+ {"no-clean-linkage", -(TARGET_FLAG_CLEAN_LINKAGE)}, \
+ {"ic-compat", TARGET_FLAG_IC_COMPAT2_0}, \
+ {"ic2.0-compat", TARGET_FLAG_IC_COMPAT2_0}, \
+ {"ic3.0-compat", TARGET_FLAG_IC_COMPAT3_0}, \
+ {"asm-compat",TARGET_FLAG_ASM_COMPAT}, \
+ {"intel-asm",TARGET_FLAG_ASM_COMPAT}, \
+ {"strict-align", TARGET_FLAG_STRICT_ALIGN}, \
+ {"no-strict-align", -(TARGET_FLAG_STRICT_ALIGN)}, \
+ {"old-align", (TARGET_FLAG_OLD_ALIGN|TARGET_FLAG_STRICT_ALIGN)}, \
+ {"no-old-align", -(TARGET_FLAG_OLD_ALIGN|TARGET_FLAG_STRICT_ALIGN)}, \
+ {"link-relax", 0}, \
+ {"no-link-relax", 0}, \
+ { "", TARGET_DEFAULT}}
+
+/* Override conflicting target switch options.
+ Doesn't actually detect if more than one -mARCH option is given, but
+ does handle the case of two blatantly conflicting -mARCH options. */
+#define OVERRIDE_OPTIONS \
+{ \
+ if (TARGET_K_SERIES && TARGET_C_SERIES) \
+ { \
+ warning ("conflicting architectures defined - using C series", 0); \
+ target_flags &= ~TARGET_FLAG_K_SERIES; \
+ } \
+ if (TARGET_K_SERIES && TARGET_MC) \
+ { \
+ warning ("conflicting architectures defined - using K series", 0); \
+ target_flags &= ~TARGET_FLAG_MC; \
+ } \
+ if (TARGET_C_SERIES && TARGET_MC) \
+ { \
+ warning ("conflicting architectures defined - using C series", 0);\
+ target_flags &= ~TARGET_FLAG_MC; \
+ } \
+ if (TARGET_IC_COMPAT3_0) \
+ { \
+ flag_short_enums = 1; \
+ flag_signed_char = 1; \
+ target_flags |= TARGET_FLAG_CLEAN_LINKAGE; \
+ if (TARGET_IC_COMPAT2_0) \
+ { \
+ warning ("iC2.0 and iC3.0 are incompatible - using iC3.0", 0); \
+ target_flags &= ~TARGET_FLAG_IC_COMPAT2_0; \
+ } \
+ } \
+ if (TARGET_IC_COMPAT2_0) \
+ { \
+ flag_signed_char = 1; \
+ target_flags |= TARGET_FLAG_CLEAN_LINKAGE; \
+ } \
+ i960_initialize (); \
+}
+
+/* Don't enable anything by default. The user is expected to supply a -mARCH
+ option. If none is given, then -mkb is added by CC1_SPEC. */
+#define TARGET_DEFAULT 0
+
+/* Target machine storage layout. */
+
+/* Define for cross-compilation from a host with a different float format
+ or endianness, as well as to support 80 bit long doubles on the i960. */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ The i960 can be either big endian or little endian. We only support
+ little endian, which is the most common. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Number of bits in an addressable storage unit. */
+#define BITS_PER_UNIT 8
+
+/* Bitfields cannot cross word boundaries. */
+#define BITFIELD_NBYTES_LIMITED 1
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer. See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Width in bits of a long double. Identical to double for now. */
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Allocation boundary (in *bits*) for storing pointers in memory. */
+#define POINTER_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 128
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 128
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* This makes zero-length anonymous fields lay the next field
+ at a word boundary. It also makes the whole struct have
+ at least word alignment if there are any bitfields at all. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* No data type wants to be aligned rounder than this.
+ Extended precision floats get 4-word alignment. */
+#define BIGGEST_ALIGNMENT 128
+
+/* Define this if move instructions will actually fail to work
+ when given unaligned data.
+ 80960 will work even with unaligned data, but it is slow. */
+#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
+
+/* Specify alignment for string literals (which might be higher than the
+ base type's minimal alignment requirement). This allows strings to be
+ aligned on word boundaries, and optimizes calls to the str* and mem*
+ library functions. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && i960_object_bytes_bitalign (int_size_in_bytes (TREE_TYPE (EXP))) > (ALIGN) \
+ ? i960_object_bytes_bitalign (int_size_in_bytes (TREE_TYPE (EXP))) \
+ : (ALIGN))
+
+/* Make XFmode floating point quantities be 128 bit aligned. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == XFmode \
+ && (ALIGN) < 128 ? 128 : (ALIGN))
+
+/* Macros to determine size of aggregates (structures and unions
+ in C). Normally, these may be defined to simply return the maximum
+ alignment and simple rounded-up size, but on some machines (like
+ the i960), the total size of a structure is based on a non-trivial
+ rounding method. */
+
+#define ROUND_TYPE_ALIGN(TYPE, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (TYPE) == REAL_TYPE && TYPE_MODE (TYPE) == XFmode) \
+ ? 128 /* Put 80 bit floating point elements on 128 bit boundaries. */ \
+ : ((!TARGET_OLD_ALIGN && TREE_CODE (TYPE) == RECORD_TYPE) \
+ ? i960_round_align (MAX ((COMPUTED), (SPECIFIED)), TYPE_SIZE (TYPE)) \
+ : MAX ((COMPUTED), (SPECIFIED))))
+
+#define ROUND_TYPE_SIZE(TYPE, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (TYPE) == REAL_TYPE && TYPE_MODE (TYPE) == XFmode) \
+ ? build_int_2 (128, 0) : (COMPUTED))
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ Registers 0-15 are the global registers (g0-g15).
+ Registers 16-31 are the local registers (r0-r15).
+ Register 32-35 are the fp registers (fp0-fp3).
+ Register 36 is the condition code register.
+ Register 37 is unused. */
+
+#define FIRST_PSEUDO_REGISTER 38
+
+/* 1 for registers that have pervasive standard uses and are not available
+ for the register allocator. On 80960, this includes the frame pointer
+ (g15), the previous FP (r0), the stack pointer (r1), the return
+ instruction pointer (r2), and the argument pointer (g14). */
+#define FIXED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ 1, 1, 1, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+/* On the 80960, note that:
+ g0..g3 are used for return values,
+ g0..g7 may always be used for parameters,
+ g8..g11 may be used for parameters, but are preserved if they aren't,
+ g12 is always preserved, but otherwise unused,
+ g13 is the struct return ptr if used, or temp, but may be trashed,
+ g14 is the leaf return ptr or the arg block ptr otherwise zero,
+ must be reset to zero before returning if it was used,
+ g15 is the frame pointer,
+ r0 is the previous FP,
+ r1 is the stack pointer,
+ r2 is the return instruction pointer,
+ r3-r15 are always available,
+ r3 is clobbered by calls in functions that use the arg pointer
+ r4-r11 may be clobbered by the mcount call when profiling
+ r4-r15 if otherwise unused may be used for preserving global registers
+ fp0..fp3 are never available. */
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 1, 1, 1, \
+ 1, 1, 1, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1}
+
+/* If no fp unit, make all of the fp registers fixed so that they can't
+ be used. */
+#define CONDITIONAL_REGISTER_USAGE \
+ if (! TARGET_NUMERICS) { \
+ fixed_regs[32] = fixed_regs[33] = fixed_regs[34] = fixed_regs[35] = 1;\
+ } \
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On 80960, ordinary registers hold 32 bits worth, but can be ganged
+ together to hold double or extended precision floating point numbers,
+ and the floating point registers hold any size floating point number. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((REGNO) < 32 \
+ ? (((MODE) == VOIDmode) \
+ ? 1 : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) \
+ : ((REGNO) < FIRST_PSEUDO_REGISTER) ? 1 : 0)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On 80960, the cpu registers can hold any mode but the float registers
+ can only hold SFmode, DFmode, or XFmode. */
+extern unsigned int hard_regno_mode_ok[FIRST_PSEUDO_REGISTER];
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((hard_regno_mode_ok[REGNO] & (1 << (int) (MODE))) != 0)
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) || GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* 80960 pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 17
+
+/* Actual top-of-stack address is same as
+ the contents of the stack pointer register. */
+#define STACK_POINTER_OFFSET (-current_function_outgoing_args_size)
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 15
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+/* ??? It isn't clear to me why this is here. Perhaps because of a bug (since
+ fixed) in the definition of INITIAL_FRAME_POINTER_OFFSET which would have
+ caused this to fail. */
+#define FRAME_POINTER_REQUIRED (! leaf_function_p ())
+
+/* C statement to store the difference between the frame pointer
+ and the stack pointer values immediately after the function prologue.
+
+ Since the stack grows upward on the i960, this must be a negative number.
+ This includes the 64 byte hardware register save area and the size of
+ the frame. */
+
+#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
+ do { (VAR) = - (64 + compute_frame_size (get_frame_size ())); } while (0)
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 14
+
+/* Register in which static-chain is passed to a function.
+ On i960, we use r3. */
+#define STATIC_CHAIN_REGNUM 19
+
+/* Functions which return large structures get the address
+ to place the wanted value at in g13. */
+
+#define STRUCT_VALUE_REGNUM 13
+
+/* The order in which to allocate registers. */
+
+#define REG_ALLOC_ORDER \
+{ 4, 5, 6, 7, 0, 1, 2, 3, 13, /* g4, g5, g6, g7, g0, g1, g2, g3, g13 */ \
+ 20, 21, 22, 23, 24, 25, 26, 27,/* r4, r5, r6, r7, r8, r9, r10, r11 */ \
+ 28, 29, 30, 31, 19, 8, 9, 10, /* r12, r13, r14, r15, r3, g8, g9, g10 */ \
+ 11, 12, /* g11, g12 */ \
+ 32, 33, 34, 35, /* fp0, fp1, fp2, fp3 */ \
+ /* We can't actually allocate these. */ \
+ 16, 17, 18, 14, 15, 36, 37} /* r0, r1, r2, g14, g15, cc */
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The 80960 has four kinds of registers, global, local, floating point,
+ and condition code. The cc register is never allocated, so no class
+ needs to be defined for it. */
+
+enum reg_class { NO_REGS, GLOBAL_REGS, LOCAL_REGS, LOCAL_OR_GLOBAL_REGS,
+ FP_REGS, ALL_REGS, LIM_REG_CLASSES };
+
+/* 'r' includes floating point registers if TARGET_NUMERICS. 'd' never
+ does. */
+#define GENERAL_REGS ((TARGET_NUMERICS) ? ALL_REGS : LOCAL_OR_GLOBAL_REGS)
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ "NO_REGS", "GLOBAL_REGS", "LOCAL_REGS", "LOCAL_OR_GLOBAL_REGS", \
+ "FP_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ {0, 0}, {0x0ffff, 0}, {0xffff0000, 0}, {-1,0}, {0, -1}, {-1,-1}}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) < 16 ? GLOBAL_REGS \
+ : (REGNO) < 32 ? LOCAL_REGS \
+ : (REGNO) < 36 ? FP_REGS \
+ : NO_REGS)
+
+/* The class value for index registers, and the one for base regs.
+ There is currently no difference between base and index registers on the
+ i960, but this distinction may one day be useful. */
+#define INDEX_REG_CLASS LOCAL_OR_GLOBAL_REGS
+#define BASE_REG_CLASS LOCAL_OR_GLOBAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ 'f' is a floating point register (fp0..fp3)
+ 'l' is a local register (r0-r15)
+ 'b' is a global register (g0-g15)
+ 'd' is any local or global register
+ 'r' or 'g' are pre-defined to the class GENERAL_REGS. */
+/* 'l' and 'b' are probably never used. Note that 'd' and 'r' are *not*
+ the same thing, since 'r' may include the fp registers. */
+#define REG_CLASS_FROM_LETTER(C) \
+ (((C) == 'f') && (TARGET_NUMERICS) ? FP_REGS : ((C) == 'l' ? LOCAL_REGS : \
+ (C) == 'b' ? GLOBAL_REGS : ((C) == 'd' ? LOCAL_OR_GLOBAL_REGS : NO_REGS)))
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ For 80960:
+ 'I' is used for literal values 0..31
+ 'J' means literal 0
+ 'K' means 0..-31. */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (((unsigned) (VALUE)) <= 31) \
+ : (C) == 'J' ? ((VALUE) == 0) \
+ : (C) == 'K' ? ((VALUE) > -32 && (VALUE) <= 0) \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself.
+ For the 80960, G is 0.0 and H is 1.0. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((TARGET_NUMERICS) && \
+ (((C) == 'G' && (VALUE) == CONST0_RTX (GET_MODE (VALUE))) \
+ || ((C) == 'H' && ((VALUE) == CONST1_RTX (GET_MODE (VALUE))))))
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+
+/* On 960, can't load constant into floating-point reg except
+ 0.0 or 1.0.
+
+ Any hard reg is ok as a src operand of a reload insn. */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (GET_CODE (X) == REG && REGNO (X) < FIRST_PSEUDO_REGISTER \
+ ? (CLASS) \
+ : ((CLASS) == FP_REGS && CONSTANT_P (X) \
+ && (X) != CONST0_RTX (DFmode) && (X) != CONST1_RTX (DFmode)\
+ && (X) != CONST0_RTX (SFmode) && (X) != CONST1_RTX (SFmode)\
+ ? NO_REGS \
+ : (CLASS) == ALL_REGS ? LOCAL_OR_GLOBAL_REGS : (CLASS)))
+
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,IN) \
+ secondary_reload_class (CLASS, MODE, IN)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On 80960, this is the size of MODE in words,
+ except in the FP regs, where a single reg is always enough. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FP_REGS ? 1 : HARD_REGNO_NREGS (0, (MODE)))
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+/* #define STACK_GROWS_DOWNWARD */
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+/* #define FRAME_GROWS_DOWNWARD */
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated.
+
+ The i960 has a 64 byte register save area, plus possibly some extra
+ bytes allocated for varargs functions. */
+#define STARTING_FRAME_OFFSET 64
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On 80960, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) BYTES */
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* When a parameter is passed in a register, no stack space is
+ allocated for it. However, when args are passed in the
+ stack, space is allocated for every register parameter. */
+#define MAYBE_REG_PARM_STACK_SPACE 48
+#define FINAL_REG_PARM_STACK_SPACE(CONST_SIZE, VAR_SIZE) \
+ i960_final_reg_parm_stack_space (CONST_SIZE, VAR_SIZE);
+#define REG_PARM_STACK_SPACE(DECL) i960_reg_parm_stack_space (DECL)
+#define OUTGOING_REG_PARM_STACK_SPACE
+
+/* Keep the stack pointer constant throughout the function. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Value is 1 if returning from a function call automatically
+ pops the arguments described by the number-of-args field in the call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx ((REG), (MODE), 0)
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller.
+ On 80960, returns are in g0..g3 */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+/* 1 if N is a possible register number for function argument passing.
+ On 80960, parameters are passed in g0..g11 */
+
+#define FUNCTION_ARG_REGNO_P(N) ((N) < 12)
+
+/* Perform any needed actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed. */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+ i960_setup_incoming_varargs(&CUM,MODE,TYPE,&PRETEND_SIZE,NO_RTL)
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On 80960, this is two integers, which count the number of register
+ parameters and the number of stack parameters seen so far. */
+
+struct cum_args { int ca_nregparms; int ca_nstackparms; };
+
+#define CUMULATIVE_ARGS struct cum_args
+
+/* Define the number of registers that can hold parameters.
+ This macro is used only in macro definitions below and/or i960.c. */
+#define NPARM_REGS 12
+
+/* Define how to round to the next parameter boundary.
+ This macro is used only in macro definitions below and/or i960.c. */
+#define ROUND_PARM(X, MULTIPLE_OF) \
+ ((((X) + (MULTIPLE_OF) - 1) / (MULTIPLE_OF)) * (MULTIPLE_OF))
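+
+/* For example, ROUND_PARM (3, 2) is 4 and ROUND_PARM (4, 2) is also 4, so a
+ two-word-aligned argument starting after three words of parameters is
+ advanced to the next even word. */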
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On 80960, the offset always starts at 0; the first parm reg is g0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) \
+ ((CUM).ca_nregparms = 0, (CUM).ca_nstackparms = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ CUM should be advanced to align with the data type accessed and
+ also the size of that data type in # of regs.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ i960_function_arg_advance(&CUM, MODE, TYPE, NAMED)
+
+/* Indicate the alignment boundary for an argument of the specified mode and
+ type. */
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ (((TYPE) != 0) \
+ ? ((TYPE_ALIGN (TYPE) <= PARM_BOUNDARY) \
+ ? PARM_BOUNDARY \
+ : TYPE_ALIGN (TYPE)) \
+ : ((GET_MODE_ALIGNMENT (MODE) <= PARM_BOUNDARY) \
+ ? PARM_BOUNDARY \
+ : GET_MODE_ALIGNMENT (MODE)))
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+extern struct rtx_def *i960_function_arg ();
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ i960_function_arg(&CUM, MODE, TYPE, NAMED)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+#define FUNCTION_VALUE(TYPE, FUNC) \
+ gen_rtx (REG, TYPE_MODE (TYPE), 0)
+
+/* Force aggregates and objects larger than 16 bytes to be returned in memory,
+ since we only have 4 registers available for return values. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+ (TYPE_MODE (TYPE) == BLKmode || int_size_in_bytes (TYPE) > 16)
+
+/* Don't default to pcc-struct-return, because we have already specified
+ exactly how to return structures in the RETURN_IN_MEMORY macro. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ This never happens on 80960. */
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0
+
+/* Output the label for a function definition.
+ This handles leaf functions and a few other things for the i960. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ i960_function_name_declare (FILE, NAME, DECL)
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+#define FUNCTION_PROLOGUE(FILE, SIZE) i960_function_prologue ((FILE), (SIZE))
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ output_function_profiler ((FILE), (LABELNO));
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only. This is mandatory because
+ of alloca; we also take advantage of it to omit stack adjustments
+ before returning. */
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) i960_function_epilogue (FILE, SIZE)
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT */
+/* #define HAVE_POST_DECREMENT */
+
+/* #define HAVE_PRE_DECREMENT */
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32)
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32)
+#define REGNO_OK_FOR_FP_P(REGNO) \
+ ((REGNO) < 36 || (unsigned) reg_renumber[REGNO] < 36)
+
+/* Now macros that check whether X is a register and also,
+ strictly, whether it is in a specified class.
+
+ These macros are specific to the 960, and may be used only
+ in code for printing assembler insns and in conditions for
+ define_optimization. */
+
+/* 1 if X is an fp register. */
+
+#define FP_REG_P(X) (REGNO (X) >= 32 && REGNO (X) < 36)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 2
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* LEGITIMATE_CONSTANT_P is nonzero if the constant value X
+ is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P.
+
+ Anything but a CONST_DOUBLE can be made to work, excepting 0.0 and 1.0.
+
+ ??? This probably should be defined to 1. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ ((GET_CODE (X) != CONST_DOUBLE) || fp_literal ((X), GET_MODE (X)))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 32 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 32 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_OK_FOR_INDEX_P_STRICT(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+#define REG_OK_FOR_BASE_P_STRICT(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On 80960, legitimate addresses are:
+ base ld (g0),r0
+ disp (12 or 32 bit) ld foo,r0
+ base + index ld (g0)[g1*1],r0
+ base + displ ld 0xf00(g0),r0
+ base + index*scale + displ ld 0xf00(g0)[g1*4],r0
+ index*scale + base ld (g0)[g1*4],r0
+ index*scale + displ ld 0xf00[g1*4],r0
+ index*scale ld [g1*4],r0
+ index + base + displ ld 0xf00(g0)[g1*1],r0
+
+ In each case, scale can be 1, 2, 4, 8, or 16. */
+
+/* Returns 1 if the scale factor of an index term is valid. */
+#define SCALE_TERM_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ && (INTVAL (X) == 1 || INTVAL (X) == 2 || INTVAL (X) == 4 \
+ || INTVAL(X) == 8 || INTVAL (X) == 16))
+
+
+#ifdef REG_OK_STRICT
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+ { if (legitimate_address_p (MODE, X, 1)) goto ADDR; }
+#else
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+ { if (legitimate_address_p (MODE, X, 0)) goto ADDR; }
+#endif
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output. */
+
+/* On 80960, convert non-canonical addresses to canonical form. */
+
+extern struct rtx_def *legitimize_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ rtx orig_x = (X); \
+ (X) = legitimize_address (X, OLDX, MODE); \
+ if ((X) != orig_x && memory_address_p (MODE, X)) \
+ goto WIN; }
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the 960 this is never true. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define this if the tablejump instruction expects the table
+ to contain offsets from the address of the table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Allow and ignore #sccs directives. */
+#define SCCS_DIRECTIVE
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 16
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Nonzero if access to memory by bytes is no faster than for words.
+ Defining this results in worse code on the i960. */
+
+#define SLOW_BYTE_ACCESS 0
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+
+#define STORE_FLAG_VALUE 1
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* Specify the widest mode that BLKmode objects can be promoted to. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
+
+/* These global variables are used to pass information between
+ cc setter and cc user at insn emit time. */
+
+extern struct rtx_def *i960_compare_op0, *i960_compare_op1;
+
+/* Define the function that build the compare insn for scc and bcc. */
+
+extern struct rtx_def *gen_compare_reg ();
+
+/* Add any extra modes needed to represent the condition code.
+
+ Also, signed and unsigned comparisons are distinguished, as
+ are operations which are compatible with chkbit insns. */
+#define EXTRA_CC_MODES CC_UNSmode, CC_CHKmode
+
+/* Define the names for the modes specified above. */
+#define EXTRA_CC_NAMES "CC_UNS", "CC_CHK"
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. For floating-point, CCFPmode
+ should be used. CC_NOOVmode should be used when the first operand is a
+ PLUS, MINUS, or NEG. CCmode should be used when no special processing is
+ needed. */
+#define SELECT_CC_MODE(OP,X,Y) select_cc_mode (OP, X)
+
+/* A function address in a call instruction is a byte address
+ (for indexing purposes) so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE SImode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* Use memcpy, etc. instead of bcopy. */
+
+#ifndef WIND_RIVER
+#define TARGET_MEM_FUNCTIONS 1
+#endif
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+/* Constants that can be (non-ldconst) insn operands are cost 0. Constants
+ that can be non-ldconst operands in rare cases are cost 1. Other constants
+ have higher costs. */
+
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if ((INTVAL (RTX) >= 0 && INTVAL (RTX) < 32) \
+ || power2_operand (RTX, VOIDmode)) \
+ return 0; \
+ else if (INTVAL (RTX) >= -31 && INTVAL (RTX) < 0) \
+ return 1; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return (TARGET_FLAG_C_SERIES ? 6 : 8); \
+ case CONST_DOUBLE: \
+ if ((RTX) == CONST0_RTX (DFmode) || (RTX) == CONST0_RTX (SFmode) \
+ || (RTX) == CONST1_RTX (DFmode) || (RTX) == CONST1_RTX (SFmode))\
+ return 1; \
+ return 12;
+
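+/* For example, with the definition above: a CONST_INT in the range 0..31
+   or an exact power of two costs 0; a CONST_INT in the range -31..-1
+   costs 1; any other CONST_INT falls through to the CONST/LABEL_REF/
+   SYMBOL_REF case, which costs 6 on C-series targets and 8 otherwise;
+   a CONST_DOUBLE costs 1 if it is 0.0 or 1.0 and 12 otherwise.  */
+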
+/* The i960 offers addressing modes which are "as cheap as a register".
+ See i960.c (or gcc.texinfo) for details. */
+
+#define ADDRESS_COST(RTX) \
+ (GET_CODE (RTX) == REG ? 1 : i960_address_cost (RTX))
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#define ASM_FILE_START(file)
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES { \
+ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7", \
+ "g8", "g9", "g10", "g11", "g12", "g13", "g14", "fp", \
+ "pfp","sp", "rip", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "fp0","fp1","fp2", "fp3", "cc", "fake" }
+
+/* How to renumber registers for dbx and gdb.
+ In the 960 encoding, g0..g15 are registers 16..31. */
+
+#define DBX_REGISTER_NUMBER(REGNO) \
+ (((REGNO) < 16) ? (REGNO) + 16 \
+ : (((REGNO) > 31) ? (REGNO) : (REGNO) - 16))
+
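+/* For example, with the REGISTER_NAMES above: hard registers 0..15
+   (g0..g14 and fp) become dbx registers 16..31, the local registers
+   16..31 (pfp, sp, rip, r3..r15) become dbx registers 0..15, and the
+   floating-point, cc and fake registers (32 and up) keep their numbers.  */
+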
+/* Don't emit dbx records longer than this. This is an arbitrary value. */
+#define DBX_CONTIN_LENGTH 1500
+
+/* This is how to output a note to DBX telling it the line number
+ to which the following sequence of instructions corresponds. */
+
+#define ASM_OUTPUT_SOURCE_LINE(FILE, LINE) \
+{ if (write_symbols == SDB_DEBUG) { \
+ fprintf ((FILE), "\t.ln %d\n", \
+ (sdb_begin_function_line \
+ ? (LINE) - sdb_begin_function_line : 1)); \
+ } else if (write_symbols == DBX_DEBUG) { \
+ fprintf((FILE),"\t.stabd 68,0,%d\n",(LINE)); \
+ } }
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+{ fputs ("\t.globl ", FILE); \
+ assemble_name (FILE, NAME); \
+ fputs ("\n", FILE); }
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "_%s", NAME)
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%d", PREFIX, NUM)
+
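+/* For example, PREFIX "LC" and NUM 7 produce "*LC7"; the leading `*'
+   tells assemble_name to emit the rest of the string verbatim, without
+   the `_' prefix that ASM_OUTPUT_LABELREF adds to user-level names.  */
+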
+/* This is how to output an assembler line defining a `long double'
+ constant. */
+
+#define ASM_OUTPUT_LONG_DOUBLE(FILE,VALUE) i960_output_long_double(FILE, VALUE)
+
+/* This is how to output an assembler line defining a `double' constant. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) i960_output_double(FILE, VALUE)
+
+/* This is how to output an assembler line defining a `float' constant. */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) i960_output_float(FILE, VALUE)
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "\t.word "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "\t.short "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "\t.byte "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\t.byte 0x%x\n", (VALUE))
+
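+/* This is how to output an insn to push a register on the stack.
+   It need not be very fast code.  */
+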
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tst\t%s,(sp)\n\taddo\t4,sp,sp\n", reg_names[REGNO])
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tsubo\t4,sp,sp\n\tld\t(sp),%s\n", reg_names[REGNO])
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.word L%d\n", VALUE)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.word L%d-L%d\n", VALUE, REL)
+
+/* This is how to output an assembler line that says to advance the
+ location counter to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %d\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+/* For common objects, output unpadded size... gld960 & lnk960 both
+ have code to align each common object at link time. Also, if size
+ is 0, treat this as a declaration, not a definition - i.e.,
+ do nothing at all. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+{ if ((SIZE) != 0) \
+ { \
+ fputs (".globl ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fputs ("\n.comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%d\n", (SIZE)); \
+ } \
+}
+
+/* This says how to output an assembler line to define a local common symbol.
+ Output unpadded size, with request to linker to align as requested.
+ 0 size should not be possible here. */
+
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+( fputs (".bss\t", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%d,%d\n", (SIZE), \
+ ((ALIGN) <= 8 ? 0 \
+ : ((ALIGN) <= 16 ? 1 \
+ : ((ALIGN) <= 32 ? 2 \
+ : ((ALIGN <= 64 ? 3 : 4)))))))
+
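+/* For example, a 100-byte local symbol `buf' requesting 32-bit alignment
+   is emitted as ".bss\t_buf,100,2"; the last field is the log2 of the
+   alignment in bytes, capped at 4 (16 bytes).  */
+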
+/* Output text for an #ident directive. */
+#define ASM_OUTPUT_IDENT(FILE, STR) fprintf(FILE, "\t# %s\n", STR);
+
+/* Align code to 8 byte boundary if TARGET_CODE_ALIGN is true. */
+
+#define ASM_OUTPUT_ALIGN_CODE(FILE) \
+{ if (TARGET_CODE_ALIGN) fputs("\t.align 3\n",FILE); }
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+ ( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+{ fprintf (FILE, "\tld LPBX0,g12\n"); \
+ fprintf (FILE, "\tcmpobne 0,g12,LPY%d\n",LABELNO);\
+ fprintf (FILE, "\tlda LPBX0,g12\n"); \
+ fprintf (FILE, "\tcall ___bb_init_func\n"); \
+ fprintf (FILE, "LPY%d:\n",LABELNO); }
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. */
+
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+{ int blockn = (BLOCKNO); \
+ fprintf (FILE, "\tld LPBX2+%d,g12\n", 4 * blockn); \
+ fprintf (FILE, "\taddo g12,1,g12\n"); \
+ fprintf (FILE, "\tst g12,LPBX2+%d\n", 4 * blockn); }
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ i960_print_operand (FILE, X, CODE);
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ i960_print_operand_addr (FILE, ADDR)
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On the i960, the trampoline contains three instructions:
+ ldconst _function, r4
+ ldconst static addr, r3
+ jump (r4) */
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ ASM_OUTPUT_INT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x8C203000)); \
+ ASM_OUTPUT_INT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x00000000)); \
+ ASM_OUTPUT_INT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x8C183000)); \
+ ASM_OUTPUT_INT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x00000000)); \
+ ASM_OUTPUT_INT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x84212000)); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 20
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 4)), \
+ FNADDR); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 12)), \
+ CXT); \
+}
+
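+/* The trampoline template lays out five words at offsets 0, 4, 8, 12 and
+   16; INITIALIZE_TRAMPOLINE patches offset 4 with the function address and
+   offset 12 with the static chain value, filling in the operands of the
+   two ldconst instructions.  */
+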
+#if 0
+/* Promote char and short arguments to ints, when we want compatibility with
+ the iC960 compilers. */
+
+/* ??? In order for this to work, all users would need to be changed
+ to test the value of the macro at run time. */
+#define PROMOTE_PROTOTYPES TARGET_CLEAN_LINKAGE
+/* ??? This does not exist. */
+#define PROMOTE_RETURN TARGET_CLEAN_LINKAGE
+#endif
+
+/* Instruction type definitions. Used to alternate instruction types for
+ better performance on the C series chips. */
+
+enum insn_types { I_TYPE_REG, I_TYPE_MEM, I_TYPE_CTRL };
+
+/* Holds the insn type of the last insn output to the assembly file. */
+
+extern enum insn_types i960_last_insn_type;
+
+/* Parse opcodes, and set the last insn type based on them. */
+
+#define ASM_OUTPUT_OPCODE(FILE, INSN) i960_scan_opcode (INSN)
+
+/* Table listing what rtl codes each predicate in i960.c will accept. */
+
+#define PREDICATE_CODES \
+ {"fpmove_src_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
+ LABEL_REF, SUBREG, REG, MEM}}, \
+ {"arith_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fp_arith_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"signed_arith_operand", {SUBREG, REG, CONST_INT}}, \
+ {"literal", {CONST_INT}}, \
+ {"fp_literal_one", {CONST_DOUBLE}}, \
+ {"fp_literal_double", {CONST_DOUBLE}}, \
+ {"fp_literal", {CONST_DOUBLE}}, \
+ {"signed_literal", {CONST_INT}}, \
+ {"symbolic_memory_operand", {SUBREG, MEM}}, \
+ {"eq_or_neq", {EQ, NE}}, \
+ {"arith32_operand", {SUBREG, REG, LABEL_REF, SYMBOL_REF, CONST_INT, \
+ CONST_DOUBLE, CONST}}, \
+ {"power2_operand", {CONST_INT}}, \
+ {"cmplpower2_operand", {CONST_INT}},
+
+/* Declare functions defined in i960.c and used in insn-output.c. */
+
+extern char *i960_output_ldconst ();
+extern char *i960_output_call_insn ();
+extern char *i960_output_ret_insn ();
+
+/* Defined in reload.c, and used in insn-recog.c. */
+
+extern int rtx_equal_function_value_matters;
diff --git a/gnu/usr.bin/gcc/config/i960/i960.md b/gnu/usr.bin/gcc/config/i960/i960.md
new file mode 100644
index 00000000000..01e18c5ac9c
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/i960.md
@@ -0,0 +1,2645 @@
+;;- Machine description for Intel 80960 chip for GNU C compiler
+;; Copyright (C) 1992, 1995 Free Software Foundation, Inc.
+;; Contributed by Steven McGeady, Intel Corp.
+;; Additional work by Glenn Colon-Bonet, Jonathan Shapiro, Andy Wilson
+;; Converted to GCC 2.0 by Jim Wilson and Michael Tiemann, Cygnus Support.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; There are very few (4) 'f' registers, they can't be loaded/stored from/to
+;; memory, and some instructions explicitly require them, so we get better
+;; code by discouraging pseudo-registers from being allocated to them.
+;; However, we do want to allow all patterns which can store to them to
+;; include them in their constraints, so we always use '*f' in a destination
+;; constraint except when 'f' is the only alternative.
+
+;; Insn attributes which describe the i960.
+
+;; Modscan is not used, since the compiler never emits any of these insns.
+(define_attr "type"
+ "move,arith,alu2,mult,div,modscan,load,store,branch,call,address,compare,fpload,fpstore,fpmove,fpcvt,fpcc,fpadd,fpmul,fpdiv,multi,misc"
+ (const_string "arith"))
+
+;; Length (in # of insns).
+(define_attr "length" ""
+ (cond [(eq_attr "type" "load,fpload")
+ (if_then_else (match_operand 1 "symbolic_memory_operand" "")
+ (const_int 2)
+ (const_int 1))
+ (eq_attr "type" "store,fpstore")
+ (if_then_else (match_operand 0 "symbolic_memory_operand" "")
+ (const_int 2)
+ (const_int 1))
+ (eq_attr "type" "address")
+ (const_int 2)]
+ (const_int 1)))
+
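+;; Loads and stores of symbolic memory operands, and address-type insns,
+;; count as two insns here, presumably because the symbolic address must
+;; first be materialized with a separate lda.
+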
+(define_asm_attributes
+ [(set_attr "length" "1")
+ (set_attr "type" "multi")])
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+
+;; The integer ALU
+(define_function_unit "alu" 2 0 (eq_attr "type" "arith,compare,move,address") 1 0)
+(define_function_unit "alu" 2 0 (eq_attr "type" "alu2") 2 0)
+(define_function_unit "alu" 2 0 (eq_attr "type" "mult") 5 0)
+(define_function_unit "alu" 2 0 (eq_attr "type" "div") 35 0)
+(define_function_unit "alu" 2 0 (eq_attr "type" "modscan") 3 0)
+
+;; Memory with load-delay of 1 (i.e., 2 cycle load).
+(define_function_unit "memory" 1 0 (eq_attr "type" "load,fpload") 2 0)
+
+;; Floating point operations.
+(define_function_unit "fp" 1 2 (eq_attr "type" "fpmove") 5 0)
+(define_function_unit "fp" 1 2 (eq_attr "type" "fpcvt") 35 0)
+(define_function_unit "fp" 1 2 (eq_attr "type" "fpcc") 10 0)
+(define_function_unit "fp" 1 2 (eq_attr "type" "fpadd") 10 0)
+(define_function_unit "fp" 1 2 (eq_attr "type" "fpmul") 20 0)
+(define_function_unit "fp" 1 2 (eq_attr "type" "fpdiv") 35 0)
+
+;; Compare instructions.
+;; This controls RTL generation and register allocation.
+
+;; We generate RTL for comparisons and branches by having the cmpxx
+;; patterns store away the operands. Then, the scc and bcc patterns
+;; emit RTL for both the compare and the branch.
+;;
+;; We start with the DEFINE_EXPANDs, then the DEFINE_INSNs to match
+;; the patterns. Finally, we have the DEFINE_SPLITs for some of the scc
+;; insns that actually require more than one machine instruction.
+
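+;; In other words: the cmpXX expanders below only record their operands in
+;; i960_compare_op0/i960_compare_op1 and emit nothing; the sCC and bCC
+;; expanders then call gen_compare_reg, which emits the actual compare
+;; against CC register 36, and the resulting test or branch is matched by
+;; the define_insns that follow.
+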
+;; Put cmpsi first because it is expected to be the most common.
+
+(define_expand "cmpsi"
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" "")))]
+ ""
+ "
+{
+ i960_compare_op0 = operands[0];
+ i960_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:DF 0 "register_operand" "r")
+ (match_operand:DF 1 "nonmemory_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "
+{
+ i960_compare_op0 = operands[0];
+ i960_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "nonmemory_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "
+{
+ i960_compare_op0 = operands[0];
+ i960_compare_op1 = operands[1];
+ DONE;
+}")
+
+;; Now the DEFINE_INSNs for the compare and scc cases. First the compares.
+
+(define_insn ""
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "arith_operand" "dI")))]
+ ""
+ "cmpi %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn ""
+ [(set (reg:CC_UNS 36)
+ (compare:CC_UNS (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "arith_operand" "dI")))]
+ ""
+ "cmpo %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn ""
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:DF 0 "register_operand" "r")
+ (match_operand:DF 1 "nonmemory_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "cmprl %0,%1"
+ [(set_attr "type" "fpcc")])
+
+(define_insn ""
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "nonmemory_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "cmpr %0,%1"
+ [(set_attr "type" "fpcc")])
+
+;; Instruction definitions for branch-on-bit-set and clear insns.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (sign_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "arith_operand" "dI"))
+ (const_int 0))
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "bbs %2,%1,%l3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (sign_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "arith_operand" "dI"))
+ (const_int 0))
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "bbc %2,%1,%l3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "arith_operand" "dI"))
+ (const_int 0))
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "bbs %2,%1,%l3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "arith_operand" "dI"))
+ (const_int 0))
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "bbc %2,%1,%l3"
+ [(set_attr "type" "branch")])
+
+;; ??? These will never match. The LOG_LINKs necessary to make these match
+;; are not created by flow. These remain as a reminder to make this work
+;; some day.
+
+(define_insn ""
+ [(set (reg:CC 36)
+ (compare (match_operand:SI 0 "arith_operand" "d")
+ (match_operand:SI 1 "arith_operand" "d")))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 1)))]
+ "0"
+ "cmpinci %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn ""
+ [(set (reg:CC_UNS 36)
+ (compare (match_operand:SI 0 "arith_operand" "d")
+ (match_operand:SI 1 "arith_operand" "d")))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 1)))]
+ "0"
+ "cmpinco %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn ""
+ [(set (reg:CC 36)
+ (compare (match_operand:SI 0 "arith_operand" "d")
+ (match_operand:SI 1 "arith_operand" "d")))
+ (set (match_dup 1) (minus:SI (match_dup 1) (const_int 1)))]
+ "0"
+ "cmpdeci %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn ""
+ [(set (reg:CC_UNS 36)
+ (compare (match_operand:SI 0 "arith_operand" "d")
+ (match_operand:SI 1 "arith_operand" "d")))
+ (set (match_dup 1) (minus:SI (match_dup 1) (const_int 1)))]
+ "0"
+ "cmpdeco %0,%1"
+ [(set_attr "type" "compare")])
+
+;; Templates to store result of condition.
+;; '1' is stored if condition is true.
+;; '0' is stored if condition is false.
+;; These should use predicate "general_operand", since
+;; gcc seems to be creating mem references which use these
+;; templates.
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, i960_compare_op0, i960_compare_op1);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (match_operator:SI 1 "comparison_operator" [(reg:CC 36) (const_int 0)]))]
+ ""
+ "test%C1 %0"
+ [(set_attr "type" "compare")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (match_operator:SI 1 "comparison_operator" [(reg:CC_UNS 36) (const_int 0)]))]
+ ""
+ "test%C1 %0"
+ [(set_attr "type" "compare")])
+
+;; These control RTL generation for conditional jump insns
+;; and match them for register allocation.
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (EQ, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (NE, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GT, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GTU, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LT, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LTU, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GE, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GEU, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LE, i960_compare_op0, i960_compare_op1); }")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LEU, i960_compare_op0, i960_compare_op1); }")
+
+;; Now the normal branch insns (forward and reverse).
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC 36) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "b%C0 %l1"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC 36) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "b%I0 %l1"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC_UNS 36) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "b%C0 %l1"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC_UNS 36) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "b%I0 %l1"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "arith_operand" "d")
+ (match_operand:SI 2 "arith_operand" "dI")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "cmp%S0%B0%R0 %2,%1,%l3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "arith_operand" "d")
+ (match_operand:SI 2 "arith_operand" "dI")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))]
+ ""
+ "cmp%S0%B0%X0 %2,%1,%l3"
+ [(set_attr "type" "branch")])
+
+;; Normal move instructions.
+;; This code is based on the sparc machine description.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SImode))
+ DONE;
+}")
+
+;; The store case can not be separate, because reload may convert a
+;; register-to-register move insn into a store (or load) insn without
+;; rerecognizing the insn.
+
+;; The i960 does not have any store constant to memory instruction. However,
+;; the calling convention is defined so that the arg pointer is zero when it
+;; is not otherwise being used. Thus, we can handle a store of zero to memory
+;; by storing the unused arg pointer. The arg pointer will be unused if
+;; current_function_args_size is zero and this is not a stdarg/varargs
+;; function. The value of current_function_args_size is not valid until after
+;; all rtl generation is complete, including function inlining (because a
+;; function that doesn't need an arg pointer may be inlined into a function
+;; that does need an arg pointer), so we must also check that
+;; rtx_equal_function_value_matters is zero.
+
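+;; The arg pointer is g14, which is why the store alternatives below use
+;; "st g14,%0" (and its byte/short/multi-word variants) to store zero.
+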
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d,d,d,m")
+ (match_operand:SI 1 "general_operand" "dI,i,m,dJ"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"lda (%1),%0\";
+ else
+ return \"lda %1,%0\";
+ }
+ return \"mov %1,%0\";
+ case 1:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 2:
+ return \"ld %1,%0\";
+ case 3:
+ if (operands[1] == const0_rtx)
+ return \"st g14,%0\";
+ return \"st %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,address,load,store")
+ (set_attr "length" "*,3,*,*")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d,d,d,m")
+ (match_operand:SI 1 "general_operand" "dI,i,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"lda (%1),%0\";
+ else
+ return \"lda %1,%0\";
+ }
+ return \"mov %1,%0\";
+ case 1:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 2:
+ return \"ld %1,%0\";
+ case 3:
+ return \"st %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,address,load,store")
+ (set_attr "length" "*,3,*,*")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode))
+ DONE;
+}")
+
+;; Special pattern for zero stores to memory for functions which don't use
+;; the arg pointer.
+
+;; The store case can not be separate. See above.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d,d,d,m")
+ (match_operand:HI 1 "general_operand" "dI,i,m,dJ"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"lda (%1),%0\";
+ else
+ return \"lda %1,%0\";
+ }
+ return \"mov %1,%0\";
+ case 1:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 2:
+ return \"ldos %1,%0\";
+ case 3:
+ if (operands[1] == const0_rtx)
+ return \"stos g14,%0\";
+ return \"stos %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,misc,load,store")
+ (set_attr "length" "*,3,*,*")])
+
+;; The store case can not be separate. See above.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d,d,d,m")
+ (match_operand:HI 1 "general_operand" "dI,i,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"lda (%1),%0\";
+ else
+ return \"lda %1,%0\";
+ }
+ return \"mov %1,%0\";
+ case 1:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 2:
+ return \"ldos %1,%0\";
+ case 3:
+ return \"stos %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,misc,load,store")
+ (set_attr "length" "*,3,*,*")])
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode))
+ DONE;
+}")
+
+;; The store case can not be separate. See comment above.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "=d,d,d,m")
+ (match_operand:QI 1 "general_operand" "dI,i,m,dJ"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"lda (%1),%0\";
+ else
+ return \"lda %1,%0\";
+ }
+ return \"mov %1,%0\";
+ case 1:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 2:
+ return \"ldob %1,%0\";
+ case 3:
+ if (operands[1] == const0_rtx)
+ return \"stob g14,%0\";
+ return \"stob %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,misc,load,store")
+ (set_attr "length" "*,3,*,*")])
+
+;; The store case can not be separate. See comment above.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "=d,d,d,m")
+ (match_operand:QI 1 "general_operand" "dI,i,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"lda (%1),%0\";
+ else
+ return \"lda %1,%0\";
+ }
+ return \"mov %1,%0\";
+ case 1:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 2:
+ return \"ldob %1,%0\";
+ case 3:
+ return \"stob %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,misc,load,store")
+ (set_attr "length" "*,3,*,*")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DImode))
+ DONE;
+}")
+
+;; The store case can not be separate. See comment above.
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=d,d,d,d,m,o")
+ (match_operand:DI 1 "general_operand" "d,I,i,m,d,J"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 3:
+ case 4:
+ return i960_output_move_double (operands[0], operands[1]);
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 5:
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ return \"st g14,%0\;st g14,%1\";
+ }
+}"
+ [(set_attr "type" "move,move,load,load,store,store")])
+
+;; The store case can not be separate. See comment above.
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=d,d,d,d,m")
+ (match_operand:DI 1 "general_operand" "d,I,i,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 3:
+ case 4:
+ return i960_output_move_double (operands[0], operands[1]);
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ }
+}"
+ [(set_attr "type" "move,move,load,load,store")])
+
+(define_insn "*store_unaligned_di_reg"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (match_operand:DI 1 "register_operand" "d"))
+ (clobber (match_scratch:SI 2 "=&d"))]
+ ""
+ "*
+{
+ operands[3] = gen_rtx (MEM, word_mode, operands[2]);
+ operands[4] = adj_offsettable_operand (operands[3], UNITS_PER_WORD);
+ return \"lda %0,%2\;st %1,%3\;st %D1,%4\";
+}"
+ [(set_attr "type" "store")])
+
+(define_expand "movti"
+ [(set (match_operand:TI 0 "general_operand" "")
+ (match_operand:TI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, TImode))
+ DONE;
+}")
+
+;; The store case can not be separate. See comment above.
+(define_insn ""
+ [(set (match_operand:TI 0 "general_operand" "=d,d,d,d,m,o")
+ (match_operand:TI 1 "general_operand" "d,I,i,m,d,J"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], TImode)
+ || register_operand (operands[1], TImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 3:
+ case 4:
+ return i960_output_move_quad (operands[0], operands[1]);
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 5:
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ operands[2] = adj_offsettable_operand (operands[0], 8);
+ operands[3] = adj_offsettable_operand (operands[0], 12);
+ return \"st g14,%0\;st g14,%1\;st g14,%2\;st g14,%3\";
+ }
+}"
+ [(set_attr "type" "move,move,load,load,store,store")])
+
+;; The store case can not be separate. See comment above.
+(define_insn ""
+ [(set (match_operand:TI 0 "general_operand" "=d,d,d,d,m")
+ (match_operand:TI 1 "general_operand" "d,I,i,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], TImode)
+ || register_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 3:
+ case 4:
+ return i960_output_move_quad (operands[0], operands[1]);
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ }
+}"
+ [(set_attr "type" "move,move,load,load,store")])
+
+(define_insn "*store_unaligned_ti_reg"
+ [(set (match_operand:TI 0 "memory_operand" "=m")
+ (match_operand:TI 1 "register_operand" "d"))
+ (clobber (match_scratch:SI 2 "=&d"))]
+ ""
+ "*
+{
+ operands[3] = gen_rtx (MEM, word_mode, operands[2]);
+ operands[4] = adj_offsettable_operand (operands[3], UNITS_PER_WORD);
+ operands[5] = adj_offsettable_operand (operands[4], UNITS_PER_WORD);
+ operands[6] = adj_offsettable_operand (operands[5], UNITS_PER_WORD);
+ return \"lda %0,%2\;st %1,%3\;st %D1,%4\;st %E1,%5\;st %F1,%6\";
+}"
+ [(set_attr "type" "store")])
+
+(define_expand "store_multiple"
+ [(set (match_operand:SI 0 "" "") ;;- dest
+ (match_operand:SI 1 "" "")) ;;- src
+ (use (match_operand:SI 2 "" ""))] ;;- nregs
+ ""
+ "
+{
+ int regno;
+ int count;
+ rtx from;
+ int i;
+
+ if (GET_CODE (operands[0]) != MEM
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ if (count > 12)
+ FAIL;
+
+ regno = REGNO (operands[1]);
+ from = memory_address (SImode, XEXP (operands[0], 0));
+ while (count >= 4 && ((regno & 3) == 0))
+ {
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, TImode, from),
+ gen_rtx (REG, TImode, regno)));
+ count -= 4;
+ regno += 4;
+ from = memory_address (TImode, plus_constant (from, 16));
+ }
+ while (count >= 2 && ((regno & 1) == 0))
+ {
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, DImode, from),
+ gen_rtx (REG, DImode, regno)));
+ count -= 2;
+ regno += 2;
+ from = memory_address (DImode, plus_constant (from, 8));
+ }
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, SImode, from),
+ gen_rtx (REG, SImode, regno)));
+ count -= 1;
+ regno += 1;
+ from = memory_address (SImode, plus_constant (from, 4));
+ }
+ DONE;
+}")
+
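+;; For example, storing 7 consecutive registers starting at a register
+;; number that is a multiple of 4 emits one TImode store (4 registers),
+;; then one DImode store (2 registers), then one SImode store, advancing
+;; the address by 16, 8 and 4 bytes respectively.
+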
+;; Floating point move insns
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "fpmove_src_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DFmode))
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "general_operand" "=r,*f,d,d,m,o")
+ (match_operand:DF 1 "fpmove_src_operand" "r,GH,F,m,d,G"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || operands[1] == CONST0_RTX (DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return \"movrl %1,%0\";
+ else
+ return \"movl %1,%0\";
+ case 1:
+ return \"movrl %1,%0\";
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 3:
+ return \"ldl %1,%0\";
+ case 4:
+ return \"stl %1,%0\";
+ case 5:
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ return \"st g14,%0\;st g14,%1\";
+ }
+}"
+ [(set_attr "type" "move,move,load,fpload,fpstore,fpstore")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "general_operand" "=r,*f,d,d,m")
+ (match_operand:DF 1 "fpmove_src_operand" "r,GH,F,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return \"movrl %1,%0\";
+ else
+ return \"movl %1,%0\";
+ case 1:
+ return \"movrl %1,%0\";
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 3:
+ return \"ldl %1,%0\";
+ case 4:
+ return \"stl %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,move,load,fpload,fpstore")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "fpmove_src_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SFmode))
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "general_operand" "=r,*f,d,d,m")
+ (match_operand:SF 1 "fpmove_src_operand" "r,GH,F,m,dG"))]
+ "(current_function_args_size == 0
+ && current_function_varargs == 0
+ && current_function_stdarg == 0
+ && rtx_equal_function_value_matters == 0)
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)
+ || operands[1] == CONST0_RTX (SFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return \"movr %1,%0\";
+ else
+ return \"mov %1,%0\";
+ case 1:
+ return \"movr %1,%0\";
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 3:
+ return \"ld %1,%0\";
+ case 4:
+ if (operands[1] == CONST0_RTX (SFmode))
+ return \"st g14,%0\";
+ return \"st %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,move,load,fpload,fpstore")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "general_operand" "=r,*f,d,d,m")
+ (match_operand:SF 1 "fpmove_src_operand" "r,GH,F,m,d"))]
+ "(current_function_args_size != 0
+ || current_function_varargs != 0
+ || current_function_stdarg != 0
+ || rtx_equal_function_value_matters != 0)
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return \"movr %1,%0\";
+ else
+ return \"mov %1,%0\";
+ case 1:
+ return \"movr %1,%0\";
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 3:
+ return \"ld %1,%0\";
+ case 4:
+ return \"st %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,move,load,fpload,fpstore")])
+
+;; Mixed-mode moves with sign and zero-extension.
+
+;; Note that the one starting from HImode comes before those for QImode
+;; so that a constant operand will match HImode, not QImode.
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operand1) == REG
+ || (GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == REG))
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
+ int op1_subreg_word = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subreg_word = SUBREG_WORD (operand1);
+ operand1 = SUBREG_REG (operand1);
+ }
+ operand1 = gen_rtx (SUBREG, SImode, operand1, op1_subreg_word);
+
+ emit_insn (gen_ashlsi3 (temp, operand1, shift_16));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldis %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operand1) == REG
+ || (GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == REG))
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
+ int op1_subreg_word = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subreg_word = SUBREG_WORD (operand1);
+ operand1 = SUBREG_REG (operand1);
+ }
+      operand1 = gen_rtx (SUBREG, SImode, operand1, op1_subreg_word);
+
+ emit_insn (gen_ashlsi3 (temp, operand1, shift_24));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldib %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (sign_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operand1) == REG
+ || (GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == REG))
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
+ int op0_subreg_word = 0;
+ int op1_subreg_word = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subreg_word = SUBREG_WORD (operand1);
+ operand1 = SUBREG_REG (operand1);
+ }
+ operand1 = gen_rtx (SUBREG, SImode, operand1, op1_subreg_word);
+
+ if (GET_CODE (operand0) == SUBREG)
+ {
+ op0_subreg_word = SUBREG_WORD (operand0);
+ operand0 = SUBREG_REG (operand0);
+ }
+ if (GET_MODE (operand0) != SImode)
+ operand0 = gen_rtx (SUBREG, SImode, operand0, op0_subreg_word);
+
+ emit_insn (gen_ashlsi3 (temp, operand1, shift_24));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldib %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operand1) == REG
+ || (GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == REG))
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
+ int op1_subreg_word = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subreg_word = SUBREG_WORD (operand1);
+ operand1 = SUBREG_REG (operand1);
+ }
+ operand1 = gen_rtx (SUBREG, SImode, operand1, op1_subreg_word);
+
+ emit_insn (gen_ashlsi3 (temp, operand1, shift_16));
+ emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldos %1,%0"
+ [(set_attr "type" "load")])
+
+;; Using shifts here generates much better code than doing an `and 255'.
+;; This is mainly because the `and' requires loading the constant separately;
+;; the constant load is then likely to be moved by optimization, and the
+;; compiler can't optimize the `and' because it no longer knows that one
+;; operand is a constant.
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operand1) == REG
+ || (GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == REG))
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
+ int op1_subreg_word = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subreg_word = SUBREG_WORD (operand1);
+ operand1 = SUBREG_REG (operand1);
+ }
+ operand1 = gen_rtx (SUBREG, SImode, operand1, op1_subreg_word);
+
+ emit_insn (gen_ashlsi3 (temp, operand1, shift_24));
+ emit_insn (gen_lshrsi3 (operand0, temp, shift_24));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldob %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operand1) == REG
+ || (GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == REG))
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
+ int op0_subreg_word = 0;
+ int op1_subreg_word = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subreg_word = SUBREG_WORD (operand1);
+ operand1 = SUBREG_REG (operand1);
+ }
+ operand1 = gen_rtx (SUBREG, SImode, operand1, op1_subreg_word);
+
+ if (GET_CODE (operand0) == SUBREG)
+ {
+ op0_subreg_word = SUBREG_WORD (operand0);
+ operand0 = SUBREG_REG (operand0);
+ }
+ if (GET_MODE (operand0) != SImode)
+ operand0 = gen_rtx (SUBREG, SImode, operand0, op0_subreg_word);
+
+ emit_insn (gen_ashlsi3 (temp, operand1, shift_24));
+ emit_insn (gen_lshrsi3 (operand0, temp, shift_24));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (zero_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldob %1,%0"
+ [(set_attr "type" "load")])
+
+;; Conversions between float and double.
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=*f,d")
+ (float_extend:DF (match_operand:SF 1 "fp_arith_operand" "dGH,fGH")))]
+ "TARGET_NUMERICS"
+ "@
+ movr %1,%0
+ movrl %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (float_truncate:SF
+ (match_operand:DF 1 "fp_arith_operand" "fGH")))]
+ "TARGET_NUMERICS"
+ "movr %1,%0"
+ [(set_attr "type" "fpmove")])
+
+;; Conversion between fixed point and floating point.
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:SI 1 "register_operand" "d")))]
+ "TARGET_NUMERICS"
+ "cvtir %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=d*f")
+ (float:SF (match_operand:SI 1 "register_operand" "d")))]
+ "TARGET_NUMERICS"
+ "cvtir %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+;; Convert a float to an actual integer.
+;; Truncation is performed as part of the conversion.
+;; The i960 requires conversion from DFmode to DImode to make
+;; unsigned conversions work properly.
+
+(define_insn "fixuns_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (unsigned_fix:DI (fix:DF (match_operand:DF 1 "fp_arith_operand" "fGH"))))]
+ "TARGET_NUMERICS"
+ "cvtzril %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_insn "fixuns_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (unsigned_fix:DI (fix:SF (match_operand:SF 1 "fp_arith_operand" "fGH"))))]
+ "TARGET_NUMERICS"
+ "cvtzril %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (fix:SI (fix:DF (match_operand:DF 1 "fp_arith_operand" "fGH"))))]
+ "TARGET_NUMERICS"
+ "cvtzri %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_expand "fixuns_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unsigned_fix:SI (fix:DF (match_operand:DF 1 "fp_arith_operand" ""))))]
+ "TARGET_NUMERICS"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ emit_insn (gen_rtx (SET, VOIDmode, temp,
+ gen_rtx (UNSIGNED_FIX, DImode,
+ gen_rtx (FIX, DFmode, operands[1]))));
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SUBREG, SImode, temp, 0)));
+ DONE;
+}")
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (fix:SI (fix:SF (match_operand:SF 1 "fp_arith_operand" "dfGH"))))]
+ "TARGET_NUMERICS"
+ "cvtzri %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_expand "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unsigned_fix:SI (fix:SF (match_operand:SF 1 "fp_arith_operand" ""))))]
+ "TARGET_NUMERICS"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ emit_insn (gen_rtx (SET, VOIDmode, temp,
+ gen_rtx (UNSIGNED_FIX, DImode,
+ gen_rtx (FIX, SFmode, operands[1]))));
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SUBREG, SImode, temp, 0)));
+ DONE;
+}")
+
+;; Arithmetic instructions.
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (minus:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "subo %2,%1,%0")
+
+;; Try to generate an lda instruction when it would be faster than an
+;; add instruction.
+;; Some assemblers apparently won't accept two addresses added together.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d,d,d")
+ (plus:SI (match_operand:SI 1 "arith32_operand" "%dn,i,dn")
+ (match_operand:SI 2 "arith32_operand" "dn,dn,i")))]
+ "(TARGET_C_SERIES) && (CONSTANT_P (operands[1]) || CONSTANT_P (operands[2]))"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx tmp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = tmp;
+ }
+ if (GET_CODE (operands[2]) == CONST_INT
+ && GET_CODE (operands[1]) == REG
+ && i960_last_insn_type != I_TYPE_REG)
+ {
+ if (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) > -32)
+ return \"subo %n2,%1,%0\";
+ else if (INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32)
+ return \"addo %1,%2,%0\";
+ }
+ if (CONSTANT_P (operands[1]))
+ return \"lda %1+%2,%0\";
+ return \"lda %2(%1),%0\";
+}")
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (plus:SI (match_operand:SI 1 "signed_arith_operand" "%dI")
+ (match_operand:SI 2 "signed_arith_operand" "dIK")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0)
+ return \"subo %n2,%1,%0\";
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"addo %2,%1,%0\";
+ return \"addo %1,%2,%0\";
+}")
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (match_operand:SI 1 "arith_operand" "%dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"mulo %2,%1,%0\";
+ return \"mulo %1,%2,%0\";
+}"
+ [(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "d"))))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"emul %2,%1,%0\";
+ return \"emul %1,%2,%0\";
+}"
+ [(set_attr "type" "mult")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%d"))
+ (match_operand:SI 2 "literal" "I")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"emul %2,%1,%0\";
+ return \"emul %1,%2,%0\";
+}"
+ [(set_attr "type" "mult")])
+
+;; This goes after the move/add/sub/mul instructions
+;; because those instructions are better when they apply.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (match_operand:SI 1 "address_operand" "p"))]
+ ""
+ "lda %a1,%0"
+ [(set_attr "type" "load")])
+
+;; This will never be selected because of an "optimization" that GCC does.
+;; It always converts divides by a power of 2 into a sequence of instructions
+;; that does a right shift, and then corrects the result if it was negative.
+
+;; (define_insn ""
+;; [(set (match_operand:SI 0 "register_operand" "=d")
+;; (div:SI (match_operand:SI 1 "arith_operand" "dI")
+;; (match_operand:SI 2 "power2_operand" "nI")))]
+;; ""
+;; "*{
+;; operands[2] = gen_rtx(CONST_INT, VOIDmode,bitpos (INTVAL (operands[2])));
+;; return \"shrdi %2,%1,%0\";
+;; }"
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (div:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "divi %2,%1,%0"
+ [(set_attr "type" "div")])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (udiv:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "divo %2,%1,%0"
+ [(set_attr "type" "div")])
+
+;; We must use `remi' not `modi' here, to ensure that `%' has the effects
+;; specified by the ANSI C standard.
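+;; For example, with division truncating toward zero -7 / 2 is -3, so ANSI C
+;; requires -7 % 2 == -1, i.e. the sign of the dividend; remi produces that,
+;; whereas modi would yield a result with the sign of the divisor.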
+
+(define_insn "modsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mod:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "remi %2,%1,%0"
+ [(set_attr "type" "div")])
+
+(define_insn "umodsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (umod:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "remo %2,%1,%0"
+ [(set_attr "type" "div")])
+
+;; And instructions (with complement also).
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (and:SI (match_operand:SI 1 "arith_operand" "%dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"and %2,%1,%0\";
+ return \"and %1,%2,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (and:SI (not:SI (match_operand:SI 1 "arith_operand" "dI"))
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"notand %2,%1,%0\";
+ return \"andnot %1,%2,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (not:SI (match_operand:SI 1 "arith_operand" "%dI"))
+ (not:SI (match_operand:SI 2 "arith_operand" "dI"))))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"nand %2,%1,%0\";
+ return \"nand %1,%2,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "power2_operand" "n")))]
+ ""
+ "*
+{
+ operands[2] = gen_rtx (CONST_INT, VOIDmode,
+ bitpos (INTVAL (operands[2])));
+ return \"setbit %2,%1,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (ashift:SI (const_int 1)
+ (match_operand:SI 1 "register_operand" "d"))
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "setbit %1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (and:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "cmplpower2_operand" "n")))]
+ ""
+ "*
+{
+ operands[2] = gen_rtx (CONST_INT, VOIDmode,
+ bitpos (~INTVAL (operands[2])));
+ return \"clrbit %2,%1,%0\";
+}")
+
+;; (not (ashift 1 reg)) canonicalizes to (rotate -2 reg)
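+;; e.g. with a shift count of 3, (not (ashift 1 3)) is ~0x00000008 =
+;; 0xfffffff7, and rotating the constant -2 (0xfffffffe) left by 3 bit
+;; positions gives the same 0xfffffff7.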
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (and:SI (rotate:SI (const_int -2)
+ (match_operand:SI 1 "register_operand" "d"))
+ (match_operand:SI 2 "register_operand" "d")))]
+ ""
+ "clrbit %1,%2,%0")
+
+;; The above pattern canonicalizes to this when both the input and output
+;; are the same pseudo-register.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "=d")
+ (const_int 1)
+ (match_operand:SI 1 "register_operand" "d"))
+ (const_int 0))]
+ ""
+ "clrbit %1,%0,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (xor:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "power2_operand" "n")))]
+ ""
+ "*
+{
+ operands[2] = gen_rtx (CONST_INT, VOIDmode,
+ bitpos (INTVAL (operands[2])));
+ return \"notbit %2,%1,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (xor:SI (ashift:SI (const_int 1)
+ (match_operand:SI 1 "register_operand" "d"))
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "notbit %1,%2,%0")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (match_operand:SI 1 "arith_operand" "%dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"or %2,%1,%0\";
+ return \"or %1,%2,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (not:SI (match_operand:SI 1 "arith_operand" "dI"))
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"notor %2,%1,%0\";
+ return \"ornot %1,%2,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (and:SI (not:SI (match_operand:SI 1 "arith_operand" "%dI"))
+ (not:SI (match_operand:SI 2 "arith_operand" "dI"))))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"nor %2,%1,%0\";
+ return \"nor %1,%2,%0\";
+}")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (xor:SI (match_operand:SI 1 "arith_operand" "%dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"xor %2,%1,%0\";
+ return \"xor %1,%2,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (not:SI (xor:SI (match_operand:SI 1 "arith_operand" "%dI")
+ (match_operand:SI 2 "arith_operand" "dI"))))]
+ ""
+ "*
+{
+ if (i960_bypass (insn, operands[1], operands[2], 0))
+ return \"xnor %2,%1,%0\";
+ return \"xnor %2,%1,%0\";
+}")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (neg:SI (match_operand:SI 1 "arith_operand" "dI")))]
+ ""
+ "subo %1,0,%0"
+ [(set_attr "length" "1")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (not:SI (match_operand:SI 1 "arith_operand" "dI")))]
+ ""
+ "not %1,%0"
+ [(set_attr "length" "1")])
+
+;; Floating point arithmetic instructions.
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=d*f")
+ (plus:DF (match_operand:DF 1 "fp_arith_operand" "%rGH")
+ (match_operand:DF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "addrl %1,%2,%0"
+ [(set_attr "type" "fpadd")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d*f")
+ (plus:SF (match_operand:SF 1 "fp_arith_operand" "%rGH")
+ (match_operand:SF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "addr %1,%2,%0"
+ [(set_attr "type" "fpadd")])
+
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=d*f")
+ (minus:DF (match_operand:DF 1 "fp_arith_operand" "rGH")
+ (match_operand:DF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "subrl %2,%1,%0"
+ [(set_attr "type" "fpadd")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d*f")
+ (minus:SF (match_operand:SF 1 "fp_arith_operand" "rGH")
+ (match_operand:SF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "subr %2,%1,%0"
+ [(set_attr "type" "fpadd")])
+
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=d*f")
+ (mult:DF (match_operand:DF 1 "fp_arith_operand" "%rGH")
+ (match_operand:DF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "mulrl %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d*f")
+ (mult:SF (match_operand:SF 1 "fp_arith_operand" "%rGH")
+ (match_operand:SF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "mulr %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=d*f")
+ (div:DF (match_operand:DF 1 "fp_arith_operand" "rGH")
+ (match_operand:DF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "divrl %2,%1,%0"
+ [(set_attr "type" "fpdiv")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d*f")
+ (div:SF (match_operand:SF 1 "fp_arith_operand" "rGH")
+ (match_operand:SF 2 "fp_arith_operand" "rGH")))]
+ "TARGET_NUMERICS"
+ "divr %2,%1,%0"
+ [(set_attr "type" "fpdiv")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d,d*f")
+ (neg:DF (match_operand:DF 1 "register_operand" "d,r")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ {
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ return \"notbit 31,%D1,%D0\";
+ return \"mov %1,%0\;notbit 31,%D1,%D0\";
+ }
+ return \"subrl %1,0f0.0,%0\";
+}"
+ [(set_attr "type" "fpadd")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d,d*f")
+ (neg:SF (match_operand:SF 1 "register_operand" "d,r")))]
+ ""
+ "@
+ notbit 31,%1,%0
+ subr %1,0f0.0,%0"
+ [(set_attr "type" "fpadd")])
+
+;;; The abs patterns also work even if the target machine doesn't have
+;;; floating point, because in that case dstreg and srcreg will always be
+;;; less than 32.
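+;;; (The four floating-point registers are numbered 32-35 and the condition
+;;; code is register 36, so a register number below 32 always refers to an
+;;; ordinary 32-bit register.)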
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d*f")
+ (abs:DF (match_operand:DF 1 "register_operand" "df")))]
+ ""
+ "*
+{
+ int dstreg = REGNO (operands[0]);
+ int srcreg = REGNO (operands[1]);
+
+ if (dstreg < 32)
+ {
+ if (srcreg < 32)
+ {
+ if (dstreg != srcreg)
+ output_asm_insn (\"mov %1,%0\", operands);
+ return \"clrbit 31,%D1,%D0\";
+ }
+ /* Src is an fp reg. */
+ return \"movrl %1,%0\;clrbit 31,%D1,%D0\";
+ }
+ if (srcreg >= 32)
+ return \"cpysre %1,0f0.0,%0\";
+ return \"movrl %1,%0\;cpysre %0,0f0.0,%0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=d*f")
+ (abs:SF (match_operand:SF 1 "register_operand" "df")))]
+ ""
+ "*
+{
+ int dstreg = REGNO (operands[0]);
+ int srcreg = REGNO (operands[1]);
+
+ if (dstreg < 32 && srcreg < 32)
+ return \"clrbit 31,%1,%0\";
+
+ if (dstreg >= 32 && srcreg >= 32)
+ return \"cpysre %1,0f0.0,%0\";
+
+ if (dstreg < 32)
+ return \"movr %1,%0\;clrbit 31,%0,%0\";
+
+ return \"movr %1,%0\;cpysre %0,0f0.0,%0\";
+}"
+ [(set_attr "type" "multi")])
+
+;; Tetra (16 byte) float support.
+
+(define_expand "cmpxf"
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:XF 0 "register_operand" "")
+ (match_operand:XF 1 "nonmemory_operand" "")))]
+ "TARGET_NUMERICS"
+ "
+{
+ i960_compare_op0 = operands[0];
+ i960_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_insn ""
+ [(set (reg:CC 36)
+ (compare:CC (match_operand:XF 0 "register_operand" "f")
+ (match_operand:XF 1 "nonmemory_operand" "fGH")))]
+ "TARGET_NUMERICS"
+ "cmpr %0,%1"
+ [(set_attr "type" "fpcc")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "fpmove_src_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, XFmode))
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "general_operand" "=r,f,d,d,m")
+ (match_operand:XF 1 "fpmove_src_operand" "r,GH,F,m,d"))]
+ "register_operand (operands[0], XFmode)
+ || register_operand (operands[1], XFmode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return \"movre %1,%0\";
+ else
+ return \"movq %1,%0\";
+ case 1:
+ return \"movre %1,%0\";
+ case 2:
+ return i960_output_ldconst (operands[0], operands[1]);
+ case 3:
+ return \"ldt %1,%0\";
+ case 4:
+ return \"stt %1,%0\";
+ }
+}"
+ [(set_attr "type" "move,move,load,fpload,fpstore")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f,d")
+ (float_extend:XF
+ (match_operand:SF 1 "register_operand" "d,f")))]
+ "TARGET_NUMERICS"
+ "@
+ movr %1,%0
+ movre %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f,d")
+ (float_extend:XF
+ (match_operand:DF 1 "register_operand" "d,f")))]
+ "TARGET_NUMERICS"
+ "@
+ movrl %1,%0
+ movre %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_NUMERICS"
+ "movrl %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_NUMERICS"
+ "movr %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (float:XF (match_operand:SI 1 "register_operand" "d")))]
+ "TARGET_NUMERICS"
+ "cvtir %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (fix:SI (fix:XF (match_operand:XF 1 "register_operand" "f"))))]
+ "TARGET_NUMERICS"
+ "cvtzri %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_insn "fixuns_truncxfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unsigned_fix:SI (fix:XF (match_operand:XF 1 "register_operand" "f"))))]
+ "TARGET_NUMERICS"
+ "cvtzri %1,%0"
+ [(set_attr "type" "fpcvt")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (plus:XF (match_operand:XF 1 "nonmemory_operand" "%fGH")
+ (match_operand:XF 2 "nonmemory_operand" "fGH")))]
+ "TARGET_NUMERICS"
+ "addr %1,%2,%0"
+ [(set_attr "type" "fpadd")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (minus:XF (match_operand:XF 1 "nonmemory_operand" "fGH")
+ (match_operand:XF 2 "nonmemory_operand" "fGH")))]
+ "TARGET_NUMERICS"
+ "subr %2,%1,%0"
+ [(set_attr "type" "fpadd")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "nonmemory_operand" "%fGH")
+ (match_operand:XF 2 "nonmemory_operand" "fGH")))]
+ "TARGET_NUMERICS"
+ "mulr %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (div:XF (match_operand:XF 1 "nonmemory_operand" "fGH")
+ (match_operand:XF 2 "nonmemory_operand" "fGH")))]
+ "TARGET_NUMERICS"
+ "divr %2,%1,%0"
+ [(set_attr "type" "fpdiv")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_NUMERICS"
+ "subr %1,0f0.0,%0"
+ [(set_attr "type" "fpadd")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "register_operand" "f")))]
+ "(TARGET_NUMERICS)"
+ "cpysre %1,0f0.0,%0"
+ [(set_attr "type" "fpmove")])
+
+;; Arithmetic shift instructions.
+
+;; The shli instruction generates an overflow fault if the sign changes.
+;; In the case of overflow, it does not give the natural result, it instead
+;; gives the last shift value before the overflow. We can not use this
+;; instruction because gcc thinks that arithmetic left shift and logical
+;; left shift are identical, and sometimes canonicalizes the logical left
+;; shift to an arithmetic left shift. Therefore we must always use the
+;; logical left shift instruction.
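+;; For example, an arithmetic left shift of 0x40000000 by one flips the sign
+;; bit: C's << must produce 0x80000000, but shli would fault (or return the
+;; pre-overflow value 0x40000000), whereas shlo produces 0x80000000.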
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashift:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "shlo %2,%1,%0"
+ [(set_attr "type" "alu2")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "shri %2,%1,%0"
+ [(set_attr "type" "alu2")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "arith_operand" "dI")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ ""
+ "shro %2,%1,%0"
+ [(set_attr "type" "alu2")])
+
+;; Unconditional and other jump instructions.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "b %l0"
+ [(set_attr "type" "branch")])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+ ""
+ "bx %a0"
+ [(set_attr "type" "branch")])
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "d"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "bx (%0)"
+ [(set_attr "type" "branch")])
+
+;;- jump to subroutine
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "m")
+ (match_operand:SI 1 "immediate_operand" "i"))]
+ ""
+ "
+{
+ emit_insn (gen_call_internal (operands[0], operands[1],
+ virtual_outgoing_args_rtx));
+ DONE;
+}")
+
+;; We need a call saved register allocated for the match_scratch, so we use
+;; 'l' because all local registers are call saved.
+
+;; ??? I would prefer to use a match_scratch here, but match_scratch allocated
+;; registers can't be used for spills. In a function with lots of calls,
+;; local-alloc may allocate all local registers to a match_scratch, leaving
+;; no local registers available for spills.
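+;; As written, the patterns below therefore just clobber a fixed register
+;; (register 19) directly instead of using a match_scratch.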
+
+(define_insn "call_internal"
+ [(call (match_operand:SI 0 "memory_operand" "m")
+ (match_operand:SI 1 "immediate_operand" "i"))
+ (use (match_operand:SI 2 "address_operand" "p"))
+ (clobber (reg:SI 19))]
+ ""
+ "* return i960_output_call_insn (operands[0], operands[1], operands[2],
+ insn);"
+ [(set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (match_operand:SI 1 "memory_operand" "m")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ ""
+ "
+{
+ emit_insn (gen_call_value_internal (operands[0], operands[1], operands[2],
+ virtual_outgoing_args_rtx));
+ DONE;
+}")
+
+;; We need a call saved register allocated for the match_scratch, so we use
+;; 'l' because all local registers are call saved.
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (match_operand:SI 1 "memory_operand" "m")
+ (match_operand:SI 2 "immediate_operand" "i")))
+ (use (match_operand:SI 3 "address_operand" "p"))
+ (clobber (reg:SI 19))]
+ ""
+ "* return i960_output_call_insn (operands[1], operands[2], operands[3],
+ insn);"
+ [(set_attr "type" "call")])
+
+(define_insn "return"
+ [(return)]
+ ""
+ "* return i960_output_ret_insn (insn);"
+ [(set_attr "type" "branch")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "")
+
+;; Various peephole optimizations for multiple-word moves, loads, and stores.
+;; Multiple register moves.
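+;; For instance, four adjacent single-word register copies whose source and
+;; destination groups both start on a four-register boundary collapse into a
+;; single movq below; analogous cases produce movt and movl.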
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (match_operand:SI 2 "register_operand" "=r")
+ (match_operand:SI 3 "register_operand" "r"))
+ (set (match_operand:SI 4 "register_operand" "=r")
+ (match_operand:SI 5 "register_operand" "r"))
+ (set (match_operand:SI 6 "register_operand" "=r")
+ (match_operand:SI 7 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 3) == 0)
+ && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[4]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[5]))
+ && (REGNO (operands[0]) + 3 == REGNO (operands[6]))
+ && (REGNO (operands[1]) + 3 == REGNO (operands[7]))"
+ "movq %1,%0")
+
+;; Matched 4/17/92
+(define_peephole
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (set (match_operand:DI 2 "register_operand" "=r")
+ (match_operand:DI 3 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 3) == 0)
+ && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[0]) + 2 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[3]))"
+ "movq %1,%0")
+
+;; Matched 4/17/92
+(define_peephole
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (set (match_operand:SI 2 "register_operand" "=r")
+ (match_operand:SI 3 "register_operand" "r"))
+ (set (match_operand:SI 4 "register_operand" "=r")
+ (match_operand:SI 5 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 3) == 0)
+ && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[0]) + 2 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[3]))
+ && (REGNO (operands[0]) + 3 == REGNO (operands[4]))
+ && (REGNO (operands[1]) + 3 == REGNO (operands[5]))"
+ "movq %1,%0")
+
+;; Matched 4/17/92
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (match_operand:SI 2 "register_operand" "=r")
+ (match_operand:SI 3 "register_operand" "r"))
+ (set (match_operand:DI 4 "register_operand" "=r")
+ (match_operand:DI 5 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 3) == 0)
+ && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[4]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[5]))"
+ "movq %1,%0")
+
+;; Matched 4/17/92
+(define_peephole
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (set (match_operand:SI 2 "register_operand" "=r")
+ (match_operand:SI 3 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 3) == 0)
+ && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[0]) + 2 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[3]))"
+ "movt %1,%0")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (match_operand:SI 2 "register_operand" "=r")
+ (match_operand:SI 3 "register_operand" "r"))
+ (set (match_operand:SI 4 "register_operand" "=r")
+ (match_operand:SI 5 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 3) == 0)
+ && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[4]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[5]))"
+ "movt %1,%0")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (match_operand:SI 2 "register_operand" "=r")
+ (match_operand:SI 3 "register_operand" "r"))]
+ "((REGNO (operands[0]) & 1) == 0)
+ && ((REGNO (operands[1]) & 1) == 0)
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))"
+ "movl %1,%0")
+
+; Multiple register loads.
+
+;; Matched 6/15/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "n"))))
+ (set (match_operand:SI 3 "register_operand" "=r")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 4 "immediate_operand" "n"))))
+ (set (match_operand:SI 5 "register_operand" "=r")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 6 "immediate_operand" "n"))))
+ (set (match_operand:SI 7 "register_operand" "=r")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 8 "immediate_operand" "n"))))]
+ "(i960_si_ti (operands[1], operands[2]) && ((REGNO (operands[0]) & 3) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[1]) != REGNO (operands[3]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[5]))
+ && (REGNO (operands[1]) != REGNO (operands[5]))
+ && (REGNO (operands[0]) + 3 == REGNO (operands[7]))
+ && (INTVAL (operands[2]) + 4 == INTVAL (operands[4]))
+ && (INTVAL (operands[2]) + 8 == INTVAL (operands[6]))
+ && (INTVAL (operands[2]) + 12 == INTVAL (operands[8])))"
+ "ldq %2(%1),%0")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (mem:DF (plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "immediate_operand" "n"))))
+ (set (match_operand:DF 3 "register_operand" "=d")
+ (mem:DF (plus:SI (match_dup 1)
+ (match_operand:SI 4 "immediate_operand" "n"))))]
+ "(i960_si_ti (operands[1], operands[2]) && ((REGNO (operands[0]) & 3) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[3]))
+ && (REGNO (operands[1]) != REGNO (operands[3]))
+ && (INTVAL (operands[2]) + 8 == INTVAL (operands[4])))"
+ "ldq %2(%1),%0")
+
+;; Matched 1/24/92
+(define_peephole
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mem:DI (plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "immediate_operand" "n"))))
+ (set (match_operand:DI 3 "register_operand" "=d")
+ (mem:DI (plus:SI (match_dup 1)
+ (match_operand:SI 4 "immediate_operand" "n"))))]
+ "(i960_si_ti (operands[1], operands[2]) && ((REGNO (operands[0]) & 3) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[3]))
+ && (REGNO (operands[1]) != REGNO (operands[3]))
+ && (INTVAL (operands[2]) + 8 == INTVAL (operands[4])))"
+ "ldq %2(%1),%0")
+
+;; Matched 4/17/92
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mem:SI (match_operand:SI 1 "register_operand" "d")))
+ (set (match_operand:SI 2 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 3 "immediate_operand" "n"))))
+ (set (match_operand:SI 4 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 5 "immediate_operand" "n"))))
+ (set (match_operand:SI 6 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 7 "immediate_operand" "n"))))]
+ "(i960_si_ti (operands[1], 0) && ((REGNO (operands[0]) & 3) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (REGNO (operands[1]) != REGNO (operands[2]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[4]))
+ && (REGNO (operands[1]) != REGNO (operands[4]))
+ && (REGNO (operands[0]) + 3 == REGNO (operands[6]))
+ && (INTVAL (operands[3]) == 4)
+ && (INTVAL (operands[5]) == 8)
+ && (INTVAL (operands[7]) == 12))"
+ "ldq (%1),%0")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "immediate_operand" "n"))))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 4 "immediate_operand" "n"))))
+ (set (match_operand:SI 5 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 6 "immediate_operand" "n"))))]
+ "(i960_si_ti (operands[1], operands[2]) && ((REGNO (operands[0]) & 3) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[1]) != REGNO (operands[3]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[5]))
+ && (INTVAL (operands[2]) + 4 == INTVAL (operands[4]))
+ && (INTVAL (operands[2]) + 8 == INTVAL (operands[6])))"
+ "ldt %2(%1),%0")
+
+;; Matched 6/15/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mem:SI (match_operand:SI 1 "register_operand" "d")))
+ (set (match_operand:SI 2 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 3 "immediate_operand" "n"))))
+ (set (match_operand:SI 4 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 5 "immediate_operand" "n"))))]
+ "(i960_si_ti (operands[1], 0) && ((REGNO (operands[0]) & 3) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (REGNO (operands[1]) != REGNO (operands[2]))
+ && (REGNO (operands[0]) + 2 == REGNO (operands[4]))
+ && (INTVAL (operands[3]) == 4)
+ && (INTVAL (operands[5]) == 8))"
+ "ldt (%1),%0")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "immediate_operand" "n"))))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 4 "immediate_operand" "n"))))]
+ "(i960_si_di (operands[1], operands[2]) && ((REGNO (operands[0]) & 1) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 1 == REGNO (operands[3]))
+ && (INTVAL (operands[2]) + 4 == INTVAL (operands[4])))"
+ "ldl %2(%1),%0")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mem:SI (match_operand:SI 1 "register_operand" "d")))
+ (set (match_operand:SI 2 "register_operand" "=d")
+ (mem:SI (plus:SI (match_dup 1)
+ (match_operand:SI 3 "immediate_operand" "n"))))]
+ "(i960_si_di (operands[1], 0) && ((REGNO (operands[0]) & 1) == 0)
+ && (REGNO (operands[1]) != REGNO (operands[0]))
+ && (REGNO (operands[0]) + 1 == REGNO (operands[2]))
+ && (INTVAL (operands[3]) == 4))"
+ "ldl (%1),%0")
+
+; Multiple register stores.
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "immediate_operand" "n")))
+ (match_operand:SI 2 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 3 "immediate_operand" "n")))
+ (match_operand:SI 4 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 5 "immediate_operand" "n")))
+ (match_operand:SI 6 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 7 "immediate_operand" "n")))
+ (match_operand:SI 8 "register_operand" "d"))]
+ "(i960_si_ti (operands[0], operands[1]) && ((REGNO (operands[2]) & 3) == 0)
+ && (REGNO (operands[2]) + 1 == REGNO (operands[4]))
+ && (REGNO (operands[2]) + 2 == REGNO (operands[6]))
+ && (REGNO (operands[2]) + 3 == REGNO (operands[8]))
+ && (INTVAL (operands[1]) + 4 == INTVAL (operands[3]))
+ && (INTVAL (operands[1]) + 8 == INTVAL (operands[5]))
+ && (INTVAL (operands[1]) + 12 == INTVAL (operands[7])))"
+ "stq %2,%1(%0)")
+
+;; Matched 6/16/91
+(define_peephole
+ [(set (mem:DF (plus:SI (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "immediate_operand" "n")))
+ (match_operand:DF 2 "register_operand" "d"))
+ (set (mem:DF (plus:SI (match_dup 0)
+ (match_operand:SI 3 "immediate_operand" "n")))
+ (match_operand:DF 4 "register_operand" "d"))]
+ "(i960_si_ti (operands[0], operands[1]) && ((REGNO (operands[2]) & 3) == 0)
+ && (REGNO (operands[2]) + 2 == REGNO (operands[4]))
+ && (INTVAL (operands[1]) + 8 == INTVAL (operands[3])))"
+ "stq %2,%1(%0)")
+
+;; Matched 4/17/92
+(define_peephole
+ [(set (mem:DI (plus:SI (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "immediate_operand" "n")))
+ (match_operand:DI 2 "register_operand" "d"))
+ (set (mem:DI (plus:SI (match_dup 0)
+ (match_operand:SI 3 "immediate_operand" "n")))
+ (match_operand:DI 4 "register_operand" "d"))]
+ "(i960_si_ti (operands[0], operands[1]) && ((REGNO (operands[2]) & 3) == 0)
+ && (REGNO (operands[2]) + 2 == REGNO (operands[4]))
+ && (INTVAL (operands[1]) + 8 == INTVAL (operands[3])))"
+ "stq %2,%1(%0)")
+
+;; Matched 1/23/92
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "d"))
+ (match_operand:SI 1 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 2 "immediate_operand" "n")))
+ (match_operand:SI 3 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 4 "immediate_operand" "n")))
+ (match_operand:SI 5 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 6 "immediate_operand" "n")))
+ (match_operand:SI 7 "register_operand" "d"))]
+ "(i960_si_ti (operands[0], 0) && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[5]))
+ && (REGNO (operands[1]) + 3 == REGNO (operands[7]))
+ && (INTVAL (operands[2]) == 4)
+ && (INTVAL (operands[4]) == 8)
+ && (INTVAL (operands[6]) == 12))"
+ "stq %1,(%0)")
+
+;; Matched 5/29/91
+(define_peephole
+ [(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "immediate_operand" "n")))
+ (match_operand:SI 2 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 3 "immediate_operand" "n")))
+ (match_operand:SI 4 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 5 "immediate_operand" "n")))
+ (match_operand:SI 6 "register_operand" "d"))]
+ "(i960_si_ti (operands[0], operands[1]) && ((REGNO (operands[2]) & 3) == 0)
+ && (REGNO (operands[2]) + 1 == REGNO (operands[4]))
+ && (REGNO (operands[2]) + 2 == REGNO (operands[6]))
+ && (INTVAL (operands[1]) + 4 == INTVAL (operands[3]))
+ && (INTVAL (operands[1]) + 8 == INTVAL (operands[5])))"
+ "stt %2,%1(%0)")
+
+;; Matched 5/29/91
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "d"))
+ (match_operand:SI 1 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 2 "immediate_operand" "n")))
+ (match_operand:SI 3 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 4 "immediate_operand" "n")))
+ (match_operand:SI 5 "register_operand" "d"))]
+ "(i960_si_ti (operands[0], 0) && ((REGNO (operands[1]) & 3) == 0)
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))
+ && (REGNO (operands[1]) + 2 == REGNO (operands[5]))
+ && (INTVAL (operands[2]) == 4)
+ && (INTVAL (operands[4]) == 8))"
+ "stt %1,(%0)")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "immediate_operand" "n")))
+ (match_operand:SI 2 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 3 "immediate_operand" "n")))
+ (match_operand:SI 4 "register_operand" "d"))]
+ "(i960_si_di (operands[0], operands[1]) && ((REGNO (operands[2]) & 1) == 0)
+ && (REGNO (operands[2]) + 1 == REGNO (operands[4]))
+ && (INTVAL (operands[1]) + 4 == INTVAL (operands[3])))"
+ "stl %2,%1(%0)")
+
+;; Matched 5/28/91
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "d"))
+ (match_operand:SI 1 "register_operand" "d"))
+ (set (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 2 "immediate_operand" "n")))
+ (match_operand:SI 3 "register_operand" "d"))]
+ "(i960_si_di (operands[0], 0) && ((REGNO (operands[1]) & 1) == 0)
+ && (REGNO (operands[1]) + 1 == REGNO (operands[3]))
+ && (INTVAL (operands[2]) == 4))"
+ "stl %1,(%0)")
diff --git a/gnu/usr.bin/gcc/config/i960/t-960bare b/gnu/usr.bin/gcc/config/i960/t-960bare
new file mode 100644
index 00000000000..20a4870c939
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/t-960bare
@@ -0,0 +1,20 @@
+LIBGCC1 =
+CROSS_LIBGCC1 =
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+MULTILIB_OPTIONS=mnumerics
+MULTILIB_DIRNAMES=float
+MULTILIB_MATCHES=mnumerics=msb mnumerics=msc mnumerics=mkb mnumerics=mkc mnumerics=mmc mnumerics=mcb mnumerics=mcc
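+# That is, any of the options listed on the right-hand sides above (-msb,
+# -mkb, -mmc, ...) selects the multilib that is built for -mnumerics.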
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gnu/usr.bin/gcc/config/i960/t-vxworks960 b/gnu/usr.bin/gcc/config/i960/t-vxworks960
new file mode 100644
index 00000000000..84949cf17f5
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/t-vxworks960
@@ -0,0 +1,23 @@
+LIBGCC1 =
+CROSS_LIBGCC1 =
+
+# We don't want to put exit in libgcc.a for VxWorks, because VxWorks
+# does not have _exit.
+LIBGCC2_CFLAGS = -O2 $(GCC_CFLAGS) -g1 -Dexit=unused_exit
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+MULTILIB_OPTIONS=mnumerics
+MULTILIB_DIRNAMES=float
+MULTILIB_MATCHES=mnumerics=msb mnumerics=msc mnumerics=mkb mnumerics=mkc mnumerics=mmc mnumerics=mcb mnumerics=mcc
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gnu/usr.bin/gcc/config/i960/vx960-coff.h b/gnu/usr.bin/gcc/config/i960/vx960-coff.h
new file mode 100644
index 00000000000..74b1b668f0d
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/vx960-coff.h
@@ -0,0 +1,69 @@
+/* Definitions of target machine for GNU compiler. Vxworks i960 version.
+ Copyright (C) 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file just exists to give specs for the 960 running on VxWorks. */
+
+#include "i960/i960-coff.h"
+
+/* VxWorks does all the library stuff itself. */
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+/* Predefine vxworks. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di960 -Di80960 -DI960 -DI80960 -Dvxworks -Acpu(i960) -Amachine(i960)"
+
+/* The VxWorks header files expect the compiler to define CPU to a
+ magic number. */
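+/* For example, compiling with -mkb and without -ansi passes -DCPU=I960KB to
+   the preprocessor via the spec below.  */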
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{mic*:-D__i960\
+ %{mka:-D__i960KA}%{mkb:-D__i960KB}\
+ %{msa:-D__i960SA}%{msb:-D__i960SB}\
+ %{mmc:-D__i960MC}\
+ %{mca:-D__i960CA}%{mcc:-D__i960CC}\
+ %{mcf:-D__i960CF}}\
+ %{mka:-D__i960KA__ -D__i960_KA__ %{!ansi:-DCPU=I960KA}}\
+ %{mkb:-D__i960KB__ -D__i960_KB__ %{!ansi:-DCPU=I960KB}}\
+ %{msa:-D__i960SA__ -D__i960_SA__}\
+ %{msb:-D__i960SB__ -D__i960_SB__}\
+ %{mmc:-D__i960MC__ -D__i960_MC__}\
+ %{mca:-D__i960CA__ -D__i960_CA__ %{!ansi:-DCPU=I960CA}}\
+ %{mcc:-D__i960CC__ -D__i960_CC__}\
+ %{mcf:-D__i960CF__ -D__i960_CF__}\
+ %{!mka:%{!mkb:%{!msa:%{!msb:%{!mmc:%{!mca:\
+ %{!mcc:%{!mcf:-D__i960_CA -D__i960CA__ %{!ansi:-DCPU=I960CA}\
+ %{mic*:-D__i960CA}}}}}}}}}"
+
+/* Default to -mca. */
+
+#undef CC1_SPEC
+#define CC1_SPEC \
+ "%{!mka:%{!mkb:%{!msa:%{!msb:%{!mmc:%{!mca:%{!mcc:%{!mcf:-mca}}}}}}}}\
+ %{!gs*:%{!gc*:%{mbout:%{g*:-gstabs}}\
+ %{mcoff:%{g*:-gcoff}}\
+ %{!mbout:%{!mcoff:%{g*:-gcoff}}}}}"
diff --git a/gnu/usr.bin/gcc/config/i960/vx960.h b/gnu/usr.bin/gcc/config/i960/vx960.h
new file mode 100644
index 00000000000..25f4f9793e0
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/vx960.h
@@ -0,0 +1,33 @@
+/* Definitions of target machine for GNU compiler. Vxworks i960 version.
+ Copyright (C) 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file just exists to give specs for the 960 running on VxWorks. */
+
+#include "i960/i960.h"
+
+/* VxWorks does all the library stuff itself. */
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
diff --git a/gnu/usr.bin/gcc/config/i960/xm-i960.h b/gnu/usr.bin/gcc/config/i960/xm-i960.h
new file mode 100644
index 00000000000..09dcadff460
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/i960/xm-i960.h
@@ -0,0 +1,43 @@
+/* Configuration for GNU C-compiler for Intel 960 family
+ Copyright (C) 1987, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If not compiled with GNU C, use the C alloca */
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"